diff --git a/.git-blame-ignore-revs b/.git-blame-ignore-revs new file mode 100644 index 0000000000..a4bdd6106a --- /dev/null +++ b/.git-blame-ignore-revs @@ -0,0 +1,31 @@ +# You can configure git to automatically use this file with the following config: +# git config --global blame.ignoreRevsFile .git-blame-ignore-revs + +e0f57ad81fb6a9f5100cfdefdd30661326edb031 # Bump Python version used for linters to 3.10 +4beac2a236993bd7890279f3f22a2eda3cd89bad # pre-commit: Prepare for bump +399dfcc0e59124fde68376b77089b20485f6566f # pre-commit: Migrate pyupgrade to ruff-format +187e88a5593e4f475379738811043ee4528a7df3 # pre-commit: Migrate from black to ruff format +c7010a2f929de9fad4e1a7c7f5a17cb8e210432a # Bump black to 23.3.0 +a36f514295a4b4e6157ce69a210f653bcc4df7f2 # Blackify everything else +004c7352d0a4fb467a319ae9743eb6ca5ee9ce7f # Blackify openstack.cloud +c2ff7336cecabc665e7bf04cbe87ef8d0c2e6f9f # Blackify openstack.clustering +073abda5a94b12a319c79d6a9b8594036f95fc65 # Blackify openstack.container_infrastructure_management +570b81f0ec3b3876aefbb223c78093f2a957bb01 # Blackify openstack.accelerator +33bed575013f11e4d408593e53c6c99ca66d6110 # Blackify openstack.instance_ha +10018dbf5be5e19c87543a5931f6809006eba4c5 # Blackify openstack.dns +19ec9ba383d14f4af6a1bb78dbbeaa6638ee8a4f # Blackify openstack.database +0e2b5d263fdf12e0c8a67503712afab2816ef2d0 # Blackify openstack.message +9d3d986241ce110e8f6bdf3ecb19609dc417a10a # Blackify openstack.workflow +874ea74103a0c833df7668a45b96b7145a8158a2 # Blackify openstack.orchestration +409f648ce506d7e768305f75025c4b01c5fa3008 # Blackify openstack.placement +93d8f41713ec2128210bf0a8479a5f3872ce0382 # Blackify openstack.key_manager +3d2511f98025d2d2826e13cea8be7545e90990f7 # Blackify openstack.shared_file_system +82c2a534024cff7690620876723422a98e8f371a # Blackify openstack.load_balancer +f8e42017e756e383367145c4caf39de796babcba # Blackify openstack.baremetal, openstack.baremetal_introspection 
+4589e293e829950d2fd4c705cce2f7ce30ca9e29 # Blackify openstack.object_store +34da09f3125ccd0408f2e0019c85d95188fef573 # Blackify openstack.block_storage +542ddaa1ad5cfc9b9876de3de0759941c9a9ea83 # Blackify openstack.identity +f526b990f31de03a1b6181a4724976e1b86a654a # Blackify openstack.network +bcf99f3433ceecf9a210d0aa0580a67645ccf7ee # Blackify openstack.image +69735d3bd8fd874a9817c26b5b009921110fb416 # Blackify openstack.compute (tests) +395a77298ecd79623b1af75ad0dc7653f5e4eb61 # Blackify openstack.compute diff --git a/.gitignore b/.gitignore index f8b6eb209f..70c3c4095b 100644 --- a/.gitignore +++ b/.gitignore @@ -29,6 +29,7 @@ cover/* .tox nosetests.xml .testrepository +.stestr # Translations *.mo diff --git a/.gitreview b/.gitreview index d838a518fb..9e465c3ef0 100644 --- a/.gitreview +++ b/.gitreview @@ -1,4 +1,4 @@ [gerrit] -host=review.openstack.org +host=review.opendev.org port=29418 -project=openstack/python-openstacksdk.git +project=openstack/openstacksdk.git diff --git a/.mailmap b/.mailmap index cc92f17b8d..c7b4804d72 100644 --- a/.mailmap +++ b/.mailmap @@ -1,3 +1,6 @@ # Format is: # -# \ No newline at end of file +# + + + diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml new file mode 100644 index 0000000000..eec38c36d1 --- /dev/null +++ b/.pre-commit-config.yaml @@ -0,0 +1,33 @@ +--- +repos: + - repo: https://github.com/pre-commit/pre-commit-hooks + rev: v6.0.0 + hooks: + - id: trailing-whitespace + - id: mixed-line-ending + args: ['--fix', 'lf'] + exclude: '.*\.(svg)$' + - id: fix-byte-order-marker + - id: check-executables-have-shebangs + - id: check-merge-conflict + - id: debug-statements + - id: check-yaml + files: .*\.(yaml|yml)$ + exclude: '^zuul.d/.*$' + - repo: https://github.com/PyCQA/doc8 + rev: v2.0.0 + hooks: + - id: doc8 + - repo: https://github.com/astral-sh/ruff-pre-commit + rev: v0.15.1 + hooks: + - id: ruff-check + args: ['--fix', '--unsafe-fixes'] + - id: ruff-format + - repo: https://opendev.org/openstack/hacking + 
rev: 8.0.0 + hooks: + - id: hacking + additional_dependencies: + - flake8-import-order~=0.19.2 + exclude: '^(doc|releasenotes|tools)/.*$' diff --git a/.stestr.conf b/.stestr.conf new file mode 100644 index 0000000000..f42846f415 --- /dev/null +++ b/.stestr.conf @@ -0,0 +1,3 @@ +[DEFAULT] +test_path=./openstack/tests/unit +top_dir=./ diff --git a/.testr.conf b/.testr.conf deleted file mode 100644 index 499b7a3706..0000000000 --- a/.testr.conf +++ /dev/null @@ -1,8 +0,0 @@ -[DEFAULT] -test_command=OS_STDOUT_CAPTURE=${OS_STDOUT_CAPTURE:-1} \ - OS_STDERR_CAPTURE=${OS_STDERR_CAPTURE:-1} \ - OS_TEST_TIMEOUT=${OS_TEST_TIMEOUT:-60} \ - ${PYTHON:-python} -m subunit.run discover -t ./ ${OS_TEST_PATH:-./openstack/tests/unit} $LISTOPT $IDOPTION -test_id_option=--load-list $IDFILE -test_list_option=--list -group_regex=([^\.]+\.)+ diff --git a/CONTRIBUTING.rst b/CONTRIBUTING.rst index 8fee794148..1c61eb4265 100644 --- a/CONTRIBUTING.rst +++ b/CONTRIBUTING.rst @@ -1,16 +1,49 @@ -If you would like to contribute to the development of OpenStack, -you must follow the steps in this page: +.. _contributing: - http://docs.openstack.org/infra/manual/developers.html +============================ +Contributing to openstacksdk +============================ -Once those steps have been completed, changes to OpenStack -should be submitted for review via the Gerrit tool, following -the workflow documented at: +If you're interested in contributing to the openstacksdk project, +the following will help get you started. - http://docs.openstack.org/infra/manual/developers.html#development-workflow +Developer Certificate of Origin +------------------------------- +.. index:: + single: license; agreement + +In order to contribute to the openstacksdk project, you need to adhere +to the `Developer Certificate of Origin`_. OpenStack utilizes the Developer +Certificate of Origin (DCO) as a lightweight means to confirm that you are +entitled to contribute the code you submit. 
This ensures that you are +providing your contributions under the project's license and that you have +the right to do so. + +Please read `DeveloperWorkflow`_ before sending your first patch for review. Pull requests submitted through GitHub will be ignored. -Bugs should be filed on Launchpad, not GitHub: +.. seealso:: + + * https://docs.openstack.org/contributors/common/dco.html + +.. _Developer Certificate of Origin: https://developercertificate.org/ +.. _DeveloperWorkflow: https://docs.openstack.org/infra/manual/developers.html#development-workflow + +Project Hosting Details +----------------------- + +Project Documentation + https://docs.openstack.org/openstacksdk/latest/ + +Bug tracker + https://bugs.launchpad.net/openstacksdk + +Mailing list (prefix subjects with ``[sdk]`` for faster responses) + https://lists.openstack.org/mailman3/lists/openstack-discuss.lists.openstack.org/ + +Code Hosting + https://opendev.org/openstack/openstacksdk - https://bugs.launchpad.net/python-openstacksdk \ No newline at end of file +Code Review + https://review.opendev.org/#/q/status:open+project:openstack/openstacksdk,n,z diff --git a/HACKING.rst b/HACKING.rst index bfb22b7fa9..cf66c368ca 100644 --- a/HACKING.rst +++ b/HACKING.rst @@ -1,4 +1,63 @@ -python-openstacksdk Style Commandments -====================================== +openstacksdk Style Commandments +=============================== -Read the OpenStack Style Commandments http://docs.openstack.org/developer/hacking/ +Read the OpenStack Style Commandments +https://docs.openstack.org/hacking/latest/ + +Indentation +----------- + +PEP-8 allows for 'visual' indentation. **Do not use it**. +Visual indentation looks like this: + +.. code-block:: python + + return_value = self.some_method(arg1, arg1, + arg3, arg4) + +Visual indentation makes refactoring the code base unnecessarily hard. + +Instead of visual indentation, use this: + +.. 
code-block:: python + + return_value = self.some_method( + arg1, arg1, arg3, arg4) + +That way, if some_method ever needs to be renamed, the only line that needs +to be touched is the line with some_method. + +Additionally, if you need to line break at the top of a block, please indent +the continuation line an additional 4 spaces, like this: + +.. code-block:: python + + for val in self.some_method( + arg1, arg1, arg3, arg4): + self.do_something_awesome() + +Neither of these are 'mandated' by PEP-8. However, they are prevailing styles +within this code base. + +Unit Tests +---------- + +Unit tests should be virtually instant. If a unit test takes more than 1 second +to run, it is a bad unit test. Honestly, 1 second is too slow. + +All unit test classes should subclass `openstack.tests.unit.base.TestCase`. The +base TestCase class takes care of properly creating `Connection` objects +in a way that protects against local environment. + +Test cases should use requests-mock to mock out HTTP interactions rather than +using mock to mock out object access. + +Don't Use setUpClass +-------------------- + +setUpClass looks like it runs once for the class. In parallel test execution +environments though, it runs once per execution context. This makes reasoning +about when it is going to actually run and what is going to happen extremely +difficult and can produce hard to debug test issues. + +Don't ever use it. It makes baby pandas cry. 
diff --git a/MANIFEST.in b/MANIFEST.in deleted file mode 100644 index 90f8a7aefd..0000000000 --- a/MANIFEST.in +++ /dev/null @@ -1,6 +0,0 @@ -include AUTHORS -include ChangeLog -exclude .gitignore -exclude .gitreview - -global-exclude *.pyc \ No newline at end of file diff --git a/README.rst b/README.rst index a75f448a62..51b862be7f 100644 --- a/README.rst +++ b/README.rst @@ -1,36 +1,390 @@ -OpenStack Python SDK -==================== +============ +openstacksdk +============ -The ``python-openstacksdk`` is a collection of libraries for building -applications to work with OpenStack clouds. The project aims to provide -a consistent and complete set of interactions with OpenStack's many -services, along with complete documentation, examples, and tools. +openstacksdk is a client library for building applications to work +with OpenStack clouds. The project aims to provide a consistent and +complete set of interactions with OpenStack's many services, along with +complete documentation, examples, and tools. -This SDK is under active development, and in the interests of providing -a high-quality interface, the APIs provided in this release may differ -from those provided in future release. +It also contains an abstraction interface layer. Clouds can do many things, but +there are probably only about 10 of them that most people care about with any +regularity. If you want to do complicated things, the per-service oriented +portions of the SDK are for you. However, if what you want is to be able to +write an application that talks to any OpenStack cloud regardless of +configuration, then the Cloud Abstraction layer is for you. -Usage ------ +More information about the history of openstacksdk can be found at +https://docs.openstack.org/openstacksdk/latest/contributor/history.html + +Getting started +--------------- + +.. rubric:: Authentication and connection management + +openstacksdk aims to talk to any OpenStack cloud. To do this, it requires a +configuration file. 
openstacksdk favours ``clouds.yaml`` files, but can also +use environment variables. The ``clouds.yaml`` file should be provided by your +cloud provider or deployment tooling. An example: + +.. code-block:: yaml + + clouds: + mordred: + region_name: Dallas + auth: + username: 'mordred' + password: XXXXXXX + project_name: 'demo' + auth_url: 'https://identity.example.com' + +openstacksdk will look for ``clouds.yaml`` files in the following locations: + +* If set, the path indicated by the ``OS_CLIENT_CONFIG_FILE`` environment + variable +* ``.`` (the current directory) +* ``$HOME/.config/openstack`` +* ``/etc/openstack`` + +You can create a connection using the ``openstack.connect`` function. The cloud +name can be either passed directly to this function or specified using the +``OS_CLOUD`` environment variable. If you don't have a ``clouds.yaml`` file and +instead use environment variables for configuration then you can use the +special ``envvars`` cloud name to load configuration from the environment. For +example: + +.. code-block:: python + + import openstack + + # Initialize connection from a clouds.yaml by passing a cloud name + conn_from_cloud_name = openstack.connect(cloud='mordred') + + # Initialize connection from a clouds.yaml using the OS_CLOUD envvar + conn_from_os_cloud = openstack.connect() + + # Initialize connection from environment variables + conn_from_env_vars = openstack.connect(cloud='envvars') + +.. note:: + + How this is all achieved is described in more detail `below + `__. + +.. rubric:: The cloud layer + +openstacksdk consists of four layers which all build on top of each other. The +highest level layer is the *cloud* layer. Cloud layer methods are available via +the top level ``Connection`` object returned by ``openstack.connect``. For +example: + +.. 
code-block:: python + + import openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # List the servers + for server in conn.list_servers(): + print(server.to_dict()) + +The cloud layer is based on logical operations that can potentially touch +multiple services. The benefit of this layer is mostly seen in more complicated +operations that take multiple steps and where the steps vary across providers. +For example: + +.. code-block:: python + + import openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # Upload an image to the cloud + image = conn.create_image( + 'ubuntu-trusty', filename='ubuntu-trusty.qcow2', wait=True) + + # Find a flavor with at least 512M of RAM + flavor = conn.get_flavor_by_ram(512) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public IP address for it. + conn.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) + +.. rubric:: The proxy layer + +The next layer is the *proxy* layer. Most users will make use of this layer. +The proxy layer is service-specific, so methods will be available under +service-specific connection attributes of the ``Connection`` object such as +``compute``, ``block_storage``, ``image`` etc. For example: + +.. code-block:: python + + import openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # List the servers + for server in conn.compute.servers(): + print(server.to_dict()) + +.. note:: + + A list of supported services is given `below `__. + +.. rubric:: The resource layer + +Below this there is the *resource* layer. 
This provides support for the basic +CRUD operations supported by REST APIs and is the base building block for the +other layers. You typically will not need to use this directly but it can be +helpful for operations where you already have a ``Resource`` object to hand. +For example: + +.. code-block:: python + + import openstack + import openstack.config.loader + import openstack.compute.v2.server + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # List the servers + for server in openstack.compute.v2.server.Server.list(session=conn.compute): + print(server.to_dict()) + +.. rubric:: The raw HTTP layer -The following example simply connects to an OpenStack cloud and lists -the containers in the Object Store service.:: +Finally, there is the *raw HTTP* layer. This exposes raw HTTP semantics and +is effectively a wrapper around the `requests`__ API with added smarts to +handle stuff like authentication and version management. As such, you can use +the ``requests`` API methods you know and love, like ``get``, ``post`` and +``put``, and expect to receive a ``requests.Response`` object in response +(unlike the other layers, which mostly all return objects that subclass +``openstack.resource.Resource``). Like the *resource* layer, you will typically +not need to use this directly but it can be helpful to interact with APIs that +have not or will not be supported by openstacksdk. For example: - from openstack import connection - conn = connection.Connection(auth_url="http://openstack:5000/v3", - project_name="big_project", - username="SDK_user", - password="Super5ecretPassw0rd") - for container in conn.object_store.containers(): - print(container.name) +.. 
code-block:: python -Documentation + import openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # List servers + for server in conn.compute.get('/servers').json(): + print(server) + +.. __: https://requests.readthedocs.io/en/latest/ + +.. _openstack.config: + +Configuration ------------- -Documentation is available at -http://developer.openstack.org/sdks/python/openstacksdk/ +openstacksdk uses the ``openstack.config`` module to parse configuration. +``openstack.config`` will find cloud configuration for as few as one cloud and +as many as you want to put in a config file. It will read environment variables +and config files, and it also contains some vendor specific default values so +that you don't have to know extra info to use OpenStack: + +* If you have a config file, you will get the clouds listed in it +* If you have environment variables, you will get a cloud named `envvars` +* If you have neither, you will get a cloud named `defaults` with base defaults + +You can view the configuration identified by openstacksdk in your current +environment by running ``openstack.config.loader``. For example: + +.. code-block:: bash + + $ python -m openstack.config.loader + +More information at https://docs.openstack.org/openstacksdk/latest/user/config/configuration.html + +.. _supported-services: + +Supported services +------------------ + +The following services are currently supported. A full list of all available +OpenStack services can be found in the `Project Navigator`__. + +.. note:: + + Support here does not guarantee full-support for all APIs. It simply means + some aspect of the project is supported. + +.. 
list-table:: Supported services + :widths: 15 25 10 40 + :header-rows: 1 + + * - Service + - Description + - Cloud Layer + - Proxy & Resource Layer + + * - **Compute** + - + - + - + + * - Nova + - Compute + - ✔ + - ✔ (``openstack.compute``) + + * - **Hardware Lifecycle** + - + - + - + + * - Ironic + - Bare metal provisioning + - ✔ + - ✔ (``openstack.baremetal``, ``openstack.baremetal_introspection``) + + * - Cyborg + - Lifecycle management of accelerators + - ✔ + - ✔ (``openstack.accelerator``) + + * - **Storage** + - + - + - -License -------- + * - Cinder + - Block storage + - ✔ + - ✔ (``openstack.block_storage``) + + * - Swift + - Object store + - ✔ + - ✔ (``openstack.object_store``) + + * - Manila + - Shared filesystems + - ✔ + - ✔ (``openstack.shared_file_system``) + + * - **Networking** + - + - + - + + * - Neutron + - Networking + - ✔ + - ✔ (``openstack.network``) + + * - Octavia + - Load balancing + - ✔ + - ✔ (``openstack.load_balancer``) + + * - Designate + - DNS + - ✔ + - ✔ (``openstack.dns``) + + * - **Shared services** + - + - + - + + * - Keystone + - Identity + - ✔ + - ✔ (``openstack.identity``) + + * - Placement + - Placement + - ✔ + - ✔ (``openstack.placement``) + + * - Glance + - Image storage + - ✔ + - ✔ (``openstack.image``) + + * - Barbican + - Key management + - ✔ + - ✔ (``openstack.key_manager``) + + * - **Workload provisioning** + - + - + - + + * - Magnum + - Container orchestration engine provisioning + - ✔ + - ✔ (``openstack.container_infrastructure_management``) + + * - **Orchestration** + - + - + - + + * - Heat + - Orchestration + - ✔ + - ✔ (``openstack.orchestration``) + + * - Senlin + - Clustering + - ✔ + - ✔ (``openstack.clustering``) + + * - Mistral + - Workflow + - ✔ + - ✔ (``openstack.workflow``) + + * - Zaqar + - Messaging + - ✔ + - ✔ (``openstack.message``) + + * - **Application lifecycle** + - + - + - + + * - Masakari + - Instances high availability service + - ✔ + - ✔ (``openstack.instance_ha``) + +.. 
__: https://www.openstack.org/software/project-navigator/openstack-components#openstack-services + +Links +----- -Apache 2.0 +* `Issue Tracker `_ +* `Code Review `_ +* `Documentation `_ +* `PyPI `_ +* `Mailing list `_ +* `Release Notes `_ diff --git a/SHADE-MERGE-TODO.rst b/SHADE-MERGE-TODO.rst new file mode 100644 index 0000000000..e34d878103 --- /dev/null +++ b/SHADE-MERGE-TODO.rst @@ -0,0 +1,137 @@ +Tasks Needed for rationalizing shade and openstacksdk +===================================================== + +A large portion of the important things have already been done and landed +already. For reference, those are: + +* shade and os-client-config library content have been merged into the tree. +* Use official service-type names from Service Types Authority via + os-service-types to refer to services and proxies. +* Automatically also add properties to the connection for every known alias + for each service-type. +* Made openstack.proxy.Proxy a subclass of keystoneauth1.adapter.Adapter. + Removed local logic that duplicates keystoneauth logic. This means every + proxy also has direct REST primitives available. For example: + + .. code-block:: python + + connection = connection.Connection() + servers = connection.compute.servers() + server_response = connection.compute.get('/servers') + +* Removed the Profile object in favor of openstack.config. +* Removed the Session object in favor of using keystoneauth. +* Plumbed Proxy use of Adapter through the Adapter subclass from shade that + uses the TaskManager to run REST calls. +* Finish migrating to Resource2 and Proxy2, rename them to Resource and Proxy. +* Merge OpenStackCloud into Connection. This should result + in being able to use the connection interact with the cloud using all three + interfaces. For instance: + + .. 
code-block:: python + + conn = connection.Connection() + servers = conn.list_servers() # High-level resource interface from shade + servers = conn.compute.servers() # SDK Service/Object Interface + response = conn.compute.get('/servers') # REST passthrough +* Removed ServiceFilter and the various Service objects in favor of discovery. + +Next steps +========== + +* Maybe rename self.session and session parameter in all usage in proxy and + resource to self.adapter. They are Adapters not Sessions, but that may not + mean anything to people. +* Migrate unit tests to requests-mock instead of mocking python calls to + session. +* Replace _prepare_request with requests.Session.prepare_request. + +shade integration +----------------- + +* Invent some terminology that is clear and makes sense to distinguish between + the object interface that came originally from openstacksdk and the + interface that came from shade. +* Shift the shade interface methods to use the Object Interface for their + operations. It's possible there may be cases where the REST layer needs to + be used instead, but we should try to sort those out. +* Investigate options and then make a plan as to whether shade methods should + return SDK objects or return dicts/munches as they do today. Should we make + Resource objects extend dict/munch so they can be used like the shade ones + today? Or should we just have the external shade shim library get objects + from the high-level SDK 'shade' interface and call to_dict() on them all? +* Add support for shade expressing normalization model/contract into Resource, + or for just leveraging what's in Resource for shade-layer normalization. +* Make a plan for normalization supporting shade users continuing + to get shade normalized resource Munch objects from shade API calls, sdk + proxy/resource users getting SDK objects, and both of them being able to opt + in to "strict" normalization at Connection constructor time. 
Perhaps making + Resource subclass Munch would allow mixed use? Needs investigation. +* Investigate auto-generating the bulk of shade's API based on introspection of + SDK objects, leaving only the code with extra special logic in the shade + layer. + +Service Proxies +--------------- + +These are all things to think about. + +* Authenticate at Connection() creation time? Having done that, use the + catalog in the token to determine which service proxies to add to the + Connection object. +* Filter the above service list from the token by has_service() from + openstack.config. +* Add a has_service method to Connection which will BASICALLY just be + hasattr(self, 'service') - but will look nicer. +* Consider adding magic to Connection for every service that a given cloud + DOESN'T have that will throw an exception on any attribute access that is + "cloud doesn't have service blah" rather than simply Attribute Not Found. + The SDK has a python api regardless of the services remotely, it would be + nice if trimming the existing attribute list wouldn't make it impossible for + someone to validate their code correctness. It's also possible that instead + of not having services, we always mount proxy objects for every service, but + we mount a "NotFound" proxy for each service that isn't there. +* Since openstacksdk uses version discovery now, there is always a good path + to "the" version of a given service. However, a cloud may have more than one. + Attach the discovered service proxy to connection as today under the service + type name. Add a property to each service proxy for each version the SDK + knows about. For instance: + + .. code-block:: python + + connection = openstack.Connection() + connection.volume # openstack.volume.v3._proxy + connection.volume.v2 # openstack.volume.v2._proxy + connection.volume.v3 # openstack.volume.v3._proxy + + Those versioned proxies should be done as Adapters with min and max version + set explicitly. 
This should allow a common pattern for people to write code + that just wants to use the discovered or configured service, or who want to + attempt to use a specific version of the API if they know what they're doing + and at the very least wind up with a properly configured Adapter they can + make rest calls on. Because: + + .. code-block:: python + + connection = openstack.Connection() + connection.dns.v2.get('/zones') + + should always work on an OpenStack cloud with designate even if the SDK + authors don't know anything about Designate and haven't added Resource or + Proxy explicitly for it. +* Decide what todo about non-OpenStack services. Do we add base Proxy + properties to Connection for every service we find in the catalog regardless + of official/non-official? If so, do we let someone pass a dict of + service-type, Proxy to connection that would let the provide a local service + we don't know about? If we do that- we should disallow passing in overrides + for services we DO know about to discourage people writing local tools that + have different Compute behavior, for instance. + +Microversions +------------- + +* keystoneauth.adapter.Adapter knows how to send microversion headers, and + get_endpoint_data knows how to fetch supported ranges. As microversion + support is added to calls, it needs to be on a per-request basis. This + has implications to both Resource and Proxy, as cloud payloads for data + mapping can be different on a per-microversion basis. diff --git a/babel.cfg b/babel.cfg deleted file mode 100644 index efceab818b..0000000000 --- a/babel.cfg +++ /dev/null @@ -1 +0,0 @@ -[python: **.py] diff --git a/bindep.txt b/bindep.txt new file mode 100644 index 0000000000..a74f386aed --- /dev/null +++ b/bindep.txt @@ -0,0 +1,8 @@ +# This is a cross-platform list tracking distribution packages needed by tests; +# see http://docs.openstack.org/infra/bindep/ for additional information. 
+ +build-essential [platform:dpkg] +python3-dev [platform:dpkg] +libffi-dev [platform:dpkg] +libffi-devel [platform:rpm] +openssl-devel [platform:rpm] diff --git a/create_yaml.sh b/create_yaml.sh deleted file mode 100755 index 3da2078414..0000000000 --- a/create_yaml.sh +++ /dev/null @@ -1,30 +0,0 @@ -#!/bin/bash -# -# NOTE(thowe): There are some issues with OCC envvars that force us to do -# this for now. -# -mkdir -p ~/.config/openstack/ -FILE=~/.config/openstack/clouds.yaml -echo 'clouds:' >$FILE -echo ' test_cloud:' >>$FILE -env | grep OS_ | tr '=' ' ' | while read k v -do - k=$(echo $k | sed -e 's/OS_//') - k=$(echo $k | tr '[A-Z]' '[a-z]') - case "$k" in - region_name|*_api_version) - echo " $k: $v" >>$FILE - esac -done -echo " auth:" >>$FILE -env | grep OS_ | tr '=' ' ' | while read k v -do - k=$(echo $k | sed -e 's/OS_//') - k=$(echo $k | tr '[A-Z]' '[a-z]') - case "$k" in - region_name|*_api_version) - ;; - *) - echo " $k: $v" >>$FILE - esac -done diff --git a/devstack/plugin.sh b/devstack/plugin.sh new file mode 100644 index 0000000000..d1a53c15db --- /dev/null +++ b/devstack/plugin.sh @@ -0,0 +1,54 @@ +# Install and configure **openstacksdk** library in devstack +# +# To enable openstacksdk in devstack add an entry to local.conf that looks like +# +# [[local|localrc]] +# enable_plugin openstacksdk https://opendev.org/openstack/openstacksdk + +function preinstall_openstacksdk { + : +} + +function install_openstacksdk { + if use_library_from_git "openstacksdk"; then + # don't clone, it'll be done by the plugin install + setup_dev_lib "openstacksdk" + else + pip_install "openstacksdk" + fi +} + +function configure_openstacksdk { + : +} + +function initialize_openstacksdk { + : +} + +function unstack_openstacksdk { + : +} + +function clean_openstacksdk { + : +} + +# This is the main for plugin.sh +if [[ "$1" == "stack" && "$2" == "pre-install" ]]; then + preinstall_openstacksdk +elif [[ "$1" == "stack" && "$2" == "install" ]]; then + install_openstacksdk 
+elif [[ "$1" == "stack" && "$2" == "post-config" ]]; then + configure_openstacksdk +elif [[ "$1" == "stack" && "$2" == "extra" ]]; then + initialize_openstacksdk +fi + +if [[ "$1" == "unstack" ]]; then + unstack_openstacksdk +fi + +if [[ "$1" == "clean" ]]; then + clean_openstacksdk +fi diff --git a/doc/Makefile b/doc/Makefile deleted file mode 100644 index 2cdd0f5cd9..0000000000 --- a/doc/Makefile +++ /dev/null @@ -1,136 +0,0 @@ -# Makefile for Sphinx documentation -# - -# You can set these variables from the command line. -SPHINXOPTS = -SPHINXBUILD = sphinx-build -PAPER = -BUILDDIR = build - -# Internal variables. -PAPEROPT_a4 = -D latex_paper_size=a4 -PAPEROPT_letter = -D latex_paper_size=letter -ALLSPHINXOPTS = -d $(BUILDDIR)/doctrees $(PAPEROPT_$(PAPER)) $(SPHINXOPTS) source - -.PHONY: help clean html pdf dirhtml singlehtml pickle json htmlhelp qthelp devhelp epub latex latexpdf text man changes linkcheck doctest - -help: - @echo "Please use \`make ' where is one of" - @echo " html to make standalone HTML files" - @echo " pdf to make pdf with rst2pdf" - @echo " dirhtml to make HTML files named index.html in directories" - @echo " singlehtml to make a single large HTML file" - @echo " pickle to make pickle files" - @echo " json to make JSON files" - @echo " htmlhelp to make HTML files and a HTML help project" - @echo " qthelp to make HTML files and a qthelp project" - @echo " devhelp to make HTML files and a Devhelp project" - @echo " epub to make an epub" - @echo " latex to make LaTeX files, you can set PAPER=a4 or PAPER=letter" - @echo " latexpdf to make LaTeX files and run them through pdflatex" - @echo " text to make text files" - @echo " man to make manual pages" - @echo " changes to make an overview of all changed/added/deprecated items" - @echo " linkcheck to check all external links for integrity" - @echo " doctest to run all doctests embedded in the documentation (if enabled)" - -clean: - -rm -rf $(BUILDDIR)/* - -html: - $(SPHINXBUILD) -b html 
$(ALLSPHINXOPTS) $(BUILDDIR)/html - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/html." - -pdf: - $(SPHINXBUILD) -b pdf $(ALLSPHINXOPTS) $(BUILDDIR)/pdf - @echo - @echo "Build finished. The PDFs are in $(BUILDDIR)/pdf." - -dirhtml: - $(SPHINXBUILD) -b dirhtml $(ALLSPHINXOPTS) $(BUILDDIR)/dirhtml - @echo - @echo "Build finished. The HTML pages are in $(BUILDDIR)/dirhtml." - -singlehtml: - $(SPHINXBUILD) -b singlehtml $(ALLSPHINXOPTS) $(BUILDDIR)/singlehtml - @echo - @echo "Build finished. The HTML page is in $(BUILDDIR)/singlehtml." - -pickle: - $(SPHINXBUILD) -b pickle $(ALLSPHINXOPTS) $(BUILDDIR)/pickle - @echo - @echo "Build finished; now you can process the pickle files." - -json: - $(SPHINXBUILD) -b json $(ALLSPHINXOPTS) $(BUILDDIR)/json - @echo - @echo "Build finished; now you can process the JSON files." - -htmlhelp: - $(SPHINXBUILD) -b htmlhelp $(ALLSPHINXOPTS) $(BUILDDIR)/htmlhelp - @echo - @echo "Build finished; now you can run HTML Help Workshop with the" \ - ".hhp project file in $(BUILDDIR)/htmlhelp." - -qthelp: - $(SPHINXBUILD) -b qthelp $(ALLSPHINXOPTS) $(BUILDDIR)/qthelp - @echo - @echo "Build finished; now you can run "qcollectiongenerator" with the" \ - ".qhcp project file in $(BUILDDIR)/qthelp, like this:" - @echo "# qcollectiongenerator $(BUILDDIR)/qthelp/NebulaDocs.qhcp" - @echo "To view the help file:" - @echo "# assistant -collectionFile $(BUILDDIR)/qthelp/NebulaDocs.qhc" - -devhelp: - $(SPHINXBUILD) -b devhelp $(ALLSPHINXOPTS) $(BUILDDIR)/devhelp - @echo - @echo "Build finished." - @echo "To view the help file:" - @echo "# mkdir -p $$HOME/.local/share/devhelp/NebulaDocs" - @echo "# ln -s $(BUILDDIR)/devhelp $$HOME/.local/share/devhelp/NebulaDocs" - @echo "# devhelp" - -epub: - $(SPHINXBUILD) -b epub $(ALLSPHINXOPTS) $(BUILDDIR)/epub - @echo - @echo "Build finished. The epub file is in $(BUILDDIR)/epub." 
- -latex: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo - @echo "Build finished; the LaTeX files are in $(BUILDDIR)/latex." - @echo "Run \`make' in that directory to run these through (pdf)latex" \ - "(use \`make latexpdf' here to do that automatically)." - -latexpdf: - $(SPHINXBUILD) -b latex $(ALLSPHINXOPTS) $(BUILDDIR)/latex - @echo "Running LaTeX files through pdflatex..." - make -C $(BUILDDIR)/latex all-pdf - @echo "pdflatex finished; the PDF files are in $(BUILDDIR)/latex." - -text: - $(SPHINXBUILD) -b text $(ALLSPHINXOPTS) $(BUILDDIR)/text - @echo - @echo "Build finished. The text files are in $(BUILDDIR)/text." - -man: - $(SPHINXBUILD) -b man $(ALLSPHINXOPTS) $(BUILDDIR)/man - @echo - @echo "Build finished. The manual pages are in $(BUILDDIR)/man." - -changes: - $(SPHINXBUILD) -b changes $(ALLSPHINXOPTS) $(BUILDDIR)/changes - @echo - @echo "The overview file is in $(BUILDDIR)/changes." - -linkcheck: - $(SPHINXBUILD) -b linkcheck $(ALLSPHINXOPTS) $(BUILDDIR)/linkcheck - @echo - @echo "Link check complete; look for any errors in the above output " \ - "or in $(BUILDDIR)/linkcheck/output.txt." - -doctest: - $(SPHINXBUILD) -b doctest $(ALLSPHINXOPTS) $(BUILDDIR)/doctest - @echo "Testing of doctests in the sources finished, look at the " \ - "results in $(BUILDDIR)/doctest/output.txt." 
diff --git a/doc/requirements.txt b/doc/requirements.txt new file mode 100644 index 0000000000..925ca4dc8b --- /dev/null +++ b/doc/requirements.txt @@ -0,0 +1,5 @@ +docutils>=0.11 # OSI-Approved Open Source, Public Domain +openstackdocstheme>=2.2.1 # Apache-2.0 +reno>=3.1.0 # Apache-2.0 +sphinx>=2.0.0,!=2.1.0 # BSD +sphinxcontrib-svg2pdfconverter>=0.1.0 # BSD diff --git a/doc/source/conf.py b/doc/source/conf.py index 05443eb388..e73900777f 100644 --- a/doc/source/conf.py +++ b/doc/source/conf.py @@ -1,4 +1,3 @@ -# -*- coding: utf-8 -*- # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at @@ -15,18 +14,25 @@ import os import sys -import openstackdocstheme - sys.path.insert(0, os.path.abspath('../..')) +sys.path.insert(0, os.path.abspath('.')) + # -- General configuration ---------------------------------------------------- # Add any Sphinx extension module names here, as strings. They can be # extensions coming with Sphinx (named 'sphinx.ext.*') or your custom ones. extensions = [ 'sphinx.ext.autodoc', - 'sphinx.ext.intersphinx', + 'openstackdocstheme', + 'sphinxcontrib.rsvgconverter', ] +# openstackdocstheme options +openstackdocs_repo_name = 'openstack/openstacksdk' +openstackdocs_pdf_link = True +openstackdocs_use_storyboard = False +html_theme = 'openstackdocs' + # autodoc generation is a bit aggressive and a nuisance when doing heavy # text edit cycles. # execute "export SPHINX_DEBUG=1" in your terminal to disable @@ -41,38 +47,7 @@ master_doc = 'index' # General information about the project. -project = u'python-openstacksdk' -copyright = u'2015, OpenStack Foundation' - -# The version info for the project you're documenting, acts as replacement for -# |version| and |release|, also used in various other places throughout the -# built documents. -# -# "version" and "release" are used by the "log-a-bug" feature -# -# The short X.Y version. 
-version = '1.0' -# The full version, including alpha/beta/rc tags. -release = '1.0' - -# A few variables have to be set for the log-a-bug feature. -# giturl: The location of conf.py on Git. Must be set manually. -# gitsha: The SHA checksum of the bug description. Extracted from git log. -# bug_tag: Tag for categorizing the bug. Must be set manually. -# bug_project: Launchpad project to file bugs against. -# These variables are passed to the logabug code via html_context. -giturl = u'http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/doc/source' -git_cmd = "/usr/bin/git log | head -n1 | cut -f2 -d' '" -gitsha = os.popen(git_cmd).read().strip('\n') -bug_tag = "docs" -# source tree -pwd = os.getcwd() -# html_context allows us to pass arbitrary values into the html template -html_context = {"pwd": pwd, - "gitsha": gitsha, - "bug_tag": bug_tag, - "giturl": giturl, - "bug_project": "python-openstacksdk"} +copyright = '2017, Various members of the OpenStack Foundation' # If true, '()' will be appended to :func: etc. cross-reference text. add_function_parentheses = True @@ -82,40 +57,41 @@ add_module_names = True # The name of the Pygments (syntax highlighting) style to use. -pygments_style = 'sphinx' +pygments_style = 'native' -autodoc_member_order = "bysource" +autodoc_member_order = 'bysource' + +# Include both the class and __init__ docstrings when describing the class +autoclass_content = 'both' + +# Don't document type hints as they're too noisy +autodoc_typehints = 'none' # Locations to exclude when looking for source files. exclude_patterns = [] # -- Options for HTML output ---------------------------------------------- -# The theme to use for HTML and HTML Help pages. See the documentation for -# a list of builtin themes. -html_theme = 'openstackdocs' - -# Add any paths that contain custom themes here, relative to this directory. -html_theme_path = [openstackdocstheme.get_html_theme_path()] - # Don't let openstackdocstheme insert TOCs automatically. 
theme_include_auto_toc = False -# Output file base name for HTML help builder. -htmlhelp_basename = '%sdoc' % project +# -- Options for LaTeX output --------------------------------------------- # Grouping the document tree into LaTeX files. List of tuples # (source start file, target name, title, author, documentclass # [howto/manual]). latex_documents = [ - ('index', - '%s.tex' % project, - u'%s Documentation' % project, - u'OpenStack Foundation', 'manual'), + ( + 'index', + 'doc-openstacksdk.tex', + 'OpenStackSDK Documentation', + 'OpenStack Foundation', + 'manual', + ), ] -# Example configuration for intersphinx: refer to the Python standard library. -intersphinx_mapping = {'http://docs.python.org/3/': None} +# Allow deeper levels of nesting for \begin...\end stanzas +latex_elements = {'maxlistdepth': 10} -# Include both the class and __init__ docstrings when describing the class -autoclass_content = "both" +# Disable usage of xindy https://bugzilla.redhat.com/show_bug.cgi?id=1643664 +latex_use_xindy = False diff --git a/doc/source/contributor/clouds.yaml b/doc/source/contributor/clouds.yaml new file mode 100644 index 0000000000..2c141c5e5d --- /dev/null +++ b/doc/source/contributor/clouds.yaml @@ -0,0 +1,37 @@ +clouds: + devstack: + auth: + auth_url: http://xxx.xxx.xxx.xxx/identity + password: password + project_domain_id: default + project_name: demo + user_domain_id: default + username: demo + identity_api_version: '3' + region_name: RegionOne + volume_api_version: '3' + devstack-admin: + auth: + auth_url: http://xxx.xxx.xxx.xxx/identity + password: password + project_domain_id: default + project_name: admin + user_domain_id: default + username: admin + identity_api_version: '3' + region_name: RegionOne + volume_api_version: '3' + devstack-alt: + auth: + auth_url: http://xxx.xxx.xxx.xxx/identity + password: password + project_domain_id: default + project_name: alt_demo + user_domain_id: default + username: alt_demo + identity_api_version: '3' + region_name: 
RegionOne + volume_api_version: '3' +example: + image_name: cirros-0.5.2-x86_64-disk + flavor_name: m1.small diff --git a/doc/source/contributor/coding.rst b/doc/source/contributor/coding.rst new file mode 100644 index 0000000000..e1640e46d8 --- /dev/null +++ b/doc/source/contributor/coding.rst @@ -0,0 +1,104 @@ +OpenStack SDK Developer Coding Standards +======================================== + +In the beginning, there were no guidelines. And it was good. But that +didn't last long. As more and more people added more and more code, +we realized that we needed a set of coding standards to make sure that +the *openstacksdk* API at least *attempted* to display some form of +consistency. + +Thus, these coding standards/guidelines were developed. Note that not +all of *openstacksdk* adheres to these standards just yet. Some older code has +not been updated because we need to maintain backward compatibility. +Some of it just hasn't been changed yet. But be clear, all new code +*must* adhere to these guidelines. + +Below are the patterns that we expect *openstacksdk* developers to follow. + + +Release Notes +------------- + +*openstacksdk* uses `reno `_ for +managing its release notes. A new release note should be added to +your contribution anytime you add new API calls, fix significant bugs, +add new functionality or parameters to existing API calls, or make any +other significant changes to the code base that we should draw attention +to for the user base. + +It is *not* necessary to add release notes for minor fixes, such as +correction of documentation typos, minor code cleanup or reorganization, +or any other change that a user would not notice through normal usage. + + +Exceptions +---------- + +Exceptions should NEVER be wrapped and re-raised inside of a new exception. +This removes important debug information from the user. All of the exceptions +should be raised correctly the first time. 
+ + +openstack.cloud API Methods +--------------------------- + +The ``openstack.cloud`` layer has some specific rules: + +- When an API call acts on a resource that has both a unique ID and a + name, that API call should accept either identifier with a name_or_id + parameter. + +- All resources should adhere to the get/list/search interface that + control retrieval of those resources. E.g., ``get_image()``, + ``list_images()``, ``search_images()``. + +- Resources should have ``create_RESOURCE()``, ``delete_RESOURCE()``, + ``update_RESOURCE()`` API methods (as it makes sense). + +- For those methods that should behave differently for omitted or None-valued + parameters, use the ``_utils.valid_kwargs`` decorator. This includes all + Neutron ``update_*`` functions. + +- Deleting a resource should return True if the delete succeeded, or False + if the resource was not found. + +Returned Resources +~~~~~~~~~~~~~~~~~~ + +The ``openstack.cloud`` layer should rely on the proxy layer for the given +service. This will ensure complex objects returned to the caller are of +``openstack.resource.Resource`` type. + +Nova vs. Neutron +~~~~~~~~~~~~~~~~ + +- Recognize that not all cloud providers support Neutron, so never + assume it will be present. If a task can be handled by either + Neutron or Nova, code it to be handled by either. + +- For methods that accept either a Nova pool or Neutron network, the + parameter should just refer to the network, but documentation of it + should explain about the pool. See: ``create_floating_ip()`` and + ``available_floating_ip()`` methods. + + +Tests +----- + +- New API methods *must* have unit tests! + +- New unit tests should only mock at the REST layer using ``requests_mock``. + Any mocking of *openstacksdk* itself should be considered legacy and to be + avoided. 
Exceptions to this rule can be made when attempting to test the + internals of a logical shim where the inputs and output of the method aren't + actually impacted by remote content. + +- Functional tests should be added, when possible. + +- In functional tests, always use unique names (for resources that have this + attribute) and use it for clean up (see next point). + +- In functional tests, always define cleanup functions to delete data added + by your test, should something go wrong. Data removal should be wrapped in + a try except block and try to delete as many entries added by the test as + possible. diff --git a/doc/source/contributor/contributing.rst b/doc/source/contributor/contributing.rst new file mode 100644 index 0000000000..b1cd2f37dc --- /dev/null +++ b/doc/source/contributor/contributing.rst @@ -0,0 +1 @@ +.. include:: ../../../CONTRIBUTING.rst diff --git a/doc/source/contributor/create/examples/resource/fake.py b/doc/source/contributor/create/examples/resource/fake.py new file mode 100644 index 0000000000..b02175c12a --- /dev/null +++ b/doc/source/contributor/create/examples/resource/fake.py @@ -0,0 +1,26 @@ +# Apache 2 header omitted for brevity + +from openstack import resource + + +class Fake(resource.Resource): + resource_key = "resource" + resources_key = "resources" + base_path = "/fake" + + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = True + + #: The transaction date and time. + timestamp = resource.Header("x-timestamp") + #: The name of this resource. + name = resource.Body("name", alternate_id=True) + #: The value of the resource. Also available in headers. + value = resource.Body("value", alias="x-resource-value") + #: Is this resource cool? If so, set it to True. + #: This is a multi-line comment about cool stuff. 
+ cool = resource.Body("cool", type=bool) diff --git a/doc/source/contributor/create/examples/resource/fake_service.py b/doc/source/contributor/create/examples/resource/fake_service.py new file mode 100644 index 0000000000..34000b9869 --- /dev/null +++ b/doc/source/contributor/create/examples/resource/fake_service.py @@ -0,0 +1,12 @@ +# Apache 2 header omitted for brevity + +from openstack import service_description +from openstack.fake.v2 import _proxy as _proxy_v2 + + +class FakeService(service_description.ServiceDescription): + """The fake service.""" + + supported_versions = { + '2': _proxy_v2.Proxy, + } diff --git a/doc/source/contributor/create/resource.rst b/doc/source/contributor/create/resource.rst new file mode 100644 index 0000000000..76b8954ea9 --- /dev/null +++ b/doc/source/contributor/create/resource.rst @@ -0,0 +1,186 @@ +.. TODO(shade) Update this guide. + +Creating a New Resource +======================= + +This guide will walk you through how to add resources for a service. + +Naming Conventions +------------------ + +Above all, names across this project conform to Python's naming standards, +as laid out in `PEP 8 `_. + +The relevant details we need to know are as follows: + + * Module names are lower case, and separated by underscores if more than + one word. For example, ``openstack.object_store`` + * Class names are capitalized, with no spacing, and each subsequent word is + capitalized in a name. For example, ``ServerMetadata``. + * Attributes on classes, including methods, are lower case and separated + by underscores. For example, ``allow_list`` or ``get_data``. + +Services +******** + +Services in the OpenStack SDK are named after their program name, not their +code name. For example, the project often known as "Nova" is always called +"compute" within this SDK. + +This guide walks through creating service for an OpenStack program called +"Fake". 
Following our guidelines, the code for its service would +live under the ``openstack.fake`` namespace. What follows is the creation +of a :class:`~openstack.resource.Resource` class for the "Fake" service. + +Resources +********* + +Resources are named after the server-side resource, which is set in the +``base_path`` attribute of the resource class. This guide creates a +resource class for the ``/fake`` server resource, so the resource module +is called ``fake.py`` and the class is called ``Fake``. + +An Example +---------- + +``openstack/fake/fake_service.py`` + +.. literalinclude:: examples/resource/fake_service.py + :language: Python + :linenos: + +``openstack/fake/v2/fake.py`` + +.. literalinclude:: examples/resource/fake.py + :language: Python + :linenos: + +``fake.Fake`` Attributes +------------------------ + +Each service's resources inherit from :class:`~openstack.resource.Resource`, +so they can override any of the base attributes to fit the way their +particular resource operates. + +``resource_key`` and ``resources_key`` +************************************** + +These attributes are set based on how your resource responds with data. +The default values for each of these are ``None``, which works fine +when your resource returns a JSON body that can be used directly without a +top-level key, such as ``{"name": "Ernie Banks", ...}"``. + +However, our ``Fake`` resource returns JSON bodies that have the details of +the resource one level deeper, such as +``{"resources": {"name": "Ernie Banks", ...}, {...}}``. It does a similar +thing with single resources, putting them inside a dictionary keyed on +``"resource"``. + +By setting ``Fake.resource_key`` on *line 8*, we tell the ``Resource.create``, +``Resource.get``, and ``Resource.update`` methods that we're either sending +or receiving a resource that is in a dictionary with that key. 
+ +By setting ``Fake.resources_key`` on *line 9*, we tell the ``Resource.list`` +method that we're expecting to receive multiple resources inside a dictionary +with that key. + +``base_path`` +************* + +The ``base_path`` is the URL we're going to use to make requests for this +resource. In this case, *line 10* sets ``base_path = "/fake"``, which also +corresponds to the name of our class, ``Fake``. + +Most resources follow this basic formula. Some cases are more complex, where +the URL to make requests to has to contain some extra data. The volume service +has several resources which make either basic requests or detailed requests, +so they use ``base_path = "/volumes/%s(detailed)"``. Before a request is made, +if ``detailed = True``, they convert it to a string so the URL becomes +``/volumes/detailed``. If it's ``False``, they only send ``/volumes/``. + +``service`` +*********** + +*Line 11* is an instance of the service we're implementing. Each resource +ties itself to the service through this setting, so that the proper URL +can be constructed. + +In ``fake_service.py``, we specify the valid versions as well as what this +service is called in the service catalog. When a request is made for this +resource, the Session now knows how to construct the appropriate URL using +this ``FakeService`` instance. + +Supported Operations +-------------------- + +The base :class:`~openstack.resource.Resource` disallows all types of requests +by default, requiring each resource to specify which requests they support. +On *lines 14-19*, our ``Fake`` resource specifies that it'll work with all +of the operations. 
+ +In order to have the following methods work, you must allow the corresponding +value by setting it to ``True``: + ++----------------------------------------------+----------------+ +| :class:`~openstack.resource.Resource.create` | allow_create | ++----------------------------------------------+----------------+ +| :class:`~openstack.resource.Resource.delete` | allow_delete | ++----------------------------------------------+----------------+ +| :class:`~openstack.resource.Resource.head` | allow_head | ++----------------------------------------------+----------------+ +| :class:`~openstack.resource.Resource.list` | allow_list | ++----------------------------------------------+----------------+ +| :class:`~openstack.resource.Resource.fetch` | allow_fetch | ++----------------------------------------------+----------------+ +| :class:`~openstack.resource.Resource.commit` | allow_commit | ++----------------------------------------------+----------------+ + +An additional attribute to set is ``commit_method``. It defaults to ``PUT``, +but some services use ``POST`` or ``PATCH`` to commit changes back to the +remote resource. + +Properties +---------- + +.. TODO(shade) Especially this section + +The way resource classes communicate values between the user and the server +are :class:`~openstack.resource.prop` objects. These act similarly to Python's +built-in property objects, but they share only the name - they're not the same. + +Properties are set based on the contents of a response body or headers. +Based on what your resource returns, you should set ``prop``\s to map +those values to ones on your :class:`~openstack.resource.Resource` object. + +*Line 22* sets a prop for ``timestamp`` , which will cause the +``Fake.timestamp`` attribute to contain the value returned in an +``X-Timestamp`` header, such as from a ``Fake.head`` request. + +*Line 24* sets a prop for ``name``, which is a value returned in a body, such +as from a ``Fake.get`` request. 
Note from *line 12* that ``name`` is +specified its ``id`` attribute, so when this resource +is populated from a response, ``Fake.name`` and ``Fake.id`` are the same +value. + +*Line 26* sets a prop which contains an alias. ``Fake.value`` will be set +when a response body contains a ``value``, or when a header contains +``X-Resource-Value``. + +*Line 28* specifies a type to be checked before sending the value in a request. +In this case, we can only set ``Fake.cool`` to either ``True`` or ``False``, +otherwise a TypeError will be raised if the value can't be converted to the +expected type. + +Documentation +------------- + +We use Sphinx's ``autodoc`` feature in order to build API documentation for +each resource we expose. The attributes we override from +:class:`~openstack.resource.Resource` don't need to be documented, but any +:class:`~openstack.resource.prop` attributes must be. All you need to do is +add a comment *above* the line to document, with a colon following the +pound-sign. + +*Lines 21, 23, 25, and 27-28* are comments which will then appear in the API +documentation. As shown in *lines 27 & 28*, these comments can span multiple +lines. diff --git a/doc/source/contributor/history.rst b/doc/source/contributor/history.rst new file mode 100644 index 0000000000..b2901ff3f3 --- /dev/null +++ b/doc/source/contributor/history.rst @@ -0,0 +1,49 @@ +A Brief History +=============== + +*openstacksdk* started its life as three different libraries: *shade*, +*os-client-config* and *python-openstacksdk*. + +*shade* + *shade* started its life as some code inside of OpenStack Infra's `nodepool`_ + project, and as some code inside of the `Ansible OpenStack Modules`_. + Ansible had a bunch of different OpenStack related modules, and there was a + ton of duplicated code. 
Eventually, between refactoring that duplication into + an internal library, and adding the logic and features that the OpenStack + Infra team had developed to run client applications at scale, it turned out + that we'd written nine-tenths of what we'd need to have a standalone library. + + Because of its background from nodepool, *shade* contained abstractions to + work around deployment differences and is resource oriented rather than + service oriented. This allows a user to think about Security Groups without + having to know whether Security Groups are provided by Nova or Neutron on a + given cloud. On the other hand, as an interface that provides an abstraction, + it deviates from the published OpenStack REST API and adds its own opinions, + which may not get in the way of more advanced users with specific needs. + +*os-client-config* + *os-client-config* was a library for collecting client configuration for + using an OpenStack cloud in a consistent and comprehensive manner, which + introduced the ``clouds.yaml`` file for expressing named cloud + configurations. + +*python-openstacksdk* + *python-openstacksdk* was a library that exposed the OpenStack APIs to + developers in a consistent and predictable manner. + +After a while it became clear that there was value in both the high-level +layer that contains additional business logic and the lower-level SDK that +exposes services and their resources faithfully and consistently as Python +objects. Even with both of those layers, it is still beneficial at times to be +able to make direct REST calls and to do so with the same properly configured +`Session`_ from `python-requests`_. This led to the merge of the three +projects. + +The original contents of the *shade* library have been moved into +``openstack.cloud`` and *os-client-config* has been moved in to +``openstack.config``. + +.. _nodepool: https://docs.openstack.org/infra/nodepool/ +.. 
_Ansible OpenStack Modules: http://docs.ansible.com/ansible/latest/list_of_cloud_modules.html#openstack +.. _Session: http://docs.python-requests.org/en/master/user/advanced/#session-objects +.. _python-requests: http://docs.python-requests.org/en/master/ diff --git a/doc/source/contributor/index.rst b/doc/source/contributor/index.rst new file mode 100644 index 0000000000..d25021d008 --- /dev/null +++ b/doc/source/contributor/index.rst @@ -0,0 +1,112 @@ +Contributing to the OpenStack SDK +================================= + +This section of documentation pertains to those who wish to contribute to the +development of this SDK. If you're looking for documentation on how to use +the SDK to build applications, refer to the `user <../user>`_ section. + +About the Project +----------------- + +The OpenStack SDK is a OpenStack project aimed at providing a complete +software development kit for the programs which make up the OpenStack +community. It is a Python library with corresponding documentation, +examples, and tools released under the Apache 2 license. + +.. toctree:: + :maxdepth: 2 + + history + +Contribution Mechanics +---------------------- + +.. toctree:: + :maxdepth: 2 + + contributing + +Contacting the Developers +------------------------- + +IRC +~~~ + +The developers of this project are available in the `#openstack-sdks`__ channel +on OFTC IRC. This channel includes conversation on SDKs and tools within the +general OpenStack community, including OpenStackClient as well as occasional +talk about SDKs created for languages outside of Python. + +.. __: http://webchat.oftc.net?channels=%23openstack-sdks + +Email +~~~~~ + +The `openstack-discuss`__ mailing list fields questions of all types on +OpenStack. Using the ``[sdk]`` filter to begin your email subject will ensure +that the message gets to SDK developers. + +.. 
__: mailto:openstack-discuss@lists.openstack.org?subject=[sdk]%20Question%20about%20openstacksdk + +Coding Standards +---------------- + +We are a bit stricter than usual in the coding standards department. It's a +good idea to read through the :doc:`coding ` section. + +.. toctree:: + :maxdepth: 2 + + coding + +Development Environment +----------------------- + +The first step towards contributing code and documentation is to setup your +development environment. We use a pretty standard setup, but it is fully +documented in our :doc:`setup ` section. + +.. toctree:: + :maxdepth: 2 + + setup + +Testing +------- + +The project contains two test packages, one for unit tests and one for +functional tests. The ``openstack.tests.unit`` package tests the SDK's features +in isolation. The ``openstack.tests.functional`` package tests the SDK's +features and examples against an OpenStack cloud. + +.. toctree:: + + testing + +Project Layout +-------------- + +The project contains a top-level ``openstack`` package, which houses several +modules that form the foundation upon which each service's API is built on. +Under the ``openstack`` package are packages for each of those services, +such as ``openstack.compute``. + +.. toctree:: + + layout + +Adding Features +--------------- + +Does this SDK not do what you need it to do? Is it missing a service? Are you +a developer on another project who wants to add their service? You're in the +right place. Below are examples of how to add new features to the +OpenStack SDK. + +.. toctree:: + :maxdepth: 2 + + create/resource + +.. TODO(briancurtin): document how to create a proxy +.. 
TODO(briancurtin): document how to create auth plugins diff --git a/doc/source/contributor/layout.rst b/doc/source/contributor/layout.rst new file mode 100644 index 0000000000..b91d359855 --- /dev/null +++ b/doc/source/contributor/layout.rst @@ -0,0 +1,104 @@ +How the SDK is organized +======================== + +The following diagram shows how the project is laid out. + +.. literalinclude:: layout.txt + +Resource +-------- + +The :class:`openstack.resource.Resource` base class is the building block +of any service implementation. ``Resource`` objects correspond to the +resources each service's REST API works with, so the +:class:`openstack.compute.v2.server.Server` subclass maps to the compute +service's ``https://openstack:1234/v2/servers`` resource. + +The base ``Resource`` contains methods to support the typical +`CRUD `_ +operations supported by REST APIs, and handles the construction of URLs +and calling the appropriate HTTP verb on the given ``Adapter``. + +Values sent to or returned from the service are implemented as attributes +on the ``Resource`` subclass with type :class:`openstack.resource.prop`. +The ``prop`` is created with the exact name of what the API expects, +and can optionally include a ``type`` to be validated against on requests. +You should choose an attribute name that follows PEP-8, regardless of what +the server-side expects, as this ``prop`` becomes a mapping between the two.:: + + is_public = resource.prop('os-flavor-access:is_public', type=bool) + +There are six additional attributes which the ``Resource`` class checks +before making requests to the REST API. ``allow_create``, ``allow_retreive``, +``allow_commit``, ``allow_delete``, ``allow_head``, and ``allow_list`` are set +to ``True`` or ``False``, and are checked before making the corresponding +method call. + +The ``base_path`` attribute should be set to the URL which corresponds to +this resource. Many ``base_path``\s are simple, such as ``"/servers"``. 
+For ``base_path``\s which are composed of non-static information, Python's +string replacement is used, e.g., ``base_path = "/servers/%(server_id)s/ips"``. + +``resource_key`` and ``resources_key`` are attributes to set when a +``Resource`` returns more than one item in a response, or otherwise +requires a key to obtain the response value. For example, the ``Server`` +class sets ``resource_key = "server"`` as an individual ``Server`` is +stored in a dictionary keyed with the singular noun, +and ``resources_key = "servers"`` as multiple ``Server``\s are stored in +a dictionary keyed with the plural noun in the response. + +Proxy +----- + +Each service implements a ``Proxy`` class based on +:class:`~openstack.proxy.Proxy`, within the +``openstack//vX/_proxy.py`` module. For example, the v2 compute +service's ``Proxy`` exists in ``openstack/compute/v2/_proxy.py``. + +The :class:`~openstack.proxy.Proxy` class is based on +:class:`~keystoneauth1.adapter.Adapter`. + +.. autoclass:: openstack.proxy.Proxy + :members: + :show-inheritance: + +Each service's ``Proxy`` provides a higher-level interface for users to work +with via a :class:`~openstack.connection.Connection` instance. + +Rather than requiring users to maintain their own ``Adapter`` and work with +lower-level :class:`~openstack.resource.Resource` objects, the ``Proxy`` +interface offers a place to make things easier for the caller. + +Each ``Proxy`` class implements methods which act on the underlying +``Resource`` classes which represent the service. For example:: + + def list_flavors(self, **params): + return flavor.Flavor.list(self.session, **params) + +This method is operating on the ``openstack.compute.v2.flavor.Flavor.list`` +method. For the time being, it simply passes on the ``Adapter`` maintained +by the ``Proxy``, and returns what the underlying ``Resource.list`` method +does. + +Cloud +----- + +.. todo + +TODO. 
+ +Connection +---------- + +The :class:`openstack.connection.Connection` class builds atop a +:class:`openstack.config.cloud_region.CloudRegion` object, and provides a +higher level interface constructed of ``Proxy`` objects from each of the +services. + +The ``Connection`` class' primary purpose is to act as a high-level interface +to this SDK, managing the lower level connection bits and exposing the +``Resource`` objects through their corresponding `Proxy`_ object. + +If you've built proper ``Resource`` objects and implemented methods on the +corresponding ``Proxy`` object, the high-level interface to your service +should now be exposed. diff --git a/doc/source/contributors/layout.txt b/doc/source/contributor/layout.txt similarity index 93% rename from doc/source/contributors/layout.txt rename to doc/source/contributor/layout.txt index 2dd7121d5b..eeffbac87b 100644 --- a/doc/source/contributors/layout.txt +++ b/doc/source/contributor/layout.txt @@ -1,7 +1,6 @@ openstack/ connection.py resource.py - session.py compute/ compute_service.py v2/ diff --git a/doc/source/contributor/setup.rst b/doc/source/contributor/setup.rst new file mode 100644 index 0000000000..36c5aa13bf --- /dev/null +++ b/doc/source/contributor/setup.rst @@ -0,0 +1,116 @@ +Creating a Development Environment +================================== + +Required Tools +-------------- + +Python +~~~~~~ + +As the OpenStack SDK is developed in Python, you will need at least one +version of Python installed. Our continuous integration system runs against +several versions, so ultimately we will have the proper test coverage, but +having multiple versions locally results in less time spent in code review when +changes unexpectedly break other versions. + +Python can be downloaded from https://www.python.org/downloads. + +virtualenv +~~~~~~~~~~ + +In order to isolate our development environment from the system-based Python +installation, we use `virtualenv `_. 
+This allows us to install all of our necessary dependencies without +interfering with anything else, and preventing others from interfering with us. +Virtualenv must be installed on your system in order to use it, and it can be +had from PyPI, via pip, as follows. Note that you may need to run this +as an administrator in some situations.:: + + $ apt-get install python3-virtualenv # Debian based platforms + $ dnf install python3-virtualenv # Red Hat based platforms + $ pip install virtualenv # Mac OS X and other platforms + +You can create a virtualenv in any location. A common usage is to store all +of your virtualenvs in the same place, such as under your home directory. +To create a virtualenv for the default Python, run the following:: + + $ virtualenv $HOME/envs/sdk + +To create an environment for a different version, run the following:: + + $ virtualenv -p python3 $HOME/envs/sdk3 + +When you want to enable your environment so that you can develop inside of it, +you *activate* it. To activate an environment, run the /bin/activate +script inside of it, like the following:: + + $ source $HOME/envs/sdk3/bin/activate + (sdk3)$ + +Once you are activated, you will see the environment name in front of your +command prompt. In order to exit that environment, run the ``deactivate`` +command. + +tox +~~~ + +We use `tox `_ as our test runner, +which allows us to run the same test commands against multiple versions +of Python. Inside any of the virtualenvs you use for working on the SDK, +run the following to install ``tox`` into it.:: + + (sdk3)$ pip install tox + +Git +~~~ + +The source of the OpenStack SDK is stored in Git. In order to work with our +source repository, you must have Git installed on your system. If your +system has a package manager, it can likely be had from there. If not, +you can find downloads or the source at http://git-scm.com. + +Getting the Source Code +----------------------- + +.. 
TODO(briancurtin): We should try and distill the following document + into the minimally necessary parts to include directly in this section. + I've talked to several people who are discouraged by that large of a + document to go through before even getting into the project they want + to work on. I don't want that to happen to us because we have the potential + to be more public facing than a lot of other projects. + +.. note:: Before checking out the code, please read the OpenStack + `Developer's Guide `_ + for details on how to use the continuous integration and code + review systems that we use. + +The canonical Git repository is hosted on opendev.org at +http://opendev.org/openstack/openstacksdk/:: + + (sdk3)$ git clone https://opendev.org/openstack/openstacksdk + (sdk3)$ cd openstacksdk + +Installing Dependencies +----------------------- + +In order to work with the SDK locally, such as in the interactive interpreter +or to run example scripts, you need to install the project's dependencies.:: + + (sdk3)$ pip install -r requirements.txt + +After the downloads and installs are complete, you'll have a fully functional +environment to use the SDK in. + +Building the Documentation +-------------------------- + +Our documentation is written in reStructured Text and is built using +Sphinx. A ``docs`` command is available in our ``tox.ini``, allowing you +to build the documentation like you'd run tests. The ``docs`` command is +not evaluated by default.:: + + (sdk3)$ tox -e docs + +That command will cause the documentation, which lives in the ``docs`` folder, +to be built. HTML output is the most commonly referenced, which is located +in ``docs/build/html``. diff --git a/doc/source/contributor/testing.rst b/doc/source/contributor/testing.rst new file mode 100644 index 0000000000..91041c7d10 --- /dev/null +++ b/doc/source/contributor/testing.rst @@ -0,0 +1,108 @@ +Testing +======= + +The tests are run with `tox `_ and +configured in ``tox.ini``. 
The test results are tracked by +`stestr `_ and configured +in ``.stestr.conf`` and via command line options passed to the ``stestr`` +executable when it's called by ``tox``. + + +Unit Tests +---------- + +Running tests +~~~~~~~~~~~~~ + +In order to run the entire unit test suite, simply run the ``tox`` command +inside of your source checkout. This will attempt to run every test command +listed inside of ``tox.ini``, which includes Python 3.x, and a PEP 8 check. +You should run the full test suite on all versions before +submitting changes for review in order to avoid unexpected failures in the +continuous integration system.:: + + $ tox + ... + py3: commands succeeded + pep8: commands succeeded + congratulations :) + +During development, it may be more convenient to run a subset of the tests +to keep test time to a minimum. You can choose to run the tests only on one +version. A step further is to run only the tests you are working on.:: + + # Run the tests on Python 3.13 + $ tox -e py313 + # Run only the compute unit tests on Python 3.13 + $ tox -e py313 openstack.tests.unit.compute + # Run only the tests in a specific file on Python 3.13 + $ tox -e py313 -- -n openstack/tests/unit/compute/test_version.py + + +Functional Tests +---------------- + +The functional tests assume that you have a public or private OpenStack cloud +that you can run the tests against. The tests must be able to be run against +public clouds but first and foremost they must be run against OpenStack. In +practice, this means that the tests should initially be run against a stable +branch of `DevStack `_. + +Configuration +~~~~~~~~~~~~~ + +To connect the functional tests to an OpenStack cloud we require a +``clouds.yaml`` file, as discussed in :doc:`/user/config/configuration`. +You can place this ``clouds.yaml`` file in the root of your source checkout or +in one of the other standard locations, ``$HOME/.config/openstack`` or +``/etc/openstack``. 
+ +There must be at least three clouds configured, or rather three accounts +configured for the one cloud. These accounts are: + +- An admin account, which defaults to ``devstack-admin`` but is configurable + via the ``OPENSTACKSDK_OPERATOR_CLOUD`` environment variable, +- A user account, which defaults to ``devstack`` but is configurable + via the ``OPENSTACKSDK_DEMO_CLOUD`` environment variable, and +- An alternate user account, which defaults to ``devstack-demo`` but is + configurable via the ``OPENSTACKSDK_DEMO_CLOUD_ALT`` environment variable + +In addition, you must indicate the names of the flavor and image that should be +used for tests. These can be configured via ``OPENSTACKSDK_FLAVOR`` and +``OPENSTACKSDK_IMAGE`` environment variables or ``functional.flavor_name`` and +``functional.image_name`` settings in the ``clouds.yaml`` file, respectively. + +Finally, you can configure the timeout for tests using the +``OPENSTACKSDK_FUNC_TEST_TIMEOUT`` environment variable (defaults to 300 +seconds). Some test modules take specific timeout values. For example, all +tests in ``openstack.tests.functional.compute`` will check for the +``OPENSTACKSDK_FUNC_TEST_TIMEOUT_COMPUTE`` environment variable before checking +for ``OPENSTACKSDK_FUNC_TEST_TIMEOUT``. + +.. note:: + + Recent versions of DevStack will configure a suitable ``clouds.yaml`` file + for you, which will be placed at ``/etc/openstack/clouds.yaml``. + +This is an example of a minimal configuration for a ``clouds.yaml`` that +connects the functional tests to a DevStack instance. + +.. literalinclude:: clouds.yaml + :language: yaml + +Replace ``xxx.xxx.xxx.xxx`` with the IP address or FQDN of your DevStack +instance. + +Running tests +~~~~~~~~~~~~~ + +Functional tests are also run against multiple Python versions. In order to run +the entire functional test suite against the default Python 3 version in your +environment, run the ``tox -e functional`` command inside of your source +checkout. 
This will attempt to run every test in the +``openstack/tests/functional`` directory. For example:: + + $ tox -e functional + ... + functional: commands succeeded + congratulations :) diff --git a/doc/source/contributors/clouds.yaml b/doc/source/contributors/clouds.yaml deleted file mode 100644 index 04cd4aaf31..0000000000 --- a/doc/source/contributors/clouds.yaml +++ /dev/null @@ -1,19 +0,0 @@ -clouds: - test_cloud: - region_name: RegionOne - auth: - auth_url: http://xxx.xxx.xxx.xxx:5000/v2.0/ - username: demo - password: secrete - project_name: demo - example: - image_name: fedora-20.x86_64 - flavor_name: m1.small - network_name: private - rackspace: - cloud: rackspace - auth: - username: joe - password: joes-password - project_name: 123123 - region_name: IAD diff --git a/doc/source/contributors/create/examples/resource/fake.py b/doc/source/contributors/create/examples/resource/fake.py deleted file mode 100644 index e87416f09d..0000000000 --- a/doc/source/contributors/create/examples/resource/fake.py +++ /dev/null @@ -1,29 +0,0 @@ -# Apache 2 header omitted for brevity - -from openstack.fake import fake_service -from openstack import resource - - -class Fake(resource.Resource): - resource_key = "resource" - resources_key = "resources" - base_path = "/fake" - service = fake_service.FakeService() - id_attribute = "name" - - allow_create = True - allow_retrieve = True - allow_update = True - allow_delete = True - allow_list = True - allow_head = True - - #: The transaction date and time. - timestamp = resource.prop("x-timestamp") - #: The name of this resource. - name = resource.prop("name") - #: The value of the resource. Also available in headers. - value = resource.prop("value", alias="x-resource-value") - #: Is this resource cool? If so, set it to True. - #: This is a multi-line comment about cool stuff. 
- cool = resource.prop("cool", type=bool) diff --git a/doc/source/contributors/create/examples/resource/fake_service.py b/doc/source/contributors/create/examples/resource/fake_service.py deleted file mode 100644 index 9524b967db..0000000000 --- a/doc/source/contributors/create/examples/resource/fake_service.py +++ /dev/null @@ -1,13 +0,0 @@ -# Apache 2 header omitted for brevity - -from openstack import service_filter - - -class FakeService(service_filter.ServiceFilter): - """The fake service.""" - - valid_versions = [service_filter.ValidVersion('v2')] - - def __init__(self, version=None): - """Create a fake service.""" - super(FakeService, self).__init__(service_type='fake', version=version) diff --git a/doc/source/contributors/create/resource.rst b/doc/source/contributors/create/resource.rst deleted file mode 100644 index f4552b53d8..0000000000 --- a/doc/source/contributors/create/resource.rst +++ /dev/null @@ -1,191 +0,0 @@ -Creating a New Resource -======================= - -This guide will walk you through how to add resources for a service. - -Naming Conventions ------------------- - -Above all, names across this project conform to Python's naming standards, -as laid out in `PEP 8 `_. - -The relevant details we need to know are as follows: - - * Module names are lower case, and separated by underscores if more than - one word. For example, ``openstack.object_store`` - * Class names are capitalized, with no spacing, and each subsequent word is - capitalized in a name. For example, ``ServerMetadata``. - * Attributes on classes, including methods, are lower case and separated - by underscores. For example, ``allow_list`` or ``get_data``. - -Services -******** - -Services in the OpenStack SDK are named after their program name, not their -code name. For example, the project often known as "Nova" is always called -"compute" within this SDK. - -This guide walks through creating service for an OpenStack program called -"Fake". 
Following our guidelines, the code for its service would -live under the ``openstack.fake`` namespace. What follows is the creation -of a :class:`~openstack.resource.Resource` class for the "Fake" service. - -Resources -********* - -Resources are named after the server-side resource, which is set in the -``base_path`` attribute of the resource class. This guide creates a -resouce class for the ``/fake`` server resource, so the resource module -is called ``fake.py`` and the class is called ``Fake``. - -An Example ----------- - -``openstack/fake/fake_service.py`` - -.. literalinclude:: examples/resource/fake_service.py - :language: Python - :linenos: - -``openstack/fake/v2/fake.py`` - -.. literalinclude:: examples/resource/fake.py - :language: Python - :linenos: - -``fake.Fake`` Attributes ------------------------- - -Each service's resources inherit from :class:`~openstack.resource.Resource`, -so they can override any of the base attributes to fit the way their -particular resource operates. - -``resource_key`` and ``resources_key`` -************************************** - -These attributes are set based on how your resource responds with data. -The default values for each of these are ``None``, which works fine -when your resource returns a JSON body that can be used directly without a -top-level key, such as ``{"name": "Ernie Banks", ...}"``. - -However, our ``Fake`` resource returns JSON bodies that have the details of -the resource one level deeper, such as -``{"resources": {"name": "Ernie Banks", ...}, {...}}``. It does a similar -thing with single resources, putting them inside a dictionary keyed on -``"resource"``. - -By setting ``Fake.resource_key`` on *line 8*, we tell the ``Resource.create``, -``Resource.get``, and ``Resource.update`` methods that we're either sending -or receiving a resource that is in a dictionary with that key. 
- -By setting ``Fake.resources_key`` on *line 9*, we tell the ``Resource.list`` -method that we're expecting to receive multiple resources inside a dictionary -with that key. - -``base_path`` -************* - -The ``base_path`` is the URL we're going to use to make requests for this -resource. In this case, *line 10* sets ``base_path = "/fake"``, which also -corresponds to the name of our class, ``Fake``. - -Most resources follow this basic formula. Some cases are more complex, where -the URL to make requests to has to contain some extra data. The volume service -has several resources which make either basic requests or detailed requests, -so they use ``base_path = "/volumes/%s(detailed)"``. Before a request is made, -if ``detailed = True``, they convert it to a string so the URL becomes -``/volumes/detailed``. If it's ``False``, they only send ``/volumes/``. - -``service`` -*********** - -*Line 11* is an instance of the service we're implementing. Each resource -ties itself to the service through this setting, so that the proper URL -can be constructed. - -In ``fake_service.py``, we specify the valid versions as well as what this -service is called in the service catalog. When a request is made for this -resource, the Session now knows how to construct the appropriate URL using -this ``FakeService`` instance. - -``id_attribute`` -**************** - -*Line 12* specifies that this resource uses a different identifier than -the default of ``id``. While IDs are used internally, such as for creating -request URLs to interact with an individual resource, they are exposed for -consistency so users always have one place to find the resource's identity. - -Supported Operations --------------------- - -The base :class:`~openstack.resource.Resource` disallows all types of requests -by default, requiring each resource to specify which requests they support. -On *lines 14-19*, our ``Fake`` resource specifies that it'll work with all -of the operations. 
- -In order to have the following methods work, you must allow the corresponding -value by setting it to ``True``: - -+----------------------------------------------+----------------+ -| :class:`~openstack.resource.Resource.create` | allow_create | -+----------------------------------------------+----------------+ -| :class:`~openstack.resource.Resource.delete` | allow_delete | -+----------------------------------------------+----------------+ -| :class:`~openstack.resource.Resource.head` | allow_head | -+----------------------------------------------+----------------+ -| :class:`~openstack.resource.Resource.list` | allow_list | -+----------------------------------------------+----------------+ -| :class:`~openstack.resource.Resource.get` | allow_retrieve | -+----------------------------------------------+----------------+ -| :class:`~openstack.resource.Resource.update` | allow_update | -+----------------------------------------------+----------------+ - -An additional attribute to set is ``put_update`` if your service uses ``PUT`` -requests in order to update a resource. By default, ``PATCH`` requests are -used for ``Resource.update``. - -Properties ----------- - -The way resource classes communicate values between the user and the server -are :class:`~openstack.resource.prop` objects. These act similarly to Python's -built-in property objects, but they share only the name - they're not the same. - -Properties are set based on the contents of a response body or headers. -Based on what your resource returns, you should set ``prop``\s to map -those those values to ones on your :class:`~openstack.resource.Resource` -object. - -*Line 22* sets a prop for ``timestamp`` , which will cause the -``Fake.timestamp`` attribute to contain the value returned in an -``X-Timestamp`` header, such as from a ``Fake.head`` request. - -*Line 24* sets a prop for ``name``, which is a value returned in a body, such -as from a ``Fake.get`` request. 
Note from *line 12* that ``name`` is -specified its ``id`` attribute, so when this resource -is populated from a response, ``Fake.name`` and ``Fake.id`` are the same -value. - -*Line 26* sets a prop which contains an alias. ``Fake.value`` will be set -when a response body contains a ``value``, or when a header contains -``X-Resource-Value``. - -*Line 28* specifies a type to be checked before sending the value in a request. -In this case, we can only set ``Fake.cool`` to either ``True`` or ``False``, -otherwise a TypeError will be raised if the value can't be converted to the -expected type. - -Documentation -------------- - -We use Sphinx's ``autodoc`` feature in order to build API documentation for -each resource we expose. The attributes we override from -:class:`~openstack.resource.Resource` don't need to be documented, but any -:class:`~openstack.resource.prop` attributes must be. All you need to do is -add a comment *above* the line to document, with a colon following the -pound-sign. - -*Lines 21, 23, 25, and 27-28* are comments which will then appear in the API -documentation. As shown in *lines 27 & 28*, these comments can span multiple -lines. diff --git a/doc/source/contributors/index.rst b/doc/source/contributors/index.rst deleted file mode 100644 index 8b246a2452..0000000000 --- a/doc/source/contributors/index.rst +++ /dev/null @@ -1,87 +0,0 @@ -Contributing to the OpenStack SDK -================================= - -This section of documentation pertains to those who wish to contribute to the -development of this SDK. If you're looking for documentation on how to use -the SDK to build applications, please see the `user <../users>`_ section. - -About the Project ------------------ - -The OpenStack SDK is a OpenStack project aimed at providing a complete -software development kit for the programs which make up the OpenStack -community. It is a set of Python-based libraries, documentation, examples, -and tools released under the Apache 2 license. 
- -Contacting the Developers -------------------------- - -IRC -*** - -The developers of this project are available in the -`#openstack-sdks `_ -channel on Freenode. This channel includes conversation on SDKs and tools -within the general OpenStack community, including OpenStackClient as well -as occasional talk about SDKs created for languages outside of Python. - -Email -***** - -The `openstack-dev `_ -mailing list fields questions of all types on OpenStack. Using the -``[python-openstacksdk]`` filter to begin your email subject will ensure -that the message gets to SDK developers. - -Development Environment ------------------------ - -The first step towards contributing code and documentation is to setup your -development environment. We use a pretty standard setup, but it is fully -documented in our :doc:`setup ` section. - -.. toctree:: - :maxdepth: 2 - - setup - -Testing -------- - -The project contains three test packages, one for unit tests, one for -functional tests and one for examples tests. The ``openstack.tests.unit`` -package tests the SDK's features in isolation. The ``openstack.tests.functional`` -and ``openstack.tests.examples`` packages test the SDK's features and examples -against an OpenStack cloud. - -.. toctree:: - - testing - -Project Layout --------------- - -The project contains a top-level ``openstack`` package, which houses several -modules that form the foundation upon which each service's API is built on. -Under the ``openstack`` package are packages for each of those services, -such as ``openstack.compute``. - -.. toctree:: - - layout - -Adding Features ---------------- - -Does this SDK not do what you need it to do? Is it missing a service? Are you -a developer on another project who wants to add their service? You're in the -right place. Below are examples of how to add new features to the -OpenStack SDK. - -.. toctree:: - :maxdepth: 2 - - create/resource - -.. TODO(briancurtin): document how to create a proxy -.. 
TODO(briancurtin): document how to create auth plugins diff --git a/doc/source/contributors/layout.rst b/doc/source/contributors/layout.rst deleted file mode 100644 index c221bc7930..0000000000 --- a/doc/source/contributors/layout.rst +++ /dev/null @@ -1,101 +0,0 @@ -How the SDK is organized -======================== - -The following diagram shows how the project is laid out. - -.. literalinclude:: layout.txt - -Session -------- - -The :class:`openstack.session.Session` manages an authenticator, -transport, and user profile. It exposes methods corresponding to -HTTP verbs, and injects your authentication token into a request, -determines any service preferences callers may have set, gets the endpoint -from the authenticator, and sends the request out through the transport. - -Resource --------- - -The :class:`openstack.resource.Resource` base class is the building block -of any service implementation. ``Resource`` objects correspond to the -resources each service's REST API works with, so the -:class:`openstack.compute.v2.server.Server` subclass maps to the compute -service's ``https://openstack:1234/v2/servers`` resource. - -The base ``Resource`` contains methods to support the typical -`CRUD `_ -operations supported by REST APIs, and handles the construction of URLs -and calling the appropriate HTTP verb on the given ``Session``. - -Values sent to or returned from the service are implemented as attributes -on the ``Resource`` subclass with type :class:`openstack.resource.prop`. -The ``prop`` is created with the exact name of what the API expects, -and can optionally include a ``type`` to be validated against on requests. -You should choose an attribute name that follows PEP-8, regardless of what -the server-side expects, as this ``prop`` becomes a mapping between the two.:: - - is_public = resource.prop('os-flavor-access:is_public', type=bool) - -There are six additional attributes which the ``Resource`` class checks -before making requests to the REST API. 
``allow_create``, ``allow_retreive``, -``allow_update``, ``allow_delete``, ``allow_head``, and ``allow_list`` are set -to ``True`` or ``False``, and are checked before making the corresponding -method call. - -The ``base_path`` attribute should be set to the URL which corresponds to -this resource. Many ``base_path``\s are simple, such as ``"/servers"``. -For ``base_path``\s which are composed of non-static information, Python's -string replacement is used, e.g., ``base_path = "/servers/%(server_id)s/ips"``. - -``resource_key`` and ``resources_key`` are attributes to set when a -``Resource`` returns more than one item in a response, or otherwise -requires a key to obtain the response value. For example, the ``Server`` -class sets ``resource_key = "server"`` as an individual ``Server`` is -stored in a dictionary keyed with the singular noun, -and ``resource_keys = "servers"`` as multiple ``Server``\s are stored in -a dictionary keyed with the plural noun in the response. - -Proxy ------ - -Each service implements a ``Proxy`` class, within the -``openstack//vX/_proxy.py`` module. For example, the v2 compute -service's ``Proxy`` exists in ``openstack/compute/v2/_proxy.py``. - -This ``Proxy`` class manages a :class:`~openstack.sessions.Session` and -provides a higher-level interface for users to work with via a -:class:`~openstack.connection.Connection` instance. Rather than requiring -users to maintain their own session and work with lower-level -:class:`~openstack.resource.Resource` objects, the ``Proxy`` interface -offers a place to make things easier for the caller. - -Each ``Proxy`` class implements methods which act on the underlying -``Resource`` classes which represent the service. For example:: - - def list_flavors(self, **params): - return flavor.Flavor.list(self.session, **params) - -This method is operating on the ``openstack.compute.v2.flavor.Flavor.list`` -method. 
For the time being, it simply passes on the ``Session`` maintained -by the ``Proxy``, and returns what the underlying ``Resource.list`` method -does. - -The implementations and method signatures of ``Proxy`` methods are currently -under construction, as we figure out the best way to implement them in a -way which will apply nicely across all of the services. - -Connection ----------- - -The :class:`openstack.connection.Connection` class builds atop a ``Session`` -object, and provides a higher level interface constructed of ``Proxy`` -objects from each of the services. - -The ``Connection`` class' primary purpose is to act as a high-level interface -to this SDK, managing the lower level connecton bits and exposing the -``Resource`` objects through their corresponding `Proxy`_ object. - -If you've built proper ``Resource`` objects and implemented methods on the -corresponding ``Proxy`` object, the high-level interface to your service -should now be exposed. diff --git a/doc/source/contributors/local.conf b/doc/source/contributors/local.conf deleted file mode 100644 index d471162176..0000000000 --- a/doc/source/contributors/local.conf +++ /dev/null @@ -1,66 +0,0 @@ -[[local|localrc]] -# Configure passwords and the Swift Hash -MYSQL_PASSWORD=DEVSTACK_PASSWORD -RABBIT_PASSWORD=DEVSTACK_PASSWORD -SERVICE_TOKEN=DEVSTACK_PASSWORD -ADMIN_PASSWORD=DEVSTACK_PASSWORD -SERVICE_PASSWORD=DEVSTACK_PASSWORD -SWIFT_HASH=DEVSTACK_PASSWORD - -# Configure the stable OpenStack branches used by DevStack -# For stable branches see -# http://git.openstack.org/cgit/openstack-dev/devstack/refs/ -CINDER_BRANCH=stable/OPENSTACK_VERSION -CEILOMETER_BRANCH=stable/OPENSTACK_VERSION -GLANCE_BRANCH=stable/OPENSTACK_VERSION -HEAT_BRANCH=stable/OPENSTACK_VERSION -HORIZON_BRANCH=stable/OPENSTACK_VERSION -KEYSTONE_BRANCH=stable/OPENSTACK_VERSION -NEUTRON_BRANCH=stable/OPENSTACK_VERSION -NOVA_BRANCH=stable/OPENSTACK_VERSION -SWIFT_BRANCH=stable/OPENSTACK_VERSION -ZAQAR_BRANCH=stable/OPENSTACK_VERSION - 
-# Enable Swift -enable_service s-proxy -enable_service s-object -enable_service s-container -enable_service s-account - -# Disable Nova Network and enable Neutron -disable_service n-net -enable_service q-svc -enable_service q-agt -enable_service q-dhcp -enable_service q-l3 -enable_service q-meta -enable_service q-metering - -# Enable Ceilometer -enable_service ceilometer-acompute -enable_service ceilometer-acentral -enable_service ceilometer-anotification -enable_service ceilometer-collector -enable_service ceilometer-alarm-evaluator -enable_service ceilometer-alarm-notifier -enable_service ceilometer-api - -# Enable Zaqar -enable_plugin zaqar https://github.com/openstack/zaqar -enable_service zaqar-server - -# Enable Heat -enable_service h-eng -enable_service h-api -enable_service h-api-cfn -enable_service h-api-cw - -# Automatically download and register a VM image that Heat can launch -# For more information on Heat and DevStack see -# http://docs.openstack.org/developer/heat/getting_started/on_devstack.html -IMAGE_URLS+=",http://cloud.fedoraproject.org/fedora-20.x86_64.qcow2" - -# Logging -LOGDAYS=1 -LOGFILE=/opt/stack/logs/stack.sh.log -LOGDIR=/opt/stack/logs diff --git a/doc/source/contributors/setup.rst b/doc/source/contributors/setup.rst deleted file mode 100644 index 628b685b2c..0000000000 --- a/doc/source/contributors/setup.rst +++ /dev/null @@ -1,123 +0,0 @@ -Creating a Development Environment -================================== - -Required Tools --------------- - -Python -****** - -As the OpenStack SDK is developed in Python, you will need at least one -version of Python installed. It is strongly preferred that you have at least -one of version 2 and one of version 3 so that your tests are run against both. -Our continuous integration system runs against several versions, so ultimately -we will have the proper test coverage, but having multiple versions locally -results in less time spent in code review when changes unexpectedly break -other versions. 
- -Python can be downloaded from https://www.python.org/downloads. - -virtualenv -********** - -In order to isolate our development environment from the system-based Python -installation, we use `virtualenv `_. -This allows us to install all of our necessary dependencies without -interfering with anything else, and preventing others from interfering with us. -Virtualenv must be installed on your system in order to use it, and it can be -had from PyPI, via pip, as follows. Note that you may need to run this -as an administrator in some situations.:: - - $ apt-get install python-virtualenv # Debian based platforms - $ yum install python-virtualenv # Red Hat based platforms - $ pip install virtualenv # Mac OS X and other platforms - -You can create a virtualenv in any location. A common usage is to store all -of your virtualenvs in the same place, such as under your home directory. -To create a virtualenv for the default Python, likely a version 2, run -the following:: - - $ virtualenv $HOME/envs/sdk - -To create an environment for a different version, such as Python 3, run -the following:: - - $ virtualenv -p python3.4 $HOME/envs/sdk3 - -When you want to enable your environment so that you can develop inside of it, -you *activate* it. To activate an environment, run the /bin/activate -script inside of it, like the following:: - - $ source $HOME/envs/sdk3/bin/activate - (sdk3)$ - -Once you are activated, you will see the environment name in front of your -command prompt. In order to exit that environment, run the ``deactivate`` -command. - -tox -*** - -We use `tox `_ as our test runner, -which allows us to run the same test commands against multiple versions -of Python. Inside any of the virtualenvs you use for working on the SDK, -run the following to install ``tox`` into it.:: - - (sdk3)$ pip install tox - -Git -*** - -The source of the OpenStack SDK is stored in Git. In order to work with our -source repository, you must have Git installed on your system. 
If your -system has a package manager, it can likely be had from there. If not, -you can find downloads or the source at http://git-scm.com. - -Getting the Source Code ------------------------ - -.. TODO(briancurtin): We should try and distill the following document - into the minimally necessary parts to include directly in this section. - I've talked to several people who are discouraged by that large of a - document to go through before even getting into the project they want - to work on. I don't want that to happen to us because we have the potential - to be more public facing than a lot of other projects. - -.. note:: Before checking out the code, please read the OpenStack - `Developer's Guide `_ - for details on how to use the continuous integration and code - review systems that we use. - -The canonical Git repository is hosted on openstack.org at -http://git.openstack.org/cgit/openstack/python-openstacksdk/, with a -mirror on GitHub at https://github.com/openstack/python-openstacksdk. -Because of how Git works, you can create a local clone from either of those, -or your own personal fork.:: - - (sdk3)$ git clone https://git.openstack.org/openstack/python-openstacksdk.git - (sdk3)$ cd python-openstacksdk - -Installing Dependencies ------------------------ - -In order to work with the SDK locally, such as in the interactive interpreter -or to run example scripts, you need to install the project's dependencies.:: - - (sdk3)$ pip install -r requirements.txt - -After the downloads and installs are complete, you'll have a fully functional -environment to use the SDK in. - -Building the Documentation --------------------------- - -Our documentation is written in reStructured Text and is built using -Sphinx. A ``docs`` command is available in our ``tox.ini``, allowing you -to build the documentation like you'd run tests. 
The ``docs`` command is -not evaluated by default.:: - - (sdk3)$ tox -e docs - -That command will cause the documentation, which lives in the ``docs`` folder, -to be built. HTML output is the most commonly referenced, which is located -in ``docs/build/html``. diff --git a/doc/source/contributors/testing.rst b/doc/source/contributors/testing.rst deleted file mode 100644 index dc884a2132..0000000000 --- a/doc/source/contributors/testing.rst +++ /dev/null @@ -1,131 +0,0 @@ -Testing -======= - -The tests are run with `tox `_ and -configured in ``tox.ini``. The test results are tracked by -`testr `_ and configured -in ``.testr.conf``. - -Unit Tests ----------- - -Run -*** - -In order to run the entire unit test suite, simply run the ``tox`` command -inside of your source checkout. This will attempt to run every test command -listed inside of ``tox.ini``, which includes Python 2.7, 3.4, PyPy, -and a PEP 8 check. You should run the full test suite on all versions before -submitting changes for review in order to avoid unexpected failures in the -continuous integration system.:: - - (sdk3)$ tox - ... - py34: commands succeeded - py27: commands succeeded - pypy: commands succeeded - pep8: commands succeeded - congratulations :) - -During development, it may be more convenient to run a subset of the tests -to keep test time to a minimum. You can choose to run the tests only on one -version. A step further is to run only the tests you are working on.:: - - (sdk3)$ tox -e py34 # Run run the tests on Python 3.4 - (sdk3)$ tox -e py34 TestContainer # Run only the TestContainer tests on 3.4 - -Functional Tests ----------------- - -The functional tests assume that you have a public or private OpenStack cloud -that you can run the tests against. The tests must be able to be run against -public clouds but first and foremost they must be run against OpenStack. In -practice, this means that the tests should initially be run against a stable -branch of `DevStack `_. 
- -DevStack -******** - -There are many ways to run and configure DevStack. The link above will show -you how to run DevStack a number of ways. You'll need to choose a method -you're familiar with and can run in your environment. Wherever DevStack is -running, we need to make sure that python-openstacksdk contributors are -using the same configuration. - -This is the ``local.conf`` file we use to configure DevStack. - -.. literalinclude:: local.conf - -Replace ``DEVSTACK_PASSWORD`` with a password of your choice. - -Replace ``OPENSTACK_VERSION`` with a `stable branch `_ -of OpenStack (without the ``stable/`` prefix on the branch name). - -os-client-config -**************** - -To connect the functional tests to an OpenStack cloud we use -`os-client-config `_. -To setup os-client-config create a ``clouds.yaml`` file in the root of your -source checkout. - -This is an example of a minimal configuration for a ``clouds.yaml`` that -connects the functional tests to a DevStack instance. Note that one cloud -under ``clouds`` must be named ``test_cloud``. - -.. literalinclude:: clouds.yaml - :language: yaml - -Replace ``xxx.xxx.xxx.xxx`` with the IP address or FQDN of your DevStack instance. - -You can also create a ``~/.config/openstack/clouds.yaml`` file for your -DevStack cloud environment using the following commands. Replace -``DEVSTACK_SOURCE`` with your DevStack source checkout.:: - - (sdk3)$ source DEVSTACK_SOURCE/accrc/admin/admin - (sdk3)$ ./create_yaml.sh - -Run -*** - -Functional tests are run against both Python 2 and 3. In order to run the -entire functional test suite, run the ``tox -e functional`` and -``tox -e functional3`` command inside of your source checkout. This will -attempt to run every test command under ``/openstack/tests/functional/`` -in the source tree. 
You should run the full functional test suite before -submitting changes for review in order to avoid unexpected failures in -the continuous integration system.:: - - (sdk3)$ tox -e functional - ... - functional: commands succeeded - congratulations :) - (sdk3)$ tox -e functional3 - ... - functional3: commands succeeded - congratulations :) - -Examples Tests --------------- - -Similar to the functional tests, the examples tests assume that you have a -public or private OpenStack cloud that you can run the tests against. In -practice, this means that the tests should initially be run against a stable -branch of `DevStack `_. -And like the functional tests, the examples tests connect to an OpenStack cloud -using `os-client-config `_. -See the functional tests instructions for information on setting up DevStack and -os-client-config. - -Run -*** - -In order to run the entire examples test suite, simply run the -``tox -e examples`` command inside of your source checkout. This will -attempt to run every test command under ``/openstack/tests/examples/`` -in the source tree.:: - - (sdk3)$ tox -e examples - ... - examples: commands succeeded - congratulations :) diff --git a/doc/source/history.rst b/doc/source/history.rst deleted file mode 100644 index 69ed4fe6c2..0000000000 --- a/doc/source/history.rst +++ /dev/null @@ -1 +0,0 @@ -.. include:: ../../ChangeLog diff --git a/doc/source/index.rst b/doc/source/index.rst index e2c2bb526d..d935708cd8 100644 --- a/doc/source/index.rst +++ b/doc/source/index.rst @@ -1,10 +1,21 @@ -Welcome to the OpenStack SDK! -============================= +openstacksdk +============ -This documentation is split into two sections: one for -:doc:`users ` looking to build applications which make use of -OpenStack, and another for those looking to -:doc:`contribute ` to this project. 
+This documentation is split into three sections: + +* An :doc:`installation ` guide +* A section for :doc:`users ` looking to build applications + which make use of OpenStack +* A section for those looking to :doc:`contribute ` + to this project + +Installation +------------ + +.. toctree:: + :maxdepth: 2 + + install/index For Users --------- @@ -12,7 +23,7 @@ For Users .. toctree:: :maxdepth: 2 - users/index + user/index For Contributors ---------------- @@ -20,7 +31,7 @@ For Contributors .. toctree:: :maxdepth: 2 - contributors/index + contributor/index General Information ------------------- @@ -31,4 +42,4 @@ General information about the SDK including a glossary and release history. :maxdepth: 1 Glossary of Terms - Release History + Release Notes diff --git a/doc/source/install/index.rst b/doc/source/install/index.rst new file mode 100644 index 0000000000..33255ae9af --- /dev/null +++ b/doc/source/install/index.rst @@ -0,0 +1,17 @@ +Installation guide +================== + +The OpenStack SDK is available on `PyPI`__ under the name **openstacksdk**. To +install it, use ``pip``: + +.. code-block:: bash + + $ pip install openstacksdk + +To check the installed version you can call the module with: + +.. code-block:: bash + + $ python -m openstack version + +.. __: https://pypi.org/project/openstacksdk diff --git a/doc/source/releasenotes.rst b/doc/source/releasenotes.rst new file mode 100644 index 0000000000..b4eb5e488e --- /dev/null +++ b/doc/source/releasenotes.rst @@ -0,0 +1,5 @@ +Release Notes +============= + +Release notes for `openstacksdk` can be found at +https://releases.openstack.org/teams/openstacksdk.html diff --git a/doc/source/user/config/configuration.rst b/doc/source/user/config/configuration.rst new file mode 100644 index 0000000000..9a70cb2b6c --- /dev/null +++ b/doc/source/user/config/configuration.rst @@ -0,0 +1,642 @@ +.. 
_openstack-config: + +====================================== +Configuring OpenStack SDK Applications +====================================== + +.. _config-environment-variables: + +Environment Variables +--------------------- + +`openstacksdk` honors all of the normal `OS_*` variables. It does not +provide backwards compatibility to service-specific variables such as +`NOVA_USERNAME`. + +If you have OpenStack environment variables set, `openstacksdk` will +produce a cloud config object named `envvars` containing your values from the +environment. If you don't like the name `envvars`, that's ok, you can override +it by setting `OS_CLOUD_NAME`. + +Service specific settings, like the nova service type, are set with the +default service type as a prefix. For instance, to set a special service_type +for trove set + +.. code-block:: bash + + export OS_DATABASE_SERVICE_TYPE=rax:database + +.. _config-clouds-yaml: + +Config Files +------------ + +`openstacksdk` will look for a file called `clouds.yaml` in the following +locations: + +* ``.`` (the current directory) +* ``$HOME/.config/openstack`` +* ``/etc/openstack`` + +The first file found wins. + +You can also set the environment variable `OS_CLIENT_CONFIG_FILE` to an +absolute path of a file to look for and that location will be inserted at the +front of the file search list. + +The keys are all of the keys you'd expect from `OS_*` - except lower case +and without the OS prefix. So, region name is set with `region_name`. + +Service specific settings, like the nova service type, are set with the +default service type as a prefix. For instance, to set a special service_type +for trove (because you're using Rackspace) set: + +.. code-block:: yaml + + database_service_type: 'rax:database' + + +Site Specific File Locations +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +In addition to `~/.config/openstack` and `/etc/openstack` - some platforms +have other locations they like to put things. 
`openstacksdk` will also +look in an OS specific config dir + +* `USER_CONFIG_DIR` +* `SITE_CONFIG_DIR` + +`USER_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `~/.config/openstack` +* OSX: `~/Library/Application Support/openstack` +* Windows: `C:\\Users\\USERNAME\\AppData\\Local\\OpenStack\\openstack` + +`SITE_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `/etc/openstack` +* OSX: `/Library/Application Support/openstack` +* Windows: `C:\\ProgramData\\OpenStack\\openstack` + +An example config file is probably helpful: + +.. code-block:: yaml + + clouds: + mtvexx: + profile: https://vexxhost.com + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mordred@inaugust.com + region_name: ca-ymq-1 + dns_api_version: 1 + mordred: + region_name: RegionOne + auth: + username: 'mordred' + password: XXXXXXX + project_name: 'shade' + auth_url: 'https://montytaylor-sjc.openstack.blueboxgrid.com:5001/v2.0' + infra: + profile: rackspace + auth: + username: openstackci + password: XXXXXXXX + project_id: 610275 + regions: + - DFW + - ORD + - IAD + +You may note a few things. First, since `auth_url` settings are silly +and embarrassingly ugly, known cloud vendor profile information is included and +may be referenced by name or by base URL to the cloud in question if the +cloud serves a vendor profile. One of the benefits of that is that `auth_url` +isn't the only thing the vendor defaults contain. For instance, since +Rackspace lists `rax:database` as the service type for trove, `openstacksdk` +knows that so that you don't have to. In case the cloud vendor profile is not +available, you can provide one called `clouds-public.yaml`, following the same +location rules previously mentioned for the config files. + +`regions` can be a list of regions. When you call `get_all_clouds`, +you'll get a cloud config object for each cloud/region combo. 
+ +As seen with `dns_service_type`, any setting that makes sense to be +per-service, like `service_type` or `endpoint` or `api_version` can be set +by prefixing the setting with the default service type. That might strike you +funny when setting `service_type` and it does me too - but that's just the +world we live in. + +Auth Settings +------------- + +Keystone has auth plugins - which means it's not possible to know ahead of time +which auth settings are needed. `openstacksdk` sets the default plugin type +to `password`, which is what things all were before plugins came about. In +order to facilitate validation of values, all of the parameters that exist +as a result of a chosen plugin need to go into the auth dict. For password +auth, this includes `auth_url`, `username` and `password` as well as anything +related to domains, projects and trusts. + +API Settings +------------ + +The following settings are passed to keystoneauth and are common to +all services. + +``api_timeout`` + A timeout for API requests. This should be a numerical value + indicating some amount (or fraction) of seconds or 0 for no + timeout. (optional, defaults to 0) + +``collect_timing`` + Whether or not to collect per-method timing information for each + API call. (optional, defaults to False) + +Splitting Secrets +----------------- + +In some scenarios, such as configuration management controlled environments, +it might be easier to have secrets in one file and non-secrets in another. +This is fully supported via an optional file `secure.yaml` which follows all +the same location rules as `clouds.yaml`. It can contain anything you put +in `clouds.yaml` and will take precedence over anything in the `clouds.yaml` +file. + +You can also set the environment variable `OS_CLIENT_SECURE_FILE` to an +absolute path of a file to look for and that location will be inserted at the +front of the file search list. + +.. 
code-block:: yaml + + # clouds.yaml + clouds: + internap: + profile: internap + auth: + username: api-55f9a00fb2619 + project_name: inap-17037 + regions: + - ams01 + - nyj01 + # secure.yaml + clouds: + internap: + auth: + password: XXXXXXXXXXXXXXXXX + +SSL Settings +------------ + +When the access to a cloud is done via a secure connection, `openstacksdk` +will always verify the SSL cert by default. This can be disabled by setting +`verify` to `False`. In case the cert is signed by an unknown CA, a specific +cacert can be provided via `cacert`. **WARNING:** `verify` will always have +precedence over `cacert`, so when setting a CA cert but disabling `verify`, the +cloud cert will never be validated. + +Client certs are also configurable. `cert` will be the client cert file +location. In case the cert key is not included within the client cert file, +its file location needs to be set via `key`. + +.. code-block:: yaml + + # clouds.yaml + clouds: + regular-secure-cloud: + auth: + auth_url: https://signed.cert.domain:5000 + ... + unknown-ca-with-client-cert-secure-cloud: + auth: + auth_url: https://unknown.ca.but.secure.domain:5000 + ... + key: /home/myhome/client-cert.key + cert: /home/myhome/client-cert.crt + cacert: /home/myhome/ca.crt + self-signed-insecure-cloud: + auth: + auth_url: https://self.signed.cert.domain:5000 + ... + verify: False + +Note for parity with ``openstack`` command-line options the `insecure` +boolean is also recognised (with the opposite semantics to `verify`; +i.e. `True` ignores certificate failures). This should be considered +deprecated for `verify`. + + +Cache Settings +-------------- + +.. versionchanged:: 1.0.0 + + Previously, caching was managed exclusively in the cloud layer. Starting in + openstacksdk 1.0.0, caching is moved to the proxy layer. As the cloud layer + depends on the proxy layer in 1.0.0, this means both layers can benefit from + the cache. + +Authenticating and accessing resources on a cloud is often expensive. 
It is +therefore quite common that applications will wish to do some client-side +caching of both credentials and cloud resources. To facilitate this, +*openstacksdk* supports caching credentials and resources using the system +keyring and *dogpile.cache*, respectively. + +.. tip:: + + It is important to emphasise that *openstacksdk* does not actually cache + anything itself. Rather, it collects and presents the cache information + so that your various applications that are connecting to OpenStack can share + a cache should you desire. It is important that your cache backend is + correctly configured according to the needs of your application. + +Caching is enabled or disabled globally, rather than on a cloud-by-cloud basis. +This is done by configuring the ``cache`` top-level key. Caching of +authentication tokens can be configured using the following settings: + +``cache.auth`` + A boolean indicating whether tokens should be cached in the keyring. + When enabled, this allows subsequent connections to the same cloud to + skip fetching a new token. When the token expires or is invalidated, + `openstacksdk` will automatically establish a new connection. + Defaults to ``false``. + +For example, to configure caching of authentication tokens: + +.. code-block:: yaml + + cache: + auth: true + +Caching of resources can be configured using the following settings: + +``cache.expiration_time`` + The expiration time in seconds for a cache entry. + This should be an integer. + Defaults to ``0``. + +``cache.class`` + The cache backend to use, which can include any backend supported by + *dogpile.cache* natively as well as backends provided by third-party packages. + This should be a string. + Defaults to ``dogpile.cache.memory``. + +``cache.arguments`` + A mapping of arbitrary arguments to pass into the cache backend. These are + backend specific. Keys should correspond to a configuration option for the + configured cache backend. + Defaults to ``{}``. 
+ +``cache.expirations`` + A mapping of resource types to expiration times. The keys should be specified + in the same way as the metrics are emitted, by joining meaningful resource + URL segments with ``.``. For example, both ``/servers`` and ``/servers/ID`` + should be specified as ``servers``, while ``/servers/ID/metadata/KEY`` should + be specified as `server.metadata`. Values should be an expiration time in + seconds. A value of ``-1`` indicates that the cache should never expire, + while a value of ``0`` disables caching for the resource. + Defaults to ``{}`` + +For example, to configure caching with the ``dogpile.cache.memory`` backend +with a 1 hour expiration. + +.. code-block:: yaml + + cache: + expiration_time: 3600 + +To configure caching with the ``dogpile.cache.memory`` backend with a 1 hour +expiration but only for requests to the OpenStack Compute service's +``/servers`` API: + +.. code-block:: yaml + + cache: + expirations: + servers: 3600 + +To configure caching with the ``dogpile.cache.pylibmc`` backend with a 1 hour +expiration time and a memcached server running on your localhost. + +.. code-block:: yaml + + cache: + expiration_time: 3600 + arguments: + url: + - 127.0.0.1 + +To configure caching with the ``dogpile.cache.pylibmc`` backend with a 1 hour +expiration time, a memcached server running on your localhost, and multiple +per-resource cache expiration times. + +.. code-block:: yaml + + cache: + class: dogpile.cache.pylibmc + expiration_time: 3600 + arguments: + url: + - 127.0.0.1 + expiration: + server: 5 + flavor: -1 + compute.servers: 5 + compute.flavors: -1 + image.images: 5 + +Finally, if the ``cache`` key is undefined, a null cache is enabled meaning +caching is effectively disabled. + +.. note:: + + Non ``GET`` requests cause cache invalidation based on the caching key + prefix. This means that, for example, a ``PUT`` request to ``/images/ID`` + will invalidate all images cache (list and all individual entries). 
Moreover + it is possible to explicitly pass the ``skip_cache`` parameter to the + ``proxy._get`` function to bypass cache and invalidate what is already + there. This happens automatically in the ``wait_for_status`` methods + where it is expected that the resource will change some of the attributes over + time. Forcing complete cache invalidation can be achieved by calling + ``conn._cache.invalidate``. + +MFA Support +----------- + +MFA support requires a specially prepared configuration file. In this case a +combination of two different authorization plugins is used with their +individual requirements for the specified parameters. + +.. code-block:: yaml + + clouds: + mfa: + auth_type: "v3multifactor" + auth_methods: + - v3password + - v3totp + auth: + auth_url: https://identity.cloud.com + username: user + user_id: uid + password: XXXXXXXXX + project_name: project + user_domain_name: udn + project_domain_name: pdn + + +IPv6 +---- + +IPv6 is the future, and you should always use it if your cloud +supports it and if your local network supports it. Both of those are +easily detectable and all friendly software should do the right thing. + +However, sometimes a cloud API may return IPv6 information that is not +useful to a production deployment. For example, the API may provide +an IPv6 address for a server, but not provide that to the host +instance via metadata (configdrive) or standard IPv6 autoconfiguration +methods (i.e. the host either needs to make a bespoke API call, or +otherwise statically configure itself). + +For such situations, you can set the ``force_ipv4`` option, or the ``OS_FORCE_IPV4`` +boolean environment variable. For example: + +.. 
code-block:: yaml + + clouds: + mtvexx: + profile: vexxhost + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mordred@inaugust.com + region_name: ca-ymq-1 + dns_api_version: 1 + monty: + profile: fooprovider + force_ipv4: true + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mordred@inaugust.com + region_name: RegionFoo + +The above snippet will tell client programs to prefer the IPv4 address +and leave the ``public_v6`` field of the `Server` object blank for the +``fooprovider`` cloud . You can also set this with a client flag for +all clouds: + +.. code-block:: yaml + + client: + force_ipv4: true + + +Per-region settings +------------------- + +Sometimes you have a cloud provider that has config that is common to the +cloud, but also with some things you might want to express on a per-region +basis. For instance, Internap provides a public and private network specific +to the user in each region, and putting the values of those networks into +config can make consuming programs more efficient. + +To support this, the region list can actually be a list of dicts, and any +setting that can be set at the cloud level can be overridden for that +region. + +.. code-block:: yaml + + clouds: + internap: + profile: internap + auth: + password: XXXXXXXXXXXXXXXXX + username: api-55f9a00fb2619 + project_name: inap-17037 + regions: + - name: ams01 + values: + networks: + - name: inap-17037-WAN1654 + routes_externally: true + - name: inap-17037-LAN6745 + - name: nyj01 + values: + networks: + - name: inap-17037-WAN1654 + routes_externally: true + - name: inap-17037-LAN6745 + + +Setting Precedence +------------------ + +Some settings are redundant, e.g. ``project-name`` and ``project-id`` both +specify the project. In a conflict between redundant settings, the +``_name`` ``clouds.yaml`` option (or equivalent ``-name`` CLI option and ``_NAME`` environment variable) will be used. 
+ +Some environment variables or command-line flags can override the settings from +clouds.yaml. These are: + +- ``--domain-id`` (``OS_DOMAIN_ID``) +- ``--domain-name`` (``OS_DOMAIN_NAME``) +- ``--user-domain-id`` (``OS_USER_DOMAIN_ID``) +- ``--user-domain-name`` (``OS_USER_DOMAIN_NAME``) +- ``--project-domain-id`` (``OS_PROJECT_DOMAIN_ID``) +- ``--project-domain-name`` (``OS_PROJECT_DOMAIN_NAME``) +- ``--auth-token`` (``OS_AUTH_TOKEN``) +- ``--project-id`` (``OS_PROJECT_ID``) +- ``--project-name`` (``OS_PROJECT_NAME``) +- ``--tenant-id`` (``OS_TENANT_ID``) (deprecated for ``--project-id``) +- ``--tenant-name`` (``OS_TENANT_NAME``) (deprecated for ``--project-name``) + +Similarly, if one of the above settings is specified in ``clouds.yaml`` as +part of the ``auth`` section as well as the main section, the ``auth`` settings +will be overridden. For example, in this config section, note that project is +specified multiple times: + +.. code-block:: yaml + + clouds: + mtvexx: + profile: https://vexxhost.com + auth: + username: mordred@inaugust.com + password: XXXXXXXXX + project_name: mylessfavoriteproject + project_id: 0bedab75-898c-4521-a038-0b4b71c41bed + region_name: ca-ymq-1 + project_name: myfavoriteproject + project_id: 2acf9403-25e8-479e-a3c6-d67540c424a4 + +In the above example, the ``project_id`` configuration values will be ignored +in favor of the ``project_name`` configuration values, and the higher-level +project will be chosen over the auth-specified project. So the actual project +used will be ``myfavoriteproject``. + + +Examples +-------- + +``auth`` +~~~~~~~~ + +.. rubric:: Password-based authentication (project-scoped) + +.. code-block:: yaml + + example: + auth: + auth_url: http://example.com/identity + password: password + project_domain_id: default + project_name: admin + user_domain_id: default + username: admin + region_name: RegionOne + +.. rubric:: Password-based authentication (domain-scoped) + +.. 
code-block:: yaml + + example: + auth: + auth_url: http://example.com/identity + domain_id: default + password: password + username: admin + region_name: RegionOne + +.. rubric:: Password-based authentication (trust-scoped) + +.. code-block:: yaml + + example-trust: + auth: + auth_url: http://example.com/identity + password: password + username: admin + trust_id: 95946f9eef864fdc993079d8fe3e5747 + region_name: RegionOne + +.. rubric:: Password-based authentication (system-scoped) + +.. code-block:: yaml + + example-system: + auth: + auth_url: http://example.com/identity + password: password + system_scope: all + username: admin + region_name: RegionOne + +.. rubric:: Application credential-based authentication + +.. code-block:: yaml + + example-appcred: + auth: + auth_url: http://example.com/identity + application_credential_id: 9da0a8da3d394d09bf49dfc27014d254 + application_credential_secret: pKfDSvUOFwO2t2_XxCajAFhzCKAVHI7yfqPb6xjshVDnMUHF7ifju8gMdhHTI4Eo56UP_hEc8ssmgA1NNtKMpA + auth_type: v3applicationcredential + region_name: RegionOne + +.. rubric:: Token-based authentication + +.. code-block:: yaml + + example-token: + auth: + auth_url: http://example.com/identity + token: gAAAAABl32ptw2PN6L9JyBeO16PwQU1SrdMUvUz8Eon7LC2PFItdGRWFpOkK0qwH3JkukTuEM5qbYK9ucowRXET1RBMjZlfVpUa8Nz3qjQdzXw7pBKH4w1e4tekvDCOKfn15ZoujBOvdGqgtpW-febVGaW9oJzf6R3WTMDxWz3YRJjmiOBpwcN8 + project_id: 1fd93a4455c74d2ea94b929fc5f0e488 + auth_type: v3token + region_name: RegionOne + +.. note:: + + This is a toy example: by their very definition tokens are short-lived. + You are unlikely to store them in a ``clouds.yaml`` file. + Instead, you would likely pass the TOTP token via the command line + (``--os-token``) or as an environment variable (``OS_TOKEN``). + +.. rubric:: TOTP-based authentication + +.. 
code-block:: yaml + + example-totp: + auth: + auth_url: http://example.com/identity + passcode: password + project_domain_id: default + project_name: admin + user_domain_id: default + username: admin + auth_type: v3totp + region_name: RegionOne + +.. note:: + + This is a toy example: by their very definition TOTP tokens are + short-lived. You are unlikely to store them in a ``clouds.yaml`` file. + Instead, you would likely pass the TOTP token via the command line + (``--os-passcode``) or as an environment variable (``OS_PASSCODE``). + +.. rubric:: OAuth1-based authentication + +.. code-block:: yaml + + example-oauth: + auth: + auth_url: http://example.com/identity + consumer_key: foo + consumer_secret: secret + access_key: bar + access_secret: secret + auth_type: v3oauth1 + region_name: RegionOne diff --git a/doc/source/user/config/index.rst b/doc/source/user/config/index.rst new file mode 100644 index 0000000000..11637239d3 --- /dev/null +++ b/doc/source/user/config/index.rst @@ -0,0 +1,12 @@ +====================== +Using os-client-config +====================== + +.. toctree:: + :maxdepth: 2 + + configuration + using + vendor-support + network-config + reference diff --git a/doc/source/user/config/network-config.rst b/doc/source/user/config/network-config.rst new file mode 100644 index 0000000000..ea85414787 --- /dev/null +++ b/doc/source/user/config/network-config.rst @@ -0,0 +1,67 @@ +============== +Network Config +============== + +There are several different qualities that networks in OpenStack might have +that might not be able to be automatically inferred from the available +metadata. To help users navigate more complex setups, `os-client-config` +allows configuring a list of network metadata. + +.. 
code-block:: yaml + + clouds: + amazing: + networks: + - name: blue + routes_externally: true + - name: purple + routes_externally: true + default_interface: true + - name: green + routes_externally: false + - name: yellow + routes_externally: false + nat_destination: true + - name: chartreuse + routes_externally: false + routes_ipv6_externally: true + - name: aubergine + routes_ipv4_externally: false + routes_ipv6_externally: true + +Every entry must have a name field, which can hold either the name or the id +of the network. + +`routes_externally` is a boolean field that labels the network as handling +north/south traffic off of the cloud. In a public cloud this might be thought +of as the "public" network, but in private clouds it's possible it might +be an RFC1918 address. In either case, it provides IPs to servers that +things not on the cloud can use. This value defaults to `false`, which +indicates only servers on the same network can talk to it. + +`routes_ipv4_externally` and `routes_ipv6_externally` are boolean fields to +help handle `routes_externally` in the case where a network has a split stack +with different values for IPv4 and IPv6. Either entry, if not given, defaults +to the value of `routes_externally`. + +`default_interface` is a boolean field that indicates that the network is the +one that programs should use. It defaults to false. An example of needing to +use this value is a cloud with two private networks, and where a user is +running ansible in one of the servers to talk to other servers on the private +network. Because both networks are private, there would otherwise be no way +to determine which one should be used for the traffic. There can only be one +`default_interface` per cloud. + +`nat_destination` is a boolean field that indicates which network floating +ips should be attached to. It defaults to false. Normally this can be inferred +by looking for a network that has subnets that have a gateway_ip. 
But it's +possible to have more than one network that satisfies that condition, so the +user might want to tell programs which one to pick. There can be only one +`nat_destination` per cloud. + +`nat_source` is a boolean field that indicates which network floating +ips should be requested from. It defaults to false. Normally this can be +inferred by looking for a network that is attached to a router. But it's +possible to have more than one network that satisfies that condition, so the +user might want to tell programs which one to pick. There can be only one +`nat_source` per cloud. diff --git a/doc/source/user/config/reference.rst b/doc/source/user/config/reference.rst new file mode 100644 index 0000000000..b4909ad32c --- /dev/null +++ b/doc/source/user/config/reference.rst @@ -0,0 +1,14 @@ +============= +API Reference +============= + +.. module:: openstack.config + :synopsis: OpenStack client configuration + +.. autoclass:: openstack.config.OpenStackConfig + :members: + :inherited-members: + +.. autoclass:: openstack.config.cloud_region.CloudRegion + :members: + :inherited-members: diff --git a/doc/source/user/config/using.rst b/doc/source/user/config/using.rst new file mode 100644 index 0000000000..2359de00ca --- /dev/null +++ b/doc/source/user/config/using.rst @@ -0,0 +1,52 @@ +======================================== +Using openstack.config in an Application +======================================== + +Usage +----- + +The simplest and least useful thing you can do is: + +.. code-block:: python + + python -m openstack.config.loader + +Which will print out whatever it finds for your config. If you want to use +it from python, which is much more likely what you want to do, you can do +things like: + +Get a named cloud. + +.. code-block:: python + + import openstack.config + + cloud_region = openstack.config.OpenStackConfig().get_one( + 'internap', region_name='ams01') + print(cloud_region.name, cloud_region.region, cloud_region.config) + +Or, get all of the clouds. + +.. 
code-block:: python + + import openstack.config + + cloud_regions = openstack.config.OpenStackConfig().get_all() + for cloud_region in cloud_regions: + print(cloud_region.name, cloud_region.region, cloud_region.config) + +argparse +-------- + +If you're using `openstack.config` from a program that wants to process +command line options, there is a registration function to register the +arguments that both `openstack.config` and keystoneauth know how to deal +with - as well as a consumption argument. + +.. code-block:: python + + import argparse + + import openstack + + parser = argparse.ArgumentParser() + cloud = openstack.connect(options=parser) diff --git a/doc/source/user/config/vendor-support.rst b/doc/source/user/config/vendor-support.rst new file mode 100644 index 0000000000..6a8bb0add9 --- /dev/null +++ b/doc/source/user/config/vendor-support.rst @@ -0,0 +1,364 @@ +============== +Vendor Support +============== + +OpenStack presents deployers with many options, some of which can expose +differences to end users. `os-client-config` tries its best to collect +information about various things a user would need to know. The following +is a text representation of the vendor related defaults `os-client-config` +knows about. + +Default Values +-------------- + +These are the default behaviors unless a cloud is configured differently. 
+ +* Identity uses `password` authentication +* Identity API Version is 2 +* Image API Version is 2 +* Volume API Version is 2 +* Compute API Version is 2.1 +* Images must be in `qcow2` format +* Images are uploaded using PUT interface +* Public IPv4 is directly routable via DHCP from Neutron +* IPv6 is not provided +* Floating IPs are not required +* Floating IPs are provided by Neutron +* Security groups are provided by Neutron +* Vendor specific agents are not used + +AURO +---- + +https://api.auro.io:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +van1 Vancouver, BC +============== ================ + +* Public IPv4 is provided via NAT with Neutron Floating IP + +Betacloud +--------- + +https://api-1.betacloud.de:5000 + +============== ================== +Region Name Location +============== ================== +betacloud-1 Karlsruhe, Germany +============== ================== + +* Identity API Version is 3 +* Images must be in `raw` format +* Public IPv4 is provided via NAT with Neutron Floating IP +* Volume API Version is 3 + +Binero +------ + +https://auth.binero.cloud:5000/v3 + +============== ================== +Region Name Location +============== ================== +europe-se-1 Stockholm, SE +============== ================== + +* Identity API Version is 3 +* Volume API Version is 3 +* Public IPv4 is directly routable via DHCP from Neutron +* Public IPv4 is provided via NAT with Neutron Floating IP + +Catalyst +-------- + +https://api.cloud.catalyst.net.nz:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +nz-por-1 Porirua, NZ +nz_wlg_2 Wellington, NZ +============== ================ + +* Identity API Version is 3 +* Compute API Version is 2 +* Images must be in `raw` format +* Volume API Version is 3 + +City Cloud +---------- + +https://%(region_name)s.citycloud.com:5000/v3/ + +============== ================ +Region Name Location +============== 
================ +Buf1 Buffalo, NY +dx1 Dubai, UAE +Fra1 Frankfurt, DE +Kna1 Karlskrona, SE +Lon1 London, UK +Sto2 Stockholm, SE +tky1 Tokyo, JP +============== ================ + +* Identity API Version is 3 +* Public IPv4 is provided via NAT with Neutron Floating IP +* Volume API Version is 1 + +ConoHa +------ + +https://identity.%(region_name)s.conoha.io + +============== ================ +Region Name Location +============== ================ +tyo1 Tokyo, JP +sin1 Singapore +sjc1 San Jose, CA +============== ================ + +* Image upload is not supported + +DreamCompute +------------ + +https://iad2.dream.io:5000 + +============== ================ +Region Name Location +============== ================ +RegionOne Ashburn, VA +============== ================ + +* Identity API Version is 3 +* Images must be in `raw` format +* IPv6 is provided to every server + +Open Telekom Cloud +------------------ + +https://iam.%(region_name)s.otc.t-systems.com/v3 + +============== =================== +Region Name Location +============== =================== +eu-de Biere/Magdeburg, DE +eu-nl Amsterdam, NL +============== =================== + +* Identity API Version is 3 +* Public IPv4 is provided via NAT with Neutron Floating IP + +ELASTX +------ + +https://ops.elastx.cloud:5000/v3 + +============== ================ +Region Name Location +============== ================ +se-sto Stockholm, SE +============== ================ + +* Identity API Version is 3 +* Public IPv4 is provided via NAT with Neutron Floating IP + +Enter Cloud Suite +----------------- + +https://api.entercloudsuite.com/v2.0 + +============== ================ +Region Name Location +============== ================ +nl-ams1 Amsterdam, NL +it-mil1 Milan, IT +de-fra1 Frankfurt, DE +============== ================ + +* Compute API Version is 2 + +Fuga +---- + +https://identity.api.fuga.io:5000 + +============== ================ +Region Name Location +============== ================ +cystack Netherlands 
+============== ================ + +* Identity API Version is 3 +* Volume API Version is 3 + +Internap +-------- + +https://identity.api.cloud.inap.com/v2.0 + +============== ================ +Region Name Location +============== ================ +ams01 Amsterdam, NL +da01 Dallas, TX +nyj01 New York, NY +sin01 Singapore +sjc01 San Jose, CA +============== ================ + +* Floating IPs are not supported + +Limestone Networks +------------------ + +https://auth.cloud.lstn.net:5000/v3 + +============== ================== +Region Name Location +============== ================== +us-dfw-1 Dallas, TX +us-slc Salt Lake City, UT +============== ================== + +* Identity API Version is 3 +* Images must be in `raw` format +* IPv6 is provided to every server connected to the `Public Internet` network + +OVH +--- + +https://auth.cloud.ovh.net/v3 + +============== ================ +Region Name Location +============== ================ +BHS1 Beauharnois, QC +SBG1 Strassbourg, FR +GRA1 Gravelines, FR +============== ================ + +* Images may be in `raw` format. The `qcow2` default is also supported +* Floating IPs are not supported + +Rackspace +--------- + +https://identity.api.rackspacecloud.com/v2.0/ + +============== ================ +Region Name Location +============== ================ +DFW Dallas, TX +HKG Hong Kong +IAD Washington, D.C. 
+LON London, UK +ORD Chicago, IL +SYD Sydney, NSW +============== ================ + +* Database Service Type is `rax:database` +* Compute Service Name is `cloudServersOpenStack` +* Images must be in `vhd` format +* Images must be uploaded using the Glance Task Interface +* Floating IPs are not supported +* Public IPv4 is directly routable via static config by Nova +* IPv6 is provided to every server +* Security groups are not supported +* Uploaded Images need properties to not use vendor agent:: + :vm_mode: hvm + :xenapi_use_agent: False +* Block Storage API Version is 2 +* The Block Storage API supports version 2 but only version 1 is in + the catalog. The Block Storage endpoint is + https://{region_name}.blockstorage.api.rackspacecloud.com/v2/{project_id} +* While passwords are recommended for use, API keys do work as well. + The `rackspaceauth` python package must be installed, and then the following + can be added to clouds.yaml:: + + auth: + username: myusername + api_key: myapikey + auth_type: rackspace_apikey + +SWITCHengines +------------- + +https://keystone.cloud.switch.ch:5000/v3 + +============== ================ +Region Name Location +============== ================ +LS Lausanne, CH +ZH Zurich, CH +============== ================ + +* Identity API Version is 3 +* Compute API Version is 2 +* Images must be in `raw` format +* Volume API Version is 3 + +Ultimum +------- + +https://console.ultimum-cloud.com:5000/v2.0 + +============== ================ +Region Name Location +============== ================ +RegionOne Prague, CZ +============== ================ + +* Volume API Version is 1 + +UnitedStack +----------- + +https://identity.api.ustack.com/v3 + +============== ================ +Region Name Location +============== ================ +bj1 Beijing, CN +gd1 Guangdong, CN +============== ================ + +* Identity API Version is 3 +* Images must be in `raw` format +* Volume API Version is 1 + +VEXXHOST +-------- + +http://auth.vexxhost.net + 
+============== ================ +Region Name Location +============== ================ +ca-ymq-1 Montreal, QC +sjc1 Santa Clara, CA +============== ================ + +* DNS API Version is 1 +* Identity API Version is 3 +* Volume API Version is 3 + +Zetta +----- + +https://identity.api.zetta.io/v3 + +============== ================ +Region Name Location +============== ================ +no-osl1 Oslo, NO +============== ================ + +* DNS API Version is 2 +* Identity API Version is 3 diff --git a/doc/source/user/connection.rst b/doc/source/user/connection.rst new file mode 100644 index 0000000000..21833c3141 --- /dev/null +++ b/doc/source/user/connection.rst @@ -0,0 +1,26 @@ +Connection +========== +.. automodule:: openstack.connection + + from_config + ----------- + .. autofunction:: openstack.connection.from_config + +Connection Object +----------------- + +.. autoclass:: openstack.connection.Connection + :members: + :inherited-members: + + +Transitioning from Profile +-------------------------- + +Support exists for users coming from older releases of OpenStack SDK who have +been using the :class:`~openstack.profile.Profile` interface. + +.. toctree:: + :maxdepth: 1 + + transition_from_profile diff --git a/doc/source/user/examples b/doc/source/user/examples new file mode 120000 index 0000000000..9f9d1de886 --- /dev/null +++ b/doc/source/user/examples @@ -0,0 +1 @@ +../../../examples \ No newline at end of file diff --git a/doc/source/user/exceptions.rst b/doc/source/user/exceptions.rst new file mode 100644 index 0000000000..3e85f6f985 --- /dev/null +++ b/doc/source/user/exceptions.rst @@ -0,0 +1,15 @@ +Exceptions +========== + +openstacksdk provides a number of `exceptions`__ for commonly encountered +issues, such as missing API endpoints, various HTTP error codes, timeouts and +so forth. It is the responsibility of the calling application to handle these +exceptions appropriately. + +Available exceptions +-------------------- + +.. 
automodule:: openstack.exceptions + :members: + +.. __: https://docs.python.org/3/library/exceptions.html diff --git a/doc/source/user/guides/baremetal.rst b/doc/source/user/guides/baremetal.rst new file mode 100644 index 0000000000..cbfd9665e2 --- /dev/null +++ b/doc/source/user/guides/baremetal.rst @@ -0,0 +1,67 @@ +Using OpenStack Baremetal +========================= + +Before working with the Bare Metal service, you'll need to create a +connection to your OpenStack cloud by following the :doc:`connect` user +guide. This will provide you with the ``conn`` variable used in the examples +below. + +.. contents:: Table of Contents + :local: + +The primary resource of the Bare Metal service is the **node**. + +Below are a few usage examples. For a reference to all the available methods, +see :doc:`/user/proxies/baremetal`. + +CRUD operations +~~~~~~~~~~~~~~~ + +List Nodes +---------- + +A **node** is a bare metal machine. + +.. literalinclude:: ../examples/baremetal/list.py + :pyobject: list_nodes + +Full example: `baremetal resource list`_ + +Provisioning operations +~~~~~~~~~~~~~~~~~~~~~~~ + +Provisioning actions are the main way to manipulate the nodes. See `Bare Metal +service states documentation`_ for details. + +Manage and inspect Node +----------------------- + +*Managing* a node in the ``enroll`` provision state validates the management +(IPMI, Redfish, etc) credentials and moves the node to the ``manageable`` +state. *Managing* a node in the ``available`` state moves it to the +``manageable`` state. In this state additional actions, such as configuring +RAID or inspecting, are available. + +*Inspecting* a node detects its properties by either talking to its BMC or by +booting a special ramdisk. + +.. literalinclude:: ../examples/baremetal/provisioning.py + :pyobject: manage_and_inspect_node + +Full example: `baremetal provisioning`_ + +Provide Node +------------ + +*Providing* a node in the ``manageable`` provision state makes it available +for deployment. 
+ +.. literalinclude:: ../examples/baremetal/provisioning.py + :pyobject: provide_node + +Full example: `baremetal provisioning`_ + + +.. _baremetal resource list: http://opendev.org/openstack/openstacksdk/src/branch/master/examples/baremetal/list.py +.. _baremetal provisioning: http://opendev.org/openstack/openstacksdk/src/branch/master/examples/baremetal/provisioning.py +.. _Bare Metal service states documentation: https://docs.openstack.org/ironic/latest/contributor/states.html diff --git a/doc/source/user/guides/block_storage.rst b/doc/source/user/guides/block_storage.rst new file mode 100644 index 0000000000..8f2661d096 --- /dev/null +++ b/doc/source/user/guides/block_storage.rst @@ -0,0 +1,9 @@ +Using OpenStack Block Storage +============================= + +Before working with the Block Storage service, you'll need to create a +connection to your OpenStack cloud by following the :doc:`connect` user +guide. This will provide you with the ``conn`` variable used in the examples +below. + +.. TODO(thowe): Implement this guide diff --git a/doc/source/user/guides/clustering.rst b/doc/source/user/guides/clustering.rst new file mode 100644 index 0000000000..af56db52c8 --- /dev/null +++ b/doc/source/user/guides/clustering.rst @@ -0,0 +1,37 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. 
+ + +========================== +Using OpenStack Clustering +========================== + +Before working with the Clustering service, you'll need to create a +connection to your OpenStack cloud by following the :doc:`connect` user guide. +This will provide you with the ``conn`` variable used by all examples in this +guide. + +The primary abstractions/resources of the Clustering service are: + +.. toctree:: + :maxdepth: 1 + + Profile Type + Profile + Cluster + Node + Policy Type + Policy + Receiver + Action + Event diff --git a/doc/source/user/guides/clustering/action.rst b/doc/source/user/guides/clustering/action.rst new file mode 100644 index 0000000000..1be15eca3a --- /dev/null +++ b/doc/source/user/guides/clustering/action.rst @@ -0,0 +1,47 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +==================== +Working with Actions +==================== + +An action is an abstraction of some logic that can be executed by a worker +thread. Most of the operations supported by Senlin are executed asynchronously, +which means they are queued into database and then picked up by certain worker +thread for execution. + + +List Actions +~~~~~~~~~~~~ + +To examine the list of actions: + +.. literalinclude:: ../../examples/clustering/action.py + :pyobject: list_actions + +When listing actions, you can specify the sorting option using the ``sort`` +parameter and you can do pagination using the ``limit`` and ``marker`` +parameters. 
+ +Full example: `manage action`_ + + +Get Action +~~~~~~~~~~ + +To get an action based on its name or ID: + +.. literalinclude:: ../../examples/clustering/action.py + :pyobject: get_action + +.. _manage action: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/action.py diff --git a/doc/source/user/guides/clustering/cluster.rst b/doc/source/user/guides/clustering/cluster.rst new file mode 100644 index 0000000000..6fd798668f --- /dev/null +++ b/doc/source/user/guides/clustering/cluster.rst @@ -0,0 +1,193 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +================= +Managing Clusters +================= + +Clusters are first-class citizens in Senlin service design. A cluster is +defined as a collection of homogeneous objects. The "homogeneous" here means +that the objects managed (aka. Nodes) have to be instantiated from the same +"profile type". + + +List Clusters +~~~~~~~~~~~~~ + +To examine the list of clusters: + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: list_cluster + +When listing clusters, you can specify the sorting option using the ``sort`` +parameter and you can do pagination using the ``limit`` and ``marker`` +parameters. + +Full example: `manage cluster`_ + + +Create Cluster +~~~~~~~~~~~~~~ + +When creating a cluster, you will provide a dictionary with keys and values +according to the cluster type referenced. + +.. 
literalinclude:: ../../examples/clustering/cluster.py + :pyobject: create_cluster + +Optionally, you can specify a ``metadata`` keyword argument that contains some +key-value pairs to be associated with the cluster. + +Full example: `manage cluster`_ + + +Get Cluster +~~~~~~~~~~~ + +To get a cluster based on its name or ID: + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: get_cluster + +Full example: `manage cluster`_ + + +Find Cluster +~~~~~~~~~~~~ + +To find a cluster based on its name or ID: + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: find_cluster + +Full example: `manage cluster`_ + + +Update Cluster +~~~~~~~~~~~~~~ + +After a cluster is created, most of its properties are immutable. Still, you +can update a cluster's ``name`` and/or ``params``. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: update_cluster + +Full example: `manage cluster`_ + + +Delete Cluster +~~~~~~~~~~~~~~ + +A cluster can be deleted after creation, When there are nodes in the cluster, +the Senlin engine will launch a process to delete all nodes from the cluster +and destroy them before deleting the cluster object itself. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: delete_cluster + + +Add Nodes to Cluster +~~~~~~~~~~~~~~~~~~~~ + +Add some existing nodes into the specified cluster. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: add_nodes_to_cluster + + +Remove Nodes from Cluster +~~~~~~~~~~~~~~~~~~~~~~~~~ + +Remove nodes from specified cluster. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: remove_nodes_from_cluster + + +Replace Nodes in Cluster +~~~~~~~~~~~~~~~~~~~~~~~~ + +Replace some existing nodes in the specified cluster. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: replace_nodes_in_cluster + + +Cluster Scale Out +~~~~~~~~~~~~~~~~~ + +Inflate the size of a cluster. + +.. 
literalinclude:: ../../examples/clustering/cluster.py + :pyobject: scale_out_cluster + + +Cluster Scale In +~~~~~~~~~~~~~~~~ + +Shrink the size of a cluster. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: scale_in_cluster + + +Cluster Resize +~~~~~~~~~~~~~~ + +Resize a cluster. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: resize_cluster + + +Attach Policy to Cluster +~~~~~~~~~~~~~~~~~~~~~~~~ + +Once a policy is attached (bound) to a cluster, it will be +enforced when related actions are performed on that cluster, +unless the policy is (temporarily) disabled on the cluster. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: attach_policy_to_cluster + + +Detach Policy from Cluster +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +Once a policy is attached to a cluster, it can be detached +from the cluster at user's request. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: detach_policy_from_cluster + + +Cluster Check +~~~~~~~~~~~~~ + +Check cluster health status; cluster members will be checked. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: check_cluster + + +Cluster Recover +~~~~~~~~~~~~~~~ + +To restore a specified cluster, members in the cluster will be checked. + +.. literalinclude:: ../../examples/clustering/cluster.py + :pyobject: recover_cluster + + +.. _manage cluster: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/cluster.py + diff --git a/doc/source/user/guides/clustering/event.rst b/doc/source/user/guides/clustering/event.rst new file mode 100644 index 0000000000..da958e318b --- /dev/null +++ b/doc/source/user/guides/clustering/event.rst @@ -0,0 +1,47 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. 
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +=================== +Working with Events +=================== + +An event is a record generated during engine execution. Such an event +captures what has happened inside the senlin-engine. The senlin-engine service +generates event records when it is performing some actions or checking +policies. + + +List Events +~~~~~~~~~~~ + +To examine the list of events: + +.. literalinclude:: ../../examples/clustering/event.py + :pyobject: list_events + +When listing events, you can specify the sorting option using the ``sort`` +parameter and you can do pagination using the ``limit`` and ``marker`` +parameters. + +Full example: `manage event`_ + + +Get Event +~~~~~~~~~ + +To get an event based on its name or ID: + +.. literalinclude:: ../../examples/clustering/event.py + :pyobject: get_event + +.. _manage event: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/event.py diff --git a/doc/source/user/guides/clustering/node.rst b/doc/source/user/guides/clustering/node.rst new file mode 100644 index 0000000000..99b59939db --- /dev/null +++ b/doc/source/user/guides/clustering/node.rst @@ -0,0 +1,120 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + License for the specific language governing permissions and limitations + under the License. + +============== +Managing Nodes +============== + +Node is a logical object managed by the Senlin service. A node can be a member +of at most one cluster at any time. A node can be an orphan node which means +it doesn't belong to any clusters. + + +List Nodes +~~~~~~~~~~ + +To examine the list of Nodes: + +.. literalinclude:: ../../examples/clustering/node.py + :pyobject: list_nodes + +When listing nodes, you can specify the sorting option using the ``sort`` +parameter and you can do pagination using the ``limit`` and ``marker`` +parameters. + +Full example: `manage node`_ + + +Create Node +~~~~~~~~~~~ + +When creating a node, you will provide a dictionary with keys and values +according to the node type referenced. + +.. literalinclude:: ../../examples/clustering/node.py + :pyobject: create_node + +Optionally, you can specify a ``metadata`` keyword argument that contains some +key-value pairs to be associated with the node. + +Full example: `manage node`_ + + +Get Node +~~~~~~~~ + +To get a node based on its name or ID: + +.. literalinclude:: ../../examples/clustering/node.py + :pyobject: get_node + +Full example: `manage node`_ + + +Find Node +~~~~~~~~~ + +To find a node based on its name or ID: + +.. literalinclude:: ../../examples/clustering/node.py + :pyobject: find_node + +Full example: `manage node`_ + + +Update Node +~~~~~~~~~~~ + +After a node is created, most of its properties are immutable. Still, you +can update a node's ``name`` and/or ``params``. + +.. literalinclude:: ../../examples/clustering/node.py + :pyobject: update_node + +Full example: `manage node`_ + + +Delete Node +~~~~~~~~~~~ + +A node can be deleted after creation, provided that it is not referenced +by any active clusters. If you attempt to delete a node that is still in +use, you will get an error message. + +.. 
literalinclude:: ../../examples/clustering/node.py + :pyobject: delete_node + +Full example: `manage node`_ + + +Check Node +~~~~~~~~~~ + +If the underlying physical resource is not healthy, the node will be set +to ERROR status. + +.. literalinclude:: ../../examples/clustering/node.py + :pyobject: check_node + +Full example: `manage node`_ + + +Recover Node +~~~~~~~~~~~~ + +To restore a specified node. + +.. literalinclude:: ../../examples/clustering/node.py + :pyobject: recover_node + +.. _manage node: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/node.py diff --git a/doc/source/user/guides/clustering/policy.rst b/doc/source/user/guides/clustering/policy.rst new file mode 100644 index 0000000000..bf282b2149 --- /dev/null +++ b/doc/source/user/guides/clustering/policy.rst @@ -0,0 +1,102 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +================= +Managing Policies +================= + +A **policy type** can be treated as the meta-type of a `Policy` object. A +registry of policy types is built when the Cluster service starts. When +creating a `Policy` object, you will indicate the policy type used in its +`spec` property. + + +List Policies +~~~~~~~~~~~~~ + +To examine the list of policies: + +.. 
literalinclude:: ../../examples/clustering/policy.py + :pyobject: list_policies + +When listing policies, you can specify the sorting option using the ``sort`` +parameter and you can do pagination using the ``limit`` and ``marker`` +parameters. + +Full example: `manage policy`_ + + +Create Policy +~~~~~~~~~~~~~ + +When creating a policy, you will provide a dictionary with keys and values +according to the policy type referenced. + +.. literalinclude:: ../../examples/clustering/policy.py + :pyobject: create_policy + +Optionally, you can specify a ``metadata`` keyword argument that contains some +key-value pairs to be associated with the policy. + +Full example: `manage policy`_ + + +Find Policy +~~~~~~~~~~~ + +To find a policy based on its name or ID: + +.. literalinclude:: ../../examples/clustering/policy.py + :pyobject: find_policy + +Full example: `manage policy`_ + + +Get Policy +~~~~~~~~~~ + +To get a policy based on its name or ID: + +.. literalinclude:: ../../examples/clustering/policy.py + :pyobject: get_policy + +Full example: `manage policy`_ + + +Update Policy +~~~~~~~~~~~~~ + +After a policy is created, most of its properties are immutable. Still, you +can update a policy's ``name`` and/or ``metadata``. + +.. literalinclude:: ../../examples/clustering/policy.py + :pyobject: update_policy + +The Cluster service doesn't allow updating the ``spec`` of a policy. The only +way to achieve that is to create a new policy. + +Full example: `manage policy`_ + + +Delete Policy +~~~~~~~~~~~~~ + +A policy can be deleted after creation, provided that it is not referenced +by any active clusters or nodes. If you attempt to delete a policy that is +still in use, you will get an error message. + +.. literalinclude:: ../../examples/clustering/policy.py + :pyobject: delete_policy + + +.. 
_manage policy: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/policy.py diff --git a/doc/source/user/guides/clustering/policy_type.rst b/doc/source/user/guides/clustering/policy_type.rst new file mode 100644 index 0000000000..eb7bc623ce --- /dev/null +++ b/doc/source/user/guides/clustering/policy_type.rst @@ -0,0 +1,45 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +========================= +Working with Policy Types +========================= + +A **policy** is a template that encodes the information needed for specifying +the rules that are checked/enforced before/after certain actions are performed +on a cluster. The rules are encoded in a property named ``spec``. + + +List Policy Types +~~~~~~~~~~~~~~~~~ + +To examine the known policy types: + +.. literalinclude:: ../../examples/clustering/policy_type.py + :pyobject: list_policy_types + +Full example: `manage policy type`_ + + +Get Policy Type +~~~~~~~~~~~~~~~ + +To retrieve the details about a policy type, you need to provide the name of +it. + +.. literalinclude:: ../../examples/clustering/policy_type.py + :pyobject: get_policy_type + +Full example: `manage policy type`_ + +.. 
_manage policy type: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/policy_type.py diff --git a/doc/source/user/guides/clustering/profile.rst b/doc/source/user/guides/clustering/profile.rst new file mode 100644 index 0000000000..79137844d5 --- /dev/null +++ b/doc/source/user/guides/clustering/profile.rst @@ -0,0 +1,105 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +================= +Managing Profiles +================= + +A **profile type** can be treated as the meta-type of a `Profile` object. A +registry of profile types is built when the Cluster service starts. When +creating a `Profile` object, you will indicate the profile type used in its +`spec` property. + + +List Profiles +~~~~~~~~~~~~~ + +To examine the list of profiles: + +.. literalinclude:: ../../examples/clustering/profile.py + :pyobject: list_profiles + +When listing profiles, you can specify the sorting option using the ``sort`` +parameter and you can do pagination using the ``limit`` and ``marker`` +parameters. + +Full example: `manage profile`_ + + +Create Profile +~~~~~~~~~~~~~~ + +When creating a profile, you will provide a dictionary with keys and values +specified according to the profile type referenced. + +.. literalinclude:: ../../examples/clustering/profile.py + :pyobject: create_profile + +Optionally, you can specify a ``metadata`` keyword argument that contains some +key-value pairs to be associated with the profile. 
+ +Full example: `manage profile`_ + + +Find Profile +~~~~~~~~~~~~ + +To find a profile based on its name or ID: + +.. literalinclude:: ../../examples/clustering/profile.py + :pyobject: find_profile + +Full example: `manage profile`_ + + +Get Profile +~~~~~~~~~~~ + +To get a profile based on its name or ID: + +.. literalinclude:: ../../examples/clustering/profile.py + :pyobject: get_profile + +Full example: `manage profile`_ + + +Update Profile +~~~~~~~~~~~~~~ + +After a profile is created, most of its properties are immutable. Still, you +can update a profile's ``name`` and/or ``metadata``. + +.. literalinclude:: ../../examples/clustering/profile.py + :pyobject: update_profile + +The Cluster service doesn't allow updating the ``spec`` of a profile. The only +way to achieve that is to create a new profile. + +Full example: `manage profile`_ + + +Delete Profile +~~~~~~~~~~~~~~ + +A profile can be deleted after creation, provided that it is not referenced +by any active clusters or nodes. If you attempt to delete a profile that is +still in use, you will get an error message. + +.. literalinclude:: ../../examples/clustering/profile.py + :pyobject: delete_profile + + +.. _manage profile: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/profile.py diff --git a/doc/source/user/guides/clustering/profile_type.rst b/doc/source/user/guides/clustering/profile_type.rst new file mode 100644 index 0000000000..4f8d3645c4 --- /dev/null +++ b/doc/source/user/guides/clustering/profile_type.rst @@ -0,0 +1,44 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License.
You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the + License for the specific language governing permissions and limitations + under the License. + +========================== +Working with Profile Types +========================== + +A **profile** is a template used to create and manage nodes, i.e. objects +exposed by other OpenStack services. A profile encodes the information needed +for node creation in a property named ``spec``. + + +List Profile Types +~~~~~~~~~~~~~~~~~~ + +To examine the known profile types: + +.. literalinclude:: ../../examples/clustering/profile_type.py + :pyobject: list_profile_types + +Full example: `manage profile type`_ + + +Get Profile Type +~~~~~~~~~~~~~~~~ + +To get the details about a profile type, you need to provide the name of it. + +.. literalinclude:: ../../examples/clustering/profile_type.py + :pyobject: get_profile_type + +Full example: `manage profile type`_ + +.. _manage profile type: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/profile_type.py diff --git a/doc/source/user/guides/clustering/receiver.rst b/doc/source/user/guides/clustering/receiver.rst new file mode 100644 index 0000000000..c3c71dce07 --- /dev/null +++ b/doc/source/user/guides/clustering/receiver.rst @@ -0,0 +1,100 @@ +.. + Licensed under the Apache License, Version 2.0 (the "License"); you may + not use this file except in compliance with the License. You may obtain + a copy of the License at + + http://www.apache.org/licenses/LICENSE-2.0 + + Unless required by applicable law or agreed to in writing, software + distributed under the License is distributed on an "AS IS" BASIS, WITHOUT + WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the + License for the specific language governing permissions and limitations + under the License. + +================== +Managing Receivers +================== + +Receivers are the event sinks associated to senlin clusters. When certain +events (or alarms) are seen by a monitoring software, the software can +notify the senlin clusters of those events (or alarms). When senlin receives +those notifications, it can automatically trigger some predefined operations +with preset parameter values. + + +List Receivers +~~~~~~~~~~~~~~ + +To examine the list of receivers: + +.. literalinclude:: ../../examples/clustering/receiver.py + :pyobject: list_receivers + +When listing receivers, you can specify the sorting option using the ``sort`` +parameter and you can do pagination using the ``limit`` and ``marker`` +parameters. + +Full example: `manage receiver`_ + + +Create Receiver +~~~~~~~~~~~~~~~ + +When creating a receiver, you will provide a dictionary with keys and values +according to the receiver type referenced. + +.. literalinclude:: ../../examples/clustering/receiver.py + :pyobject: create_receiver + +Optionally, you can specify a ``metadata`` keyword argument that contains some +key-value pairs to be associated with the receiver. + +Full example: `manage receiver`_ + + +Get Receiver +~~~~~~~~~~~~ + +To get a receiver based on its name or ID: + +.. literalinclude:: ../../examples/clustering/receiver.py + :pyobject: get_receiver + +Full example: `manage receiver`_ + + +Find Receiver +~~~~~~~~~~~~~ + +To find a receiver based on its name or ID: + +.. literalinclude:: ../../examples/clustering/receiver.py + :pyobject: find_receiver + +Full example: `manage receiver`_ + + +Update Receiver +~~~~~~~~~~~~~~~ + +After a receiver is created, most of its properties are immutable. Still, you +can update a receiver's ``name`` and/or ``params``. + +.. 
literalinclude:: ../../examples/clustering/receiver.py + :pyobject: update_receiver + +Full example: `manage receiver`_ + + +Delete Receiver +~~~~~~~~~~~~~~~ + +A receiver can be deleted after creation, provided that it is not referenced +by any active clusters. If you attempt to delete a receiver that is still in +use, you will get an error message. + +.. literalinclude:: ../../examples/clustering/receiver.py + :pyobject: delete_receiver + + +.. _manage receiver: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/clustering/receiver.py diff --git a/doc/source/user/guides/compute.rst b/doc/source/user/guides/compute.rst new file mode 100644 index 0000000000..bac8a106d9 --- /dev/null +++ b/doc/source/user/guides/compute.rst @@ -0,0 +1,89 @@ +Using OpenStack Compute +======================= + +Before working with the Compute service, you'll need to create a connection +to your OpenStack cloud by following the :doc:`connect` user guide. This will +provide you with the ``conn`` variable used in the examples below. + +.. contents:: Table of Contents + :local: + +The primary resource of the Compute service is the server. + +List Servers +------------ + +A **server** is a virtual machine that provides access to a compute instance +being run by your cloud provider. + +.. literalinclude:: ../examples/compute/list.py + :pyobject: list_servers + +Full example: `compute resource list`_ + +List Images +----------- + +An **image** is the operating system you want to use for your server. + +.. literalinclude:: ../examples/compute/list.py + :pyobject: list_images + +Full example: `compute resource list`_ + +List Flavors +------------ + +A **flavor** is the resource configuration for a server. Each flavor is a +unique combination of disk, memory, vCPUs, and network bandwidth. + +.. 
literalinclude:: ../examples/compute/list.py + :pyobject: list_flavors + +Full example: `compute resource list`_ + +List Networks +------------- + +A **network** provides connectivity to servers. + +.. literalinclude:: ../examples/network/list.py + :pyobject: list_networks + +Full example: `network resource list`_ + +Create Key Pair +--------------- + +A **key pair** is the public key and private key of +`public–key cryptography`_. They are used to encrypt and decrypt login +information when connecting to your server. + +.. literalinclude:: ../examples/compute/create.py + :pyobject: create_keypair + +Full example: `compute resource create`_ + +Create Server +------------- + +At minimum, a server requires a name, an image, a flavor, and a network on +creation. You can discover the names and IDs of these attributes by listing +them as above and then using the find methods to get the appropriate +resources. + +Ideally you'll also create a server using a keypair so you can login to that +server with the private key. + +Servers take time to boot so we call ``wait_for_server`` to wait +for it to become active. + +.. literalinclude:: ../examples/compute/create.py + :pyobject: create_server + +Full example: `compute resource create`_ + +.. _compute resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/compute/list.py +.. _network resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/list.py +.. _compute resource create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/compute/create.py +.. 
_public–key cryptography: https://en.wikipedia.org/wiki/Public-key_cryptography diff --git a/doc/source/user/guides/connect.rst b/doc/source/user/guides/connect.rst new file mode 100644 index 0000000000..5630bbcd85 --- /dev/null +++ b/doc/source/user/guides/connect.rst @@ -0,0 +1,32 @@ +Connect +======= + +In order to work with an OpenStack cloud you first need to create a +:class:`~openstack.connection.Connection` to it using your credentials. A +:class:`~openstack.connection.Connection` can be +created in 3 ways, using the class itself, :ref:`config-clouds-yaml`, or +:ref:`config-environment-variables`. It is recommended to always use +:ref:`config-clouds-yaml` as the same config can be used across tools and +languages. + +Create Connection +----------------- + +To create a :class:`~openstack.connection.Connection` instance, use the +:func:`~openstack.connect` factory function. + +.. literalinclude:: ../examples/connect.py + :pyobject: create_connection + +Full example at `connect.py <https://opendev.org/openstack/openstacksdk/src/branch/master/examples/connect.py>`_ + +.. note:: To enable logging, see the :doc:`logging` user guide. + +Next +---- +Now that you can create a connection, continue with the :ref:`user_guides` +to work with an OpenStack service. + +.. TODO(shade) Update the text here and consolidate with the old + os-client-config docs so that we have a single and consistent explanation + of the envvars cloud, etc.
diff --git a/doc/source/users/guides/connect_from_config.rst b/doc/source/user/guides/connect_from_config.rst similarity index 78% rename from doc/source/users/guides/connect_from_config.rst rename to doc/source/user/guides/connect_from_config.rst index 042ee83c3f..0e0b4572e2 100644 --- a/doc/source/users/guides/connect_from_config.rst +++ b/doc/source/user/guides/connect_from_config.rst @@ -6,12 +6,9 @@ In order to work with an OpenStack cloud you first need to create a :class:`~openstack.connection.Connection` can be created in 3 ways, using the class itself (see :doc:`connect`), a file, or environment variables as illustrated below. The SDK uses -`os-client-config `_ +`os-client-config `_ to handle the configuration. -.. note:: To get your credentials - `Download the OpenStack RC file `_. - Create Connection From A File ----------------------------- @@ -21,7 +18,7 @@ Default Location To create a connection from a file you need a YAML file to contain the configuration. -.. literalinclude:: ../../contributors/clouds.yaml +.. literalinclude:: ../../contributor/clouds.yaml :language: yaml To use a configuration file called ``clouds.yaml`` in one of the default @@ -36,9 +33,9 @@ function takes three optional arguments: * **cloud_name** allows you to specify a cloud from your ``clouds.yaml`` file. * **cloud_config** allows you to pass in an existing -``os_client_config.config.OpenStackConfig``` object. + ``openstack.config.loader.OpenStackConfig``` object. * **options** allows you to specify a namespace object with options to be -added to the cloud config. + added to the cloud config. .. literalinclude:: ../examples/connect.py :pyobject: Opts @@ -66,7 +63,7 @@ of the cloud configuration to use, . .. Create Connection From Environment Variables -------------------------------------------- - TODO(etoews): Document when https://bugs.launchpad.net/os-client-config/+bug/1489617 + TODO(etoews): Document when https://storyboard.openstack.org/#!/story/1489617 is fixed. 
Next diff --git a/doc/source/users/guides/database.rst b/doc/source/user/guides/database.rst similarity index 100% rename from doc/source/users/guides/database.rst rename to doc/source/user/guides/database.rst diff --git a/doc/source/user/guides/dns.rst b/doc/source/user/guides/dns.rst new file mode 100644 index 0000000000..88caba15e0 --- /dev/null +++ b/doc/source/user/guides/dns.rst @@ -0,0 +1,109 @@ +Using OpenStack DNS +=================== + +Before working with the DNS service, you'll need to create a connection +to your OpenStack cloud by following the :doc:`connect` user guide. This will +provide you with the ``conn`` variable used in the examples below. + +.. contents:: Table of Contents + :local: + +The primary resource of the DNS service is the zone. + +List Zones +---------- + +**Zone** is a logical grouping of DNS records for a domain, allowing for the +centralized management of DNS resources, including domain names, +nameservers, and DNS queries. + +.. literalinclude:: ../examples/dns/list.py + :pyobject: list_zones + +Full example: `dns resource list`_ + +List Recordsets +--------------- + +**Recordsets** allow for the centralized management of various DNS records +within a Zone, helping to define how a domain responds to different types +of DNS queries. + +.. literalinclude:: ../examples/dns/list.py + :pyobject: list_recordsets + +Full example: `dns resource list`_ + +Create Zone +----------- + +Create a zone. +It allows users to define and manage the DNS namespace for a particular domain. + +.. literalinclude:: ../examples/dns/create.py + :pyobject: create_zone + +Full example: `dns resource create`_ + +Create Recordset +---------------- + +Create a recordset. It accepts several parameters that define the DNS +record's properties and sends an API request to OpenStack to create the +recordset within a specified DNS zone. + +..
literalinclude:: ../examples/dns/create.py + :pyobject: create_recordset + +Full example: `dns resource create`_ + +Delete Zone +----------- + +Delete a zone. +It allows users to completely delete the DNS management for a specified domain. + +.. literalinclude:: ../examples/dns/delete.py + :pyobject: delete_zone + +Full example: `dns resource delete`_ + +Delete Recordset +---------------- + +Delete a recordset. + +.. literalinclude:: ../examples/dns/delete.py + :pyobject: delete_recordset + +Full example: `dns resource delete`_ + +Find Zone +--------- + +The find_zone function searches for and returns a DNS zone by its name +using a given connection object. + +.. literalinclude:: ../examples/dns/find.py + :pyobject: find_zone + +Full example: `dns resource find`_ + +Find Recordset +-------------- + +The find_recordset function searches for a DNS recordset +with a specific name and type +within a given zone. If multiple recordsets +with the same name exist, +the record type can be specified to find the exact match. + +.. literalinclude:: ../examples/dns/find.py + :pyobject: find_recordset + +Full example: `dns resource find`_ + +.. _dns resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/dns/list.py +.. _dns resource create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/dns/create.py +.. _dns resource delete: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/dns/delete.py +.. _dns resource find: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/dns/find.py diff --git a/doc/source/user/guides/identity.rst b/doc/source/user/guides/identity.rst new file mode 100644 index 0000000000..9d94441441 --- /dev/null +++ b/doc/source/user/guides/identity.rst @@ -0,0 +1,111 @@ +Using OpenStack Identity +======================== + +Before working with the Identity service, you'll need to create a connection +to your OpenStack cloud by following the :doc:`connect` user guide.
This will +provide you with the ``conn`` variable used in the examples below. + +The OpenStack Identity service is the default identity management system for +OpenStack. The Identity service authentication process confirms the identity +of a user and an incoming request by validating a set of credentials that the +user supplies. Initially, these credentials are a user name and password or a +user name and API key. When the Identity service validates user credentials, +it issues an authentication token that the user provides in subsequent +requests. An authentication token is an alpha-numeric text string that enables +access to OpenStack APIs and resources. A token may be revoked at any time and +is valid for a finite duration. + +List Users +---------- +A **user** is a digital representation of a person, system, or service that +uses OpenStack cloud services. The Identity service validates that incoming +requests are made by the user who claims to be making the call. Users have +a login and can access resources by using assigned tokens. Users can be +directly assigned to a particular project and behave as if they are contained +in that project. + +.. literalinclude:: ../examples/identity/list.py + :pyobject: list_users + +Full example: `identity resource list`_ + +List Credentials +---------------- +**Credentials** are data that confirms the identity of the user. For example, +user name and password, user name and API key, or an authentication token that +the Identity service provides. + +.. literalinclude:: ../examples/identity/list.py + :pyobject: list_credentials + +Full example: `identity resource list`_ + +List Projects +------------- +A **project** is a container that groups or isolates resources or identity +objects. + +.. 
literalinclude:: ../examples/identity/list.py + :pyobject: list_projects + +Full example: `identity resource list`_ + +List Domains +------------ +A **domain** is an Identity service API v3 entity and represents a collection +of projects and users that defines administrative boundaries for the management +of Identity entities. Users can be granted the administrator role for a domain. +A domain administrator can create projects, users, and groups in a domain and +assign roles to users and groups in a domain. + +.. literalinclude:: ../examples/identity/list.py + :pyobject: list_domains + +Full example: `identity resource list`_ + +List Groups +----------- +A **group** is an Identity service API v3 entity and represents a collection of +users that are owned by a domain. A group role granted to a domain or project +applies to all users in the group. Adding users to, or removing users from, a +group respectively grants, or revokes, their role and authentication to the +associated domain or project. + +.. literalinclude:: ../examples/identity/list.py + :pyobject: list_groups + +Full example: `identity resource list`_ + +List Services +------------- +A **service** is an OpenStack service, such as Compute, Object Storage, or +Image service, that provides one or more endpoints through which users can +access resources and perform operations. + +.. literalinclude:: ../examples/identity/list.py + :pyobject: list_services + +Full example: `identity resource list`_ + +List Endpoints +-------------- +An **endpoint** is a network-accessible address, usually a URL, through which +you can access a service. + +.. literalinclude:: ../examples/identity/list.py + :pyobject: list_endpoints + +Full example: `identity resource list`_ + +List Regions +------------ +A **region** is an Identity service API v3 entity and represents a general +division in an OpenStack deployment. You can associate zero or more +sub-regions with a region to make a tree-like structured hierarchy. + +.. 
literalinclude:: ../examples/identity/list.py + :pyobject: list_regions + +Full example: `identity resource list`_ + +.. _identity resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/identity/list.py diff --git a/doc/source/user/guides/image.rst b/doc/source/user/guides/image.rst new file mode 100644 index 0000000000..9065a49319 --- /dev/null +++ b/doc/source/user/guides/image.rst @@ -0,0 +1,97 @@ +Using OpenStack Image +===================== + +Before working with the Image service, you'll need to create a connection +to your OpenStack cloud by following the :doc:`connect` user guide. This will +provide you with the ``conn`` variable used in the examples below. + +The primary resource of the Image service is the image. + +List Images +----------- + +An **image** is a collection of files for a specific operating system +that you use to create or rebuild a server. OpenStack provides +`pre-built images `_. +You can also create custom images, or snapshots, from servers that you have +launched. Images come in different formats and are sometimes called virtual +machine images. + +.. literalinclude:: ../examples/image/list.py + :pyobject: list_images + +Full example: `image resource list`_ + +Create Image +------------ + +Create an image by uploading its data and setting its attributes. + +.. literalinclude:: ../examples/image/create.py + :pyobject: upload_image + +Full example: `image resource create`_ + +Create Image via interoperable image import process +--------------------------------------------------- + +Create an image then use interoperable image import process to download data +from a web URL. + +For more information about the image import process, please check +`interoperable image import`_ + +.. literalinclude:: ../examples/image/import.py + :pyobject: import_image + +Full example: `image resource import`_ + +.. 
_download_image-stream-true: + +Downloading an Image with stream=True +------------------------------------- + +As images are often very large pieces of data, storing their entire contents +in the memory of your application can be less than desirable. A more +efficient method may be to iterate over a stream of the response data. + +By choosing to stream the response content, you determine the ``chunk_size`` +that is appropriate for your needs, meaning only that many bytes of data are +read for each iteration of the loop until all data has been consumed. +See :meth:`requests.Response.iter_content` for more information. + +When you choose to stream an image download, openstacksdk is no longer +able to compute the checksum of the response data for you. This example +shows how you might do that yourself, in a very similar manner to how +the library calculates checksums for non-streamed responses. + +.. literalinclude:: ../examples/image/download.py + :pyobject: download_image_stream + +Downloading an Image with stream=False +-------------------------------------- + +If you wish to download an image's contents all at once and to memory, +simply set ``stream=False``, which is the default. + +.. literalinclude:: ../examples/image/download.py + :pyobject: download_image + +Full example: `image resource download`_ + +Delete Image +------------ + +Delete an image. + +.. literalinclude:: ../examples/image/delete.py + :pyobject: delete_image + +Full example: `image resource delete`_ + +.. _image resource create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/create.py +.. _image resource import: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/import.py +.. _image resource delete: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/delete.py +.. _image resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/list.py +.. 
_image resource download: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/image/download.py +.. _interoperable image import: https://docs.openstack.org/glance/latest/admin/interoperable-image-import.html diff --git a/doc/source/user/guides/intro.rst b/doc/source/user/guides/intro.rst new file mode 100644 index 0000000000..ac53905e40 --- /dev/null +++ b/doc/source/user/guides/intro.rst @@ -0,0 +1,102 @@ +=============== +Getting started +=============== + +openstacksdk aims to talk to any OpenStack cloud. To do this, it requires a +configuration file. openstacksdk favours ``clouds.yaml`` files, but can also +use environment variables. The ``clouds.yaml`` file should be provided by your +cloud provider or deployment tooling. An example: + +.. code-block:: yaml + + clouds: + mordred: + region_name: Dallas + auth: + username: 'mordred' + password: XXXXXXX + project_name: 'demo' + auth_url: 'https://identity.example.com' + +More information on configuring openstacksdk can be found in +:doc:`/user/config/configuration`. + +Given sufficient configuration, you can use openstacksdk to interact with your +cloud. openstacksdk consists of three layers. Most users will make use of the +*proxy* layer. Using the above ``clouds.yaml``, consider listing servers: + +.. code-block:: python + + import openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # List the servers + for server in conn.compute.servers(): + print(server.to_dict()) + +openstacksdk also contains a higher-level *cloud* layer based on logical +operations: + +.. 
code-block:: python + + import openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # List the servers + for server in conn.list_servers(): + print(server.to_dict()) + +The benefit of this layer is mostly seen in more complicated operations that +take multiple steps and where the steps vary across providers. For example: + +.. code-block:: python + + import openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # Upload an image to the cloud + image = conn.create_image( + 'ubuntu-trusty', filename='ubuntu-trusty.qcow2', wait=True) + + # Find a flavor with at least 512M of RAM + flavor = conn.get_flavor_by_ram(512) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public IP address for it. + conn.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) + +Finally, there is the low-level *resource* layer. This provides support for the +basic CRUD operations supported by REST APIs and is the base building block for +the other layers. You typically will not need to use this directly: + +.. 
code-block:: python + + import openstack + import openstack.config.loader + import openstack.compute.v2.server + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + # Initialize connection + conn = openstack.connect(cloud='mordred') + + # List the servers + for server in openstack.compute.v2.server.Server.list(session=conn.compute): + print(server.to_dict()) diff --git a/doc/source/user/guides/key_manager.rst b/doc/source/user/guides/key_manager.rst new file mode 100644 index 0000000000..9b147f63f4 --- /dev/null +++ b/doc/source/user/guides/key_manager.rst @@ -0,0 +1,80 @@ +Using OpenStack Key Manager +=========================== + +Before working with the Key Manager service, you'll need to create a +connection to your OpenStack cloud by following the :doc:`connect` user +guide. This will provide you with the ``conn`` variable used in the examples +below. + +.. contents:: Table of Contents + :local: + +.. note:: Some interactions with the Key Manager service differ from that + of other services in that resources do not have a proper ``id`` parameter, + which is necessary to make some calls. Instead, resources have a separately + named id attribute, e.g., the Secret resource has ``secret_id``. + + The examples below outline when to pass in those id values. + +Create a Secret +--------------- + +The Key Manager service allows you to create new secrets by passing the +attributes of the :class:`~openstack.key_manager.v1.secret.Secret` to the +:meth:`~openstack.key_manager.v1._proxy.Proxy.create_secret` method. + +.. literalinclude:: ../examples/key_manager/create.py + :pyobject: create_secret + +List Secrets +------------ + +Once you have stored some secrets, they are available for you to list +via the :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method. +This method returns a generator, which yields each +:class:`~openstack.key_manager.v1.secret.Secret`. + +.. 
literalinclude:: ../examples/key_manager/list.py + :pyobject: list_secrets + +The :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method can +also make more advanced queries to limit the secrets that are returned. + +.. literalinclude:: ../examples/key_manager/list.py + :pyobject: list_secrets_query + +Get Secret Payload +------------------ + +Once you have received a :class:`~openstack.key_manager.v1.secret.Secret`, +you can obtain the payload for it by passing the secret's id value to +the :meth:`~openstack.key_manager.v1._proxy.Proxy.get_secret` method. +Use the :data:`~openstack.key_manager.v1.secret.Secret.secret_id` attribute +when making this request. + +.. literalinclude:: ../examples/key_manager/get.py + :pyobject: get_secret_payload + +Find Secret +----------- + +To find a secret by name or ID, use the +:meth:`~openstack.key_manager.v1._proxy.Proxy.find_secret` method. +This method can search for a :class:`~openstack.key_manager.v1.secret.Secret` +by either its name or ID, making it flexible when you don't have +the exact secret ID. + +.. literalinclude:: ../examples/key_manager/find.py + :pyobject: find_secret + +Delete Secret +------------- + +To delete a secret, use the +:meth:`~openstack.key_manager.v1._proxy.Proxy.delete_secret` method. +The secret can be identified by its ID or by using +:meth:`~openstack.key_manager.v1._proxy.Proxy.find_secret` to locate +it by name first. + +.. literalinclude:: ../examples/key_manager/delete.py + :pyobject: delete_secret diff --git a/doc/source/user/guides/logging.rst b/doc/source/user/guides/logging.rst new file mode 100644 index 0000000000..6c8a27eee6 --- /dev/null +++ b/doc/source/user/guides/logging.rst @@ -0,0 +1,106 @@ +======= +Logging +======= + +.. note:: TODO(shade) This document is written from a shade POV. It needs to + be combined with the existing logging guide, but also the logging + systems need to be rationalized. + +`openstacksdk` uses `Python Logging`_.
As `openstacksdk` is a library, it does +not configure logging handlers automatically, expecting instead for that to be +the purview of the consuming application. + +Simple Usage +------------ + +For consumers who just want to get a basic logging setup without thinking +about it too deeply, there is a helper method. If used, it should be called +before any other openstacksdk functionality. + +.. autofunction:: openstack.enable_logging + +.. code-block:: python + + import openstack + openstack.enable_logging() + +The ``stream`` parameter controls the stream where log message are written to. +It defaults to `sys.stdout` which will result in log messages being written +to STDOUT. It can be set to another output stream, or to ``None`` to disable +logging to the console. + +The ``path`` parameter sets up logging to log to a file. By default, if +``path`` is given and ``stream`` is not, logging will only go to ``path``. + +You can combine the ``path`` and ``stream`` parameters to log to both places +simultaneously. + +To log messages to a file called ``openstack.log`` and the console on +``stdout``: + +.. code-block:: python + + import sys + import openstack + + openstack.enable_logging( + debug=True, path='openstack.log', stream=sys.stdout) + + +`openstack.enable_logging` also sets up a few other loggers and +squelches some warnings or log messages that are otherwise uninteresting or +unactionable by an openstacksdk user. + +Advanced Usage +-------------- + +`openstacksdk` logs to a set of different named loggers. + +Most of the logging is set up to log to the root ``openstack`` logger. +There are additional sub-loggers that are used at times, primarily so that a +user can decide to turn on or off a specific type of logging. They are listed +below. + +openstack.config + Issues pertaining to configuration are logged to the ``openstack.config`` + logger. 
+ +openstack.iterate_timeout + When `openstacksdk` needs to poll a resource, it does so in a loop that waits + between iterations and ultimately times out. The + ``openstack.iterate_timeout`` logger emits messages for each iteration + indicating it is waiting and for how long. These can be useful to see for + long running tasks so that one can know things are not stuck, but can also + be noisy. + +openstack.fnmatch + `openstacksdk` will try to use `fnmatch`_ on given `name_or_id` arguments. + It's a best effort attempt, so pattern misses are logged to + ``openstack.fnmatch``. A user may not be intending to use an fnmatch + pattern - such as if they are trying to find an image named + ``Fedora 24 [official]``, so these messages are logged separately. + +.. _fnmatch: https://pymotw.com/2/fnmatch/ + +HTTP Tracing +------------ + +HTTP Interactions are handled by `keystoneauth`_. If you want to enable HTTP +tracing while using openstacksdk and are not using `openstack.enable_logging`, +set the log level of the ``keystoneauth`` logger to ``DEBUG``. + +For more information see https://docs.openstack.org/keystoneauth/latest/using-sessions.html#logging + +.. _keystoneauth: https://docs.openstack.org/keystoneauth/latest/ + +Python Logging +-------------- + +Python logging is a standard feature of Python and is documented fully in the +Python Documentation, which varies by version of Python. + +For more information on Python Logging for Python v2, see +https://docs.python.org/2/library/logging.html. + +For more information on Python Logging for Python v3, see +https://docs.python.org/3/library/logging.html. 
diff --git a/doc/source/user/guides/message.rst b/doc/source/user/guides/message.rst new file mode 100644 index 0000000000..ac30f88306 --- /dev/null +++ b/doc/source/user/guides/message.rst @@ -0,0 +1,8 @@ +Using OpenStack Message +======================= + +Before working with the Message service, you'll need to create a connection +to your OpenStack cloud by following the :doc:`connect` user guide. This will +provide you with the ``conn`` variable used in the examples below. + +.. TODO(briancurtin): Implement this guide diff --git a/doc/source/user/guides/network.rst b/doc/source/user/guides/network.rst new file mode 100644 index 0000000000..fc5f4ac0f8 --- /dev/null +++ b/doc/source/user/guides/network.rst @@ -0,0 +1,142 @@ +Using OpenStack Network +======================= + +Before working with the Network service, you'll need to create a connection +to your OpenStack cloud by following the :doc:`connect` user guide. This will +provide you with the ``conn`` variable used in the examples below. + +.. contents:: Table of Contents + :local: + +The primary resource of the Network service is the network. + +List Networks +------------- + +A **network** is an isolated `Layer 2 `_ +networking segment. There are two types of networks, project and provider +networks. Project networks are fully isolated and are not shared with other +projects. Provider networks map to existing physical networks in the data +center and provide external network access for servers. Only an OpenStack +administrator can create provider networks. Networks can be connected via +routers. + +.. literalinclude:: ../examples/network/list.py + :pyobject: list_networks + +Full example: `network resource list`_ + +List Subnets +------------ + +A **subnet** is a block of IP addresses and associated configuration state. +Subnets are used to allocate IP addresses when new ports are created on a +network. + +.. 
literalinclude:: ../examples/network/list.py + :pyobject: list_subnets + +Full example: `network resource list`_ + +List Ports +---------- + +A **port** is a connection point for attaching a single device, such as the +`NIC `_ +of a server, to a network. The port also describes the associated network +configuration, such as the `MAC `_ +and IP addresses to be used on that port. + +.. literalinclude:: ../examples/network/list.py + :pyobject: list_ports + +Full example: `network resource list`_ + +List Security Groups +-------------------- + +A **security group** acts as a virtual firewall for servers. It is a container +for security group rules which specify the type of network traffic and +direction that is allowed to pass through a port. + +.. literalinclude:: ../examples/network/list.py + :pyobject: list_security_groups + +Full example: `network resource list`_ + +List Routers +------------ + +A **router** is a logical component that forwards data packets between +networks. It also provides +`Layer 3 `_ and +`NAT `_ +forwarding to provide external network access for servers on project networks. + +.. literalinclude:: ../examples/network/list.py + :pyobject: list_routers + +Full example: `network resource list`_ + +List Network Agents +------------------- + +A **network agent** is a plugin that handles various tasks used to +implement virtual networks. These agents include neutron-dhcp-agent, +neutron-l3-agent, neutron-metering-agent, and neutron-lbaas-agent, +among others. + +.. literalinclude:: ../examples/network/list.py + :pyobject: list_network_agents + +Full example: `network resource list`_ + +Create Network +-------------- + +Create a project network and subnet. This network can be used when creating +a server and allows the server to communicate with other servers on the +same project network. + +.. 
literalinclude:: ../examples/network/create.py + :pyobject: create_network + +Full example: `network resource create`_ + +Open a Port +----------- + +When creating a security group for a network, you will need to open certain +ports to allow communication via them. For example, you may need to enable +HTTPS access on port 443. + +.. literalinclude:: ../examples/network/security_group_rules.py + :pyobject: open_port + +Full example: `network security group create`_ + +Accept Pings +------------ + +In order to ping a machine on your network within a security group, +you will need to create a rule to allow inbound ICMP packets. + +.. literalinclude:: ../examples/network/security_group_rules.py + :pyobject: allow_ping + +Full example: `network security group create`_ + +Delete Network +-------------- + +Delete a project network and its subnets. + +.. literalinclude:: ../examples/network/delete.py + :pyobject: delete_network + +Full example: `network resource delete`_ + +.. _network resource create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/create.py +.. _network resource delete: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/delete.py +.. _network resource list: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/list.py +.. _network security group create: https://opendev.org/openstack/openstacksdk/src/branch/master/examples/network/security_group_rules.py diff --git a/doc/source/user/guides/object_store.rst b/doc/source/user/guides/object_store.rst new file mode 100644 index 0000000000..dab88096e9 --- /dev/null +++ b/doc/source/user/guides/object_store.rst @@ -0,0 +1,211 @@ +Using OpenStack Object Store +============================ + +Before working with the Object Store service, you'll need to create a +connection to your OpenStack cloud by following the :doc:`connect` user +guide. This will provide you with the ``conn`` variable used in the examples +below. + +.. 
contents:: Table of Contents + :local: + +The primary resources of the Object Store service are containers and objects. + +Working with Containers +----------------------- + +Listing Containers +****************** + +To list existing containers, use the +:meth:`~openstack.object_store.v1._proxy.Proxy.containers` method. :: + + >>> for cont in conn.object_store.containers(): + ... print cont + ... + openstack.object_store.v1.container.Container: {u'count': 5, + u'bytes': 500, u'name': u'my container'} + openstack.object_store.v1.container.Container: {u'count': 0, + u'bytes': 0, u'name': u'empty container'} + openstack.object_store.v1.container.Container: {u'count': 100, + u'bytes': 1000000, u'name': u'another container'} + +The ``containers`` method returns a generator which yields +:class:`~openstack.object_store.v1.container.Container` objects. It handles +pagination for you, which can be adjusted via the ``limit`` argument. +By default, the ``containers`` method will yield as many containers as the +service will return, and it will continue requesting until it receives +no more. :: + + >>> for cont in conn.object_store.containers(limit=500): + ... print(cont) + ... + <500 Containers> + ... another request transparently made to the Object Store service + <500 more Containers> + ... + +Creating Containers +******************* + +To create a container, use the +:meth:`~openstack.object_store.v1._proxy.Proxy.create_container` method. :: + + >>> cont = conn.object_store.create_container(name="new container") + >>> cont + openstack.object_store.v1.container.Container: {'name': u'new container'} + +Working with Container Metadata +******************************* + +To get the metadata for a container, use the +:meth:`~openstack.object_store.v1._proxy.Proxy.get_container_metadata` method. 
+This method either takes the name of a container, or a +:class:`~openstack.object_store.v1.container.Container` object, and it returns +a `Container` object with all of its metadata attributes set. :: + + >>> cont = conn.object_store.get_container_metadata("new container") + openstack.object_store.v1.container.Container: {'content-length': '0', + 'x-container-object-count': '0', 'name': u'new container', + 'accept-ranges': 'bytes', + 'x-trans-id': 'tx22c5de63466e4c05bb104-0054740c39', + 'date': 'Tue, 25 Nov 2014 04:57:29 GMT', + 'x-timestamp': '1416889793.23520', 'x-container-read': '.r:mysite.com', + 'x-container-bytes-used': '0', 'content-type': 'text/plain; charset=utf-8'} + +To set the metadata for a container, use the +:meth:`~openstack.object_store.v1._proxy.Proxy.set_container_metadata` method. +This method takes a :class:`~openstack.object_store.v1.container.Container` +object. For example, to grant another user write access to this container, +you can call `set_container_metadata` passing it the `Container` to update +and keyword argument key/value pairs representing the metadata name and +value to set. :: + + >>> acl = "big_project:another_user" + >>> conn.object_store.set_container_metadata(cont, write_ACL=acl) + openstack.object_store.v1.container.Container: {'content-length': '0', + 'x-container-object-count': '0', + 'name': u'my new container', 'accept-ranges': 'bytes', + 'x-trans-id': 'txc3ee751f971d41de9e9f4-0054740ec1', + 'date': 'Tue, 25 Nov 2014 05:08:17 GMT', + 'x-timestamp': '1416889793.23520', 'x-container-read': '.r:mysite.com', + 'x-container-bytes-used': '0', 'content-type': 'text/plain; charset=utf-8', + 'x-container-write': 'big_project:another_user'} + +Working with Objects +-------------------- + +Objects are held in containers. From an API standpoint, you work with +them using similarly named methods, typically with an additional argument +to specify their container. 
+ +Listing Objects +*************** + +To list the objects that exist in a container, use the +:meth:`~openstack.object_store.v1._proxy.Proxy.objects` method. + +If you have a :class:`~openstack.object_store.v1.container.Container` +object, you can pass it to ``objects``. :: + + >>> print cont.name + pictures + >>> for obj in conn.object_store.objects(cont): + ... print obj + ... + openstack.object_store.v1.container.Object: + {u'hash': u'0522d4ccdf9956badcb15c4087a0c4cb', + u'name': u'pictures/selfie.jpg', u'bytes': 15744, + 'last-modified': u'2014-10-31T06:33:36.618640', + u'last_modified': u'2014-10-31T06:33:36.618640', + u'content_type': u'image/jpeg', 'container': u'pictures', + 'content-type': u'image/jpeg'} + ... + +Similar to the :meth:`~openstack.object_store.v1._proxy.Proxy.containers` +method, ``objects`` returns a generator which yields +:class:`~openstack.object_store.v1.obj.Object` objects stored in the +container. It also handles pagination for you, which you can adjust +with the ``limit`` parameter, otherwise making each request for the maximum +that your Object Store will return. + +If you have the name of a container instead of an object, you can also +pass that to the ``objects`` method. :: + + >>> for obj in conn.object_store.objects("pictures".decode("utf8"), + limit=100): + ... print obj + ... + <100 Objects> + ... another request transparently made to the Object Store service + <100 more Objects> + +Getting Object Data +******************* + +Once you have an :class:`~openstack.object_store.v1.obj.Object`, you get +the data stored inside of it with the +:meth:`~openstack.object_store.v1._proxy.Proxy.get_object_data` method. :: + + >>> print ob.name + message.txt + >>> data = conn.object_store.get_object_data(ob) + >>> print data + Hello, world! 
+ +Additionally, if you want to save the object to disk, the +:meth:`~openstack.object_store.v1._proxy.Proxy.download_object` convenience +method takes an :class:`~openstack.object_store.v1.obj.Object` and a +``path`` to write the contents to. :: + + >>> conn.object_store.download_object(ob, "the_message.txt") + +Uploading Objects +***************** + +Once you have data you'd like to store in the Object Store service, you use +the :meth:`~openstack.object_store.v1._proxy.Proxy.upload_object` method. +This method takes the ``data`` to be stored, along with at least an object +``name`` and the ``container`` it is to be stored in. :: + + >>> hello = conn.object_store.upload_object(container="messages", + name="helloworld.txt", + data="Hello, world!") + >>> print hello + openstack.object_store.v1.container.Object: {'content-length': '0', + 'container': u'messages', 'name': u'helloworld.txt', + 'last-modified': 'Tue, 25 Nov 2014 17:39:29 GMT', + 'etag': '5eb63bbbe01eeed093cb22bb8f5acdc3', + 'x-trans-id': 'tx3035d41b03334aeaaf3dd-005474bed0', + 'date': 'Tue, 25 Nov 2014 17:39:28 GMT', + 'content-type': 'text/html; charset=UTF-8'} + +Working with Object Metadata +**************************** + +Working with metadata on objects is identical to how it's done with +containers. You use the +:meth:`~openstack.object_store.v1._proxy.Proxy.get_object_metadata` and +:meth:`~openstack.object_store.v1._proxy.Proxy.set_object_metadata` methods. + +The metadata attributes to be set can be found on the +:class:`~openstack.object_store.v1.obj.Object` object. :: + + >>> secret.delete_after = 300 + >>> secret = conn.object_store.set_object_metadata(secret) + +We set the :attr:`~openstack.object_store.obj.Object.delete_after` +value to 300 seconds, causing the object to be deleted in 300 seconds, +or five minutes. That attribute corresponds to the ``X-Delete-After`` +header value, which you can see is returned when we retrieve the updated +metadata. 
:: + + >>> conn.object_store.get_object_metadata(ob) + openstack.object_store.v1.container.Object: {'content-length': '11', + 'container': u'Secret Container', + 'name': u'selfdestruct.txt', 'x-delete-after': 300, + 'accept-ranges': 'bytes', 'last-modified': 'Tue, 25 Nov 2014 17:50:45 GMT', + 'etag': '5eb63bbbe01eeed093cb22bb8f5acdc3', + 'x-timestamp': '1416937844.36805', + 'x-trans-id': 'tx5c3fd94adf7c4e1b8f334-005474c17b', + 'date': 'Tue, 25 Nov 2014 17:50:51 GMT', 'content-type': 'text/plain'} diff --git a/doc/source/users/guides/orchestration.rst b/doc/source/user/guides/orchestration.rst similarity index 100% rename from doc/source/users/guides/orchestration.rst rename to doc/source/user/guides/orchestration.rst diff --git a/doc/source/user/guides/shared_file_system.rst b/doc/source/user/guides/shared_file_system.rst new file mode 100644 index 0000000000..dc6b5d021d --- /dev/null +++ b/doc/source/user/guides/shared_file_system.rst @@ -0,0 +1,200 @@ +Using OpenStack Shared File Systems +=================================== + +Before working with the Shared File System service, you'll need to create a +connection to your OpenStack cloud by following the :doc:`connect` user +guide. This will provide you with the ``conn`` variable used in the examples +below. + +.. contents:: Table of Contents + :local: + + +List Availability Zones +----------------------- + +A Shared File System service **availability zone** is a failure domain for +your shared file systems. You may create a shared file system (referred +to simply as **shares**) in a given availability zone, and create replicas +of the share in other availability zones. + +.. literalinclude:: ../examples/shared_file_system/availability_zones.py + :pyobject: list_availability_zones + + +Share Instances +--------------- + +Administrators can list, show information for, explicitly set the state of, +and force-delete share instances. + +.. 
literalinclude:: ../examples/shared_file_system/share_instances.py + :pyobject: share_instances + + +Get Share Instance +------------------ + +Shows details for a single share instance. + +.. literalinclude:: ../examples/shared_file_system/share_instances.py + :pyobject: get_share_instance + + +Reset Share Instance Status +--------------------------- + +Explicitly updates the state of a share instance. + +.. literalinclude:: ../examples/shared_file_system/share_instances.py + :pyobject: reset_share_instance_status + + +Delete Share Instance +--------------------- + +Force-deletes a share instance. + +.. literalinclude:: ../examples/shared_file_system/share_instances.py + :pyobject: delete_share_instance + + +Resize Share +------------ + +Shared File System shares can be resized (extended or shrunk) to a given +size. For details on resizing shares, refer to the +`Manila docs `_. + +.. literalinclude:: ../examples/shared_file_system/shares.py + :pyobject: resize_share +.. literalinclude:: ../examples/shared_file_system/shares.py + :pyobject: resize_shares_without_shrink + + +List Share Group Snapshots +-------------------------- + +A share group snapshot is a point-in-time, read-only copy of the data that is +contained in a share group. You can list all share group snapshots + +.. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py + :pyobject: list_share_group_snapshots + + +Get Share Group Snapshot +------------------------ + +Show share group snapshot details + +.. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py + :pyobject: get_share_group_snapshot + + +List Share Group Snapshot Members +--------------------------------- + +Lists all share group snapshots members. + +.. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py + :pyobject: share_group_snapshot_members + + +Create Share Group Snapshot +--------------------------- + +Creates a snapshot from a share group. + +.. 
literalinclude:: ../examples/shared_file_system/share_group_snapshots.py + :pyobject: create_share_group_snapshot + + +Reset Share Group Snapshot +--------------------------- + +Reset share group snapshot state. + +.. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py + :pyobject: reset_share_group_snapshot_status + + +Update Share Group Snapshot +--------------------------- + +Updates a share group snapshot. + +.. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py + :pyobject: update_share_group_snapshot + + +Delete Share Group Snapshot +--------------------------- + +Deletes a share group snapshot. + +.. literalinclude:: ../examples/shared_file_system/share_group_snapshots.py + :pyobject: delete_share_group_snapshot + + +List Share Metadata +-------------------- + +Lists all metadata for a given share. + +.. literalinclude:: ../examples/shared_file_system/share_metadata.py + :pyobject: list_share_metadata + + +Get Share Metadata Item +----------------------- + +Retrieves a specific metadata item from a share's metadata by its key. + +.. literalinclude:: ../examples/shared_file_system/share_metadata.py + :pyobject: get_share_metadata_item + + +Create Share Metadata +---------------------- + +Creates share metadata. + +.. literalinclude:: ../examples/shared_file_system/share_metadata.py + :pyobject: create_share_metadata + + +Update Share Metadata +---------------------- + +Updates metadata of a given share. + +.. literalinclude:: ../examples/shared_file_system/share_metadata.py + :pyobject: update_share_metadata + + +Delete Share Metadata +---------------------- + +Deletes a specific metadata item from a share's metadata by its key. Can +specify multiple keys to be deleted. + +.. literalinclude:: ../examples/shared_file_system/share_metadata.py + :pyobject: delete_share_metadata + + +Manage Share +------------ + +Manage a share with Manila. + +.. 
literalinclude:: ../examples/shared_file_system/shares.py + :pyobject: manage_share + + +Unmanage Share +-------------- + +Unmanage a share from Manila. + +.. literalinclude:: ../examples/shared_file_system/shares.py + :pyobject: unmanage_share diff --git a/doc/source/user/guides/stats.rst b/doc/source/user/guides/stats.rst new file mode 100644 index 0000000000..19e302f987 --- /dev/null +++ b/doc/source/user/guides/stats.rst @@ -0,0 +1,88 @@ +==================== +Statistics reporting +==================== + +`openstacksdk` can report statistics on individual API +requests/responses in several different formats. + +Note that metrics will be reported only when corresponding client +libraries are installed (`statsd` for 'statsd' reporting, `influxdb` for +influxdb, etc.). If libraries are not available, reporting will be silently +ignored. + +statsd +------ + +`statsd` can be configured via configuration entries or environment +variables. + +A global `metrics` entry defines defaults for all clouds. Each cloud +can specify a `metrics` section to override variables; this may be +useful to separate results reported for each cloud. + +.. code-block:: yaml + + metrics: + statsd: + host: __statsd_server_host__ + port: __statsd_server_port__ + prefix: __statsd_prefix__ (default 'openstack.api') + clouds: + a-cloud: + auth: + ... + metrics: + statsd: + prefix: 'openstack.api.a-cloud' + +If the `STATSD_HOST` or `STATSD_PORT` environment variables are set, +they will be taken as the default values (and enable `statsd` +reporting if no other configuration is specified). + +InfluxDB +-------- + +`InfluxDB `__ is supported via +configuration in the `metrics` field. Similar to `statsd`, each cloud +can provide its own `metrics` section to override any global +defaults. + +.. 
code-block:: yaml + + metrics: + influxdb: + host: __influxdb_server_host__ + port: __influxdb_server_port__ + use_udp: __True|False__ + username: __influxdb_auth_username__ + password: __influxdb_auth_password__ + database: __influxdb_db_name__ + measurement: __influxdb_measurement_name__ + timeout: __infludb_requests_timeout__ + clouds: + .. + +InfluxDB reporting allows setting additional tags into the metrics based on the +selected cloud. + +.. code-block:: yaml + + clouds: + my_cloud: + profile: some_profile + ... + additional_metric_tags: + environment: production + +prometheus +---------- +.. + NOTE(ianw) 2021-04-19 : examples here would be great; this is just terse + description taken from + https://review.opendev.org/c/openstack/openstacksdk/+/614834 + +The prometheus support does not read from config, and does not run an +http service since OpenstackSDK is a library. It is expected that an +application that uses OpenstackSDK and wants request stats be +collected will pass a `prometheus_client.CollectorRegistry` to +`collector_registry`. diff --git a/doc/source/user/index.rst b/doc/source/user/index.rst new file mode 100644 index 0000000000..7522e71164 --- /dev/null +++ b/doc/source/user/index.rst @@ -0,0 +1,195 @@ +Using the OpenStack SDK +======================= + +This section of documentation pertains to those who wish to use this SDK in +their own application. If you're looking for documentation on how to contribute +to or extend the SDK, refer to the `contributor <../contributor>`_ section. + +For a listing of terms used throughout the SDK, including the names of +projects and services supported by it, see the :doc:`glossary <../glossary>`. + +.. _user_guides: + +User Guides +----------- + +These guides walk you through how to make use of the libraries we provide +to work with each OpenStack service. If you're looking for a cookbook +approach, this is where you'll want to begin. + +.. 
toctree:: + :maxdepth: 1 + + Introduction + Configuration + Connect to an OpenStack Cloud + Connect to an OpenStack Cloud Using a Config File + Logging + Statistics reporting + Microversions + Baremetal + Block Storage + Clustering + Compute + Database + DNS + Identity + Image + Key Manager + Message + Network + Object Store + Orchestration + Shared File System + +Testing +------- + +The SDK provides a number of utilities to help you test your applications. + +.. toctree:: + :maxdepth: 1 + + testing/index + +API Documentation +----------------- + +Service APIs are exposed through a two-layered approach. The classes +exposed through our *Connection* interface are the place to start if you're +an application developer consuming an OpenStack cloud. The *Resource* +interface is the layer upon which the *Connection* is built, with +*Connection* methods accepting and returning *Resource* objects. + +The Cloud Abstraction layer has a data model. + +.. toctree:: + :maxdepth: 1 + + model + +Connection Interface +~~~~~~~~~~~~~~~~~~~~ + +A *Connection* instance maintains your cloud config, session and authentication +information providing you with a set of higher-level interfaces to work with +OpenStack services. + +.. toctree:: + :maxdepth: 1 + + connection + +Once you have a *Connection* instance, services are accessed through instances +of :class:`~openstack.proxy.Proxy` or subclasses of it that exist as +attributes on the :class:`~openstack.connection.Connection`. + +.. _service-proxies: + +Service Proxies +~~~~~~~~~~~~~~~ + +The following service proxies exist on the +:class:`~openstack.connection.Connection`. The service proxies are all always +present on the :class:`~openstack.connection.Connection` object, but the +combination of your ``CloudRegion`` and the catalog of the cloud in question +control which services can be used. + +.. 
toctree:: + :maxdepth: 1 + + Accelerator + Baremetal + Baremetal Introspection + Block Storage v2 + Block Storage v3 + Clustering + Compute + Container Infrastructure Management + Database + DNS + Identity v2 + Identity v3 + Image v1 + Image v2 + Key Manager + Load Balancer + Message v2 + Network + Object Store + Orchestration + Placement + Shared File System + Workflow + +Resource Interface +~~~~~~~~~~~~~~~~~~ + +The *Resource* layer is a lower-level interface to communicate with OpenStack +services. While the classes exposed by the *Connection* build a convenience +layer on top of this, *Resources* can be used directly. However, the most +common usage of this layer is in receiving an object from a class in the +*Connection* layer, modifying it, and sending it back into the *Connection* +layer, such as to update a resource on the server. + +The following services have exposed *Resource* classes. + +.. toctree:: + :maxdepth: 1 + + Accelerator + Baremetal + Baremetal Introspection + Block Storage + Clustering + Compute + Container Infrastructure Management + Database + DNS + Identity + Image + Key Management + Load Balancer + Network + Orchestration + Object Store + Placement + Shared File System + Workflow + +Low-Level Classes +~~~~~~~~~~~~~~~~~ + +The following classes are not commonly used by application developers, +but are used to construct applications to talk to OpenStack APIs. Typically +these parts are managed through the `Connection Interface`_, but their use +can be customized. + +.. toctree:: + :maxdepth: 1 + + resource + service_description + utils + +Errors and warnings +~~~~~~~~~~~~~~~~~~~ + +The SDK attempts to provide detailed errors and warnings for things like failed +requests, deprecated APIs, and invalid configurations. Application developers +are responsible for handling these errors and can opt into warnings to ensure +their applications stay up-to-date. + +.. 
toctree:: + :maxdepth: 1 + + exceptions + warnings + +Presentations +------------- + +.. toctree:: + :maxdepth: 1 + + multi-cloud-demo diff --git a/doc/source/user/microversions.rst b/doc/source/user/microversions.rst new file mode 100644 index 0000000000..fcef096562 --- /dev/null +++ b/doc/source/user/microversions.rst @@ -0,0 +1,99 @@ +Microversions +============= + +As openstacksdk rolls out support for consuming microversions, it will do so +on a call by call basis as needed. Just like with major versions, openstacksdk +should have logic to handle each microversion for a given REST call it makes, +with the following rules in mind: + +* If an activity openstack performs can be done differently or more efficiently + with a new microversion, the support should be added to openstack.cloud and + to the appropriate Proxy class. + +* openstacksdk should always attempt to use the latest microversion it is aware + of for a given call, unless a microversion removes important data. + +* Microversion selection should under no circumstances be exposed to the user + in python API calls in the Resource layer or the openstack.cloud layer. + +* Microversion selection is exposed to the user in the REST layer via the + ``microversion`` argument to each REST call. + +* A user of the REST layer may set the default microversion by setting + ``{service_type}_default_microversion`` in clouds.yaml or + ``OS_{service_type|upper}_DEFAULT_MICROVERSION`` environment variable. + +.. note:: + + Setting the default microversion in any circumstance other than when using + the REST layer is highly discouraged. Both of the higher layers in + openstacksdk provide data normalization as well as logic about which REST + call to make. Setting the default microversion could change the behavior + of the service in question in such a way that openstacksdk does not + understand. 
If there is a feature of a service that needs a microversion + and it is not already transparently exposed in openstacksdk, please file + a bug. + +* If a feature is only exposed for a given microversion and cannot be simulated + for older clouds without that microversion, it is ok to add it, but + a clear error message should be given to the user that the given feature is + not available on their cloud. (A message such as "This cloud supports + a maximum microversion of XXX for service YYY and this feature only exists + on clouds with microversion ZZZ. Please contact your cloud provider for + information about when this feature might be available") + +* When adding a feature that only exists behind a new microversion, + every effort should be made to figure out how to provide the same + functionality if at all possible, even if doing so is inefficient. If an + inefficient workaround is employed, a warning should be provided to the + user. (the user's workaround to skip the inefficient behavior would be to + stop using that openstacksdk API call) An example of this is the nova + "get me a network" feature. The logic of "get me a network" can be done + client-side, albeit less efficiently. Adding support for the + "get me a network" feature via nova microversion should also add support for + doing the client-side workaround. + +* If openstacksdk is aware of logic for more than one microversion, it should + always attempt to use the latest version available for the service for that + call. + +* Objects returned from openstacksdk should always go through normalization and + thus should always conform to openstacksdk's documented data model. The + objects should never look different to the user regardless of the + microversion used for the REST call. 
+ +* If a microversion adds new fields to an object, those fields should be + added to openstacksdk's data model contract for that object and the data + should either be filled in by performing additional REST calls if the data is + available that way, or the field should have a default value of None which + the user can be expected to test for when attempting to use the new value. + +* If a microversion removes fields from an object that are part of the + existing data model contract, care should be taken to not use the new + microversion for that call unless forced to by lack of availablity of the + old microversion on the cloud in question. In the case where an old + microversion is no longer available, care must be taken to either find the + data from another source and fill it in, or to put a value of None into the + field and document for the user that on some clouds the value may not exist. + +* If a microversion removes a field and the outcome is particularly intractable + and impossible to work around without fundamentally breaking users, + an issue should be raised with the service team in question. Hopefully a + resolution can be found during the period while clouds still have the old + microversion. + +* As new calls or objects are added, it is important to check in with + the service team in question on the expected stability of the object. If + there are known changes expected in the future, even if they may be a few + years off, openstacksdk should take care to not add committments to its data + model for those fields/features. It is ok for openstacksdk to not have + something. + + .. note:: + openstacksdk does not currently have any sort of "experimental" opt-in API + that would allow exposing things to a user that may not be supportable + under the normal compatibility contract. 
If a conflict arises in the + future where there is a strong desire for a feature but also a lack of + certainty about its stability over time, an experimental API may want to + be explored ... but concrete use cases should arise before such a thing + is started. diff --git a/doc/source/user/model.rst b/doc/source/user/model.rst new file mode 100644 index 0000000000..1c270e364e --- /dev/null +++ b/doc/source/user/model.rst @@ -0,0 +1,66 @@ +Data Model +========== + +*openstacksdk* has a very strict policy on not breaking backwards compatibility +ever. However, with the data structures returned from OpenStack, there are +places where the resource structures from OpenStack are returned to the user +somewhat directly, leaving an openstacksdk user open to changes/differences in +result content. + +To combat that, openstacksdk 'normalizes' the return structure from OpenStack +in many places, and the results of that normalization are listed below. Where +openstacksdk performs normalization, a user can count on any fields declared in +the docs as being completely safe to use - they are as much a part of +openstacksdk's API contract as any other Python method. + +Some OpenStack objects allow for arbitrary attributes at the root of the +object. openstacksdk will pass those through so as not to break anyone who may +be counting on them, but as they are arbitrary openstacksdk can make no +guarantees as to their existence. As part of normalization, openstacksdk will +put any attribute from an OpenStack resource that is not in its data model +contract into an attribute called 'properties'. The contents of properties are +defined to be an arbitrary collection of key value pairs with no promises as to +any particular key ever existing. + +If a user passes ``strict=True`` to the openstacksdk constructor, openstacksdk +will not pass through arbitrary objects to the root of the resource, and will +instead only put them in the properties dict. 
If a user is worried about +accidentally writing code that depends on an attribute that is not part of the +API contract, this can be a useful tool. Keep in mind all data can still be +accessed via the properties dict, but any code touching anything in the +properties dict should be aware that the keys found there are highly user/cloud +specific. Any key that is transformed as part of the openstacksdk data model +contract will not wind up with an entry in properties - only keys that are +unknown. + +The ``location`` field +---------------------- + +A Location defines where a resource lives. It includes a cloud name and a +region name, an availability zone as well as information about the project +that owns the resource. + +The project information may contain a project ID, or a combination of one or +more of a project name with a domain name or ID. If a project ID is present, +it should be considered correct. + +Some resources do not carry ownership information with them. For those, the +project information will be filled in from the project the user currently +has a token for. + +Some resources do not have information about availability zones, or may exist +region wide. Those resources will have None as their availability zone. + +.. code-block:: python + + Location = dict( + cloud=str(), + region_name=str(), + zone=str() or None, + project=dict( + id=str() or None, + name=str() or None, + domain_id=str() or None, + domain_name=str() or None, + ) + ) diff --git a/doc/source/user/multi-cloud-demo.rst b/doc/source/user/multi-cloud-demo.rst new file mode 100644 index 0000000000..7aace3f3c4 --- /dev/null +++ b/doc/source/user/multi-cloud-demo.rst @@ -0,0 +1,813 @@ +Multi-Cloud Demo +================ + +This document contains a presentation in `presentty`_ format. If you want to +walk through it like a presentation, install `presentty` and run: + +.. 
code:: bash + + presentty doc/source/user/multi-cloud-demo.rst + +The content is hopefully helpful even if it's not being narrated, so it's being +included in the openstacksdk docs. + +.. _presentty: https://pypi.org/project/presentty + +Who am I? +--------- + +Monty Taylor + +* OpenStack Infra Core +* irc: mordred +* twitter: @e_monty + +What are we going to talk about? +-------------------------------- + +`OpenStackSDK` + +* a task and end-user oriented Python library +* abstracts deployment differences +* designed for multi-cloud +* simple to use +* massive scale + + * optional advanced features to handle 20k servers a day + +* Initial logic/design extracted from nodepool +* Librified to re-use in Ansible + +OpenStackSDK is Free Software +----------------------------- + +* https://opendev.org/openstack/openstacksdk +* openstack-discuss@lists.openstack.org +* #openstack-sdks on oftc + +This talk is Free Software, too +------------------------------- + +* Written for presentty (https://pypi.org/project/presentty) +* doc/source/user/multi-cloud-demo.rst +* examples in examples/cloud +* Paths subject to change - this is the first presentation in tree! + +Complete Example +---------------- + +.. code:: python + + from openstack import cloud as openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + for cloud_name, region_name in [ + ('my-vexxhost', 'ca-ymq-1'), + ('my-citycloud', 'Buf1'), + ('my-internap', 'ams01'), + ]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + + # Upload an image to the cloud + image = cloud.create_image( + 'devuan-jessie', + filename='devuan-jessie.qcow2', + wait=True, + ) + + # Find a flavor with at least 512M of RAM + flavor = cloud.get_flavor_by_ram(512) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. 
+ cloud.create_server( + 'my-server', + image=image, + flavor=flavor, + wait=True, + auto_ip=True, + ) + +Let's Take a Few Steps Back +--------------------------- + +Multi-cloud is easy, but you need to know a few things. + +* Terminology +* Config +* OpenStackSDK API + +Cloud Terminology +----------------- + +Let's define a few terms, so that we can use them with ease: + +* `cloud` - logically related collection of services +* `region` - completely independent subset of a given cloud +* `patron` - human who has an account +* `user` - account on a cloud +* `project` - logical collection of cloud resources +* `domain` - collection of users and projects + +Cloud Terminology Relationships +------------------------------- + +* A `cloud` has one or more `regions` +* A `patron` has one or more `users` +* A `patron` has one or more `projects` +* A `cloud` has one or more `domains` +* In a `cloud` with one `domain` it is named "default" +* Each `patron` may have their own `domain` +* Each `user` is in one `domain` +* Each `project` is in one `domain` +* A `user` has one or more `roles` on one or more `projects` + +HTTP Sessions +------------- + +* HTTP interactions are authenticated via keystone +* Authenticating returns a `token` +* An authenticated HTTP Session is shared across a `region` + +Cloud Regions +------------- + +A `cloud region` is the basic unit of REST interaction. + +* A `cloud` has a `service catalog` +* The `service catalog` is returned in the `token` +* The `service catalog` lists `endpoint` for each `service` in each `region` +* A `region` is completely autonomous + +Users, Projects and Domains +--------------------------- + +In clouds with multiple domains, project and user names are +only unique within a region. + +* Names require `domain` information for uniqueness. IDs do not. +* Providing `domain` information when not needed is fine. 
+* `project_name` requires `project_domain_name` or `project_domain_id` +* `project_id` does not +* `username` requires `user_domain_name` or `user_domain_id` +* `user_id` does not + +Confused Yet? +------------- + +Don't worry - you don't have to deal with most of that. + +Auth per cloud, select per region +--------------------------------- + +In general, the thing you need to know is: + +* Configure authentication per `cloud` +* Select config to use by `cloud` and `region` + +clouds.yaml +----------- + +Information about the clouds you want to connect to is stored in a file +called `clouds.yaml`. + +`clouds.yaml` can be in your homedir: `~/.config/openstack/clouds.yaml` +or system-wide: `/etc/openstack/clouds.yaml`. + +Information in your homedir, if it exists, takes precedence. + +Full docs on `clouds.yaml` are at +https://docs.openstack.org/os-client-config/latest/ + +What about Mac and Windows? +--------------------------- + +`USER_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `~/.config/openstack` +* OSX: `~/Library/Application Support/openstack` +* Windows: `C:\\Users\\USERNAME\\AppData\\Local\\OpenStack\\openstack` + +`SITE_CONFIG_DIR` is different on Linux, OSX and Windows. + +* Linux: `/etc/openstack` +* OSX: `/Library/Application Support/openstack` +* Windows: `C:\\ProgramData\\OpenStack\\openstack` + +Config Terminology +------------------ + +For multi-cloud, think of two types: + +* `profile` - Facts about the `cloud` that are true for everyone +* `cloud` - Information specific to a given `user` + +Apologies for the use of `cloud` twice. 
+ +Environment Variables and Simple Usage +-------------------------------------- + +* Environment variables starting with `OS_` go into a cloud called `envvars` +* If you only have one cloud, you don't have to specify it +* `OS_CLOUD` and `OS_REGION_NAME` are default values for + `cloud` and `region_name` + +TOO MUCH TALKING - NOT ENOUGH CODE +---------------------------------- + +basic clouds.yaml for the example code +-------------------------------------- + +Simple example of a clouds.yaml + +* Config for a named `cloud` "my-citycloud" +* Reference a well-known "named" profile: `citycloud` +* `os-client-config` has a built-in list of profiles at + https://docs.openstack.org/openstacksdk/latest/user/config/vendor-support.html +* Vendor profiles contain various advanced config +* `cloud` name can match `profile` name (using different names for clarity) + +.. code:: yaml + + clouds: + my-citycloud: + profile: citycloud + auth: + username: mordred + project_id: 65222a4d09ea4c68934fa1028c77f394 + user_domain_id: d0919bd5e8d74e49adf0e145807ffc38 + project_domain_id: d0919bd5e8d74e49adf0e145807ffc38 + +Where's the password? + +secure.yaml +----------- + +* Optional additional file just like `clouds.yaml` +* Values overlaid on `clouds.yaml` +* Useful if you want to protect secrets more stringently + +Example secure.yaml +------------------- + +* No, my password isn't XXXXXXXX +* `cloud` name should match `clouds.yaml` +* Optional - I actually keep mine in my `clouds.yaml` + +.. code:: yaml + + clouds: + my-citycloud: + auth: + password: XXXXXXXX + +more clouds.yaml +---------------- + +More information can be provided. + +* Use v3 of the `identity` API - even if others are present +* Use `https://image-ca-ymq-1.vexxhost.net/v2` for `image` API + instead of what's in the catalog + +.. 
code:: yaml + + my-vexxhost: + identity_api_version: 3 + image_endpoint_override: https://image-ca-ymq-1.vexxhost.net/v2 + profile: vexxhost + auth: + user_domain_id: default + project_domain_id: default + project_name: d8af8a8f-a573-48e6-898a-af333b970a2d + username: 0b8c435b-cc4d-4e05-8a47-a2ada0539af1 + +Much more complex clouds.yaml example +------------------------------------- + +* Not using a profile - all settings included +* In the `ams01` `region` there are two networks with undiscoverable qualities +* Each one are labeled here so choices can be made +* Any of the settings can be specific to a `region` if needed +* `region` settings override `cloud` settings +* `cloud` does not support `floating-ips` + +.. code:: yaml + + my-internap: + auth: + auth_url: https://identity.api.cloud.inap.com + username: api-55f9a00fb2619 + project_name: inap-17037 + identity_api_version: 3 + floating_ip_source: None + regions: + - name: ams01 + values: + networks: + - name: inap-17037-WAN1654 + routes_externally: true + default_interface: true + - name: inap-17037-LAN3631 + routes_externally: false + +Complete Example Again +---------------------- + +.. code:: python + + from openstack import cloud as openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + for cloud_name, region_name in [ + ('my-vexxhost', 'ca-ymq-1'), + ('my-citycloud', 'Buf1'), + ('my-internap', 'ams01')]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + + # Upload an image to the cloud + image = cloud.create_image( + 'devuan-jessie', filename='devuan-jessie.qcow2', wait=True) + + # Find a flavor with at least 512M of RAM + flavor = cloud.get_flavor_by_ram(512) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. 
+ cloud.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) + +Step By Step +------------ + +Import the library +------------------ + +.. code:: python + + from openstack import cloud as openstack + +Logging +------- + +* `openstacksdk` uses standard python logging +* ``openstack.enable_logging`` does easy defaults +* Squelches some meaningless warnings + + * `debug` + + * Logs openstacksdk loggers at debug level + + * `http_debug` Implies `debug`, turns on HTTP tracing + +.. code:: python + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + +Example with Debug Logging +-------------------------- + +* examples/cloud/debug-logging.py + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging(debug=True) + + cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1') + cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') + +Example with HTTP Debug Logging +------------------------------- + +* examples/cloud/http-debug-logging.py + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging(http_debug=True) + + cloud = openstack.connect( + cloud='my-vexxhost', region_name='ca-ymq-1') + cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') + +Cloud Regions +------------- + +* `cloud` constructor needs `cloud` and `region_name` +* `openstack.connect` is a helper factory function + +.. code:: python + + for cloud_name, region_name in [ + ('my-vexxhost', 'ca-ymq-1'), + ('my-citycloud', 'Buf1'), + ('my-internap', 'ams01') + ]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + +Upload an Image +--------------- + +* Picks the correct upload mechanism +* **SUGGESTION** Always upload your own base images + +.. 
code:: python + + # Upload an image to the cloud + image = cloud.create_image( + 'devuan-jessie', + filename='devuan-jessie.qcow2', + wait=True, + ) + +Always Upload an Image +---------------------- + +Ok. You don't have to. But, for multi-cloud... + +* Images with same content are named different on different clouds +* Images with same name on different clouds can have different content +* Upload your own to all clouds, both problems go away +* Download from OS vendor or build with `diskimage-builder` + +Find a flavor +------------- + +* Flavors are all named differently on clouds +* Flavors can be found via RAM +* `get_flavor_by_ram` finds the smallest matching flavor + +.. code:: python + + # Find a flavor with at least 512M of RAM + flavor = cloud.get_flavor_by_ram(512) + +Create a server +--------------- + +* my-vexxhost + + * Boot server + * Wait for `status==ACTIVE` + +* my-internap + + * Boot server on network `inap-17037-WAN1654` + * Wait for `status==ACTIVE` + +* my-citycloud + + * Boot server + * Wait for `status==ACTIVE` + * Find the `port` for the `fixed_ip` for `server` + * Create `floating-ip` on that `port` + * Wait for `floating-ip` to attach + +.. code:: python + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. + cloud.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) + +Wow. We didn't even deploy Wordpress! +------------------------------------- + +Image and Flavor by Name or ID +------------------------------ + +* Pass string to image/flavor +* Image/Flavor will be found by name or ID +* Common pattern +* examples/cloud/create-server-name-or-id.py + +.. 
code:: python + + from openstack import cloud as openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + for cloud_name, region_name, image, flavor in [ + ('my-vexxhost', 'ca-ymq-1', + 'Ubuntu 16.04.1 LTS [2017-03-03]', 'v1-standard-4'), + ('my-citycloud', 'Buf1', + 'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'), + ('my-internap', 'ams01', + 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4')]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. + server = cloud.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True) + print(server.name) + print(server['name']) + cloud.pprint(server) + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) + +Delete Servers +-------------- + +* `delete_ips` Delete any `floating_ips` the server may have + +.. code:: python + + cloud.delete_server('my-server', wait=True, delete_ips=True) + +Image and Flavor by Dict +------------------------ + +* Pass dict to image/flavor +* If you know if the value is Name or ID +* Common pattern +* examples/cloud/create-server-dict.py + +.. code:: python + + from openstack import cloud as openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + for cloud_name, region_name, image, flavor_id in [ + ('my-vexxhost', 'ca-ymq-1', 'Ubuntu 16.04.1 LTS [2017-03-03]', + '5cf64088-893b-46b5-9bb1-ee020277635d'), + ('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', + '0dab10b5-42a2-438e-be7b-505741a7ffcc'), + ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', + 'A1.4')]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. 
+ server = cloud.create_server( + 'my-server', image=image, flavor=dict(id=flavor_id), + wait=True, auto_ip=True) + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) + +Munch Objects +------------- + +* Behave like a dict and an object +* examples/cloud/munch-dict-object.py + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging(debug=True) + + cloud = openstack.connect(cloud='zetta', region_name='no-osl1') + image = cloud.get_image('Ubuntu 14.04 (AMD64) [Local Storage]') + print(image.name) + print(image['name']) + +API Organized by Logical Resource +--------------------------------- + +* list_servers +* search_servers +* get_server +* create_server +* delete_server +* update_server + +For other things, it's still {verb}_{noun} + +* attach_volume +* wait_for_server +* add_auto_ip + +Cleanup Script +-------------- + +* Sometimes my examples had bugs +* examples/cloud/cleanup-servers.py + +.. code:: python + + from openstack import cloud as openstack + + # Initialize and turn on debug logging + openstack.enable_logging(debug=True) + + for cloud_name, region_name in [ + ('my-vexxhost', 'ca-ymq-1'), + ('my-citycloud', 'Buf1'), + ('my-internap', 'ams01')]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + for server in cloud.search_servers('my-server'): + cloud.delete_server(server, wait=True, delete_ips=True) + +Normalization +------------- + +* examples/cloud/normalization.py + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging() + + cloud = openstack.connect(cloud='fuga', region_name='cystack') + image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') + cloud.pprint(image) + +Strict Normalized Results +------------------------- + +* Return only the declared model +* examples/cloud/strict-mode.py + +.. 
code:: python + + from openstack import cloud as openstack + openstack.enable_logging() + + cloud = openstack.connect( + cloud='fuga', region_name='cystack', strict=True) + image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image') + cloud.pprint(image) + +How Did I Find the Image Name for the Last Example? +--------------------------------------------------- + +* I often make stupid little utility scripts +* examples/cloud/find-an-image.py + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging() + + cloud = openstack.connect(cloud='fuga', region_name='cystack') + cloud.pprint([ + image for image in cloud.list_images() + if 'ubuntu' in image.name.lower()]) + +Added / Modified Information +---------------------------- + +* Servers need more extra help +* Fetch addresses dict from neutron +* Figure out which IPs are good +* `detailed` - defaults to True, add everything +* `bare` - no extra calls - don't even fix broken things +* `bare` is still normalized +* examples/cloud/server-information.py + +.. 
code:: python + + from openstack import cloud as openstack + openstack.enable_logging(debug=True) + + cloud = openstack.connect(cloud='my-citycloud', region_name='Buf1') + try: + server = cloud.create_server( + 'my-server', image='Ubuntu 16.04 Xenial Xerus', + flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'), + wait=True, auto_ip=True) + + print("\n\nFull Server\n\n") + cloud.pprint(server) + + print("\n\nTurn Detailed Off\n\n") + cloud.pprint(cloud.get_server('my-server', detailed=False)) + + print("\n\nBare Server\n\n") + cloud.pprint(cloud.get_server('my-server', bare=True)) + + finally: + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) + +Exceptions +---------- + +* All openstacksdk exceptions are subclasses of `OpenStackCloudException` +* Direct REST calls throw `OpenStackCloudHTTPError` +* `OpenStackCloudHTTPError` subclasses `OpenStackCloudException` + and `requests.exceptions.HTTPError` +* `OpenStackCloudURINotFound` for 404 +* `OpenStackCloudBadRequest` for 400 + +User Agent Info +--------------- + +* Set `app_name` and `app_version` for User Agents +* (ssh ... `region_name` is optional if the cloud has one region) +* examples/cloud/user-agent.py + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging(http_debug=True) + + cloud = openstack.connect( + cloud='datacentred', + app_name='AmazingApp', + app_version='1.0', + ) + cloud.list_networks() + +Uploading Large Objects +----------------------- + +* swift has a maximum object size +* Large Objects are uploaded specially +* openstacksdk figures this out and does it +* multi-threaded +* examples/cloud/upload-object.py + +.. 
code:: python + + from openstack import cloud as openstack + openstack.enable_logging(debug=True) + + cloud = openstack.connect(cloud='ovh', region_name='SBG1') + cloud.create_object( + container='my-container', + name='my-object', + filename='/home/mordred/briarcliff.sh3d', + ) + cloud.delete_object('my-container', 'my-object') + cloud.delete_container('my-container') + +Uploading Large Objects +----------------------- + +* Default max_file_size is 5G +* This is a conference demo +* Let's force a segment_size +* One MILLION bytes +* examples/cloud/upload-object.py + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging(debug=True) + + cloud = openstack.connect(cloud='ovh', region_name='SBG1') + cloud.create_object( + container='my-container', + name='my-object', + filename='/home/mordred/briarcliff.sh3d', + segment_size=1000000, + ) + cloud.delete_object('my-container', 'my-object') + cloud.delete_container('my-container') + +Service Conditionals +-------------------- + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging(debug=True) + + cloud = openstack.connect(cloud='kiss', region_name='region1') + print(cloud.has_service('network')) + print(cloud.has_service('container-orchestration')) + +Service Conditional Overrides +----------------------------- + +* Sometimes clouds are weird and figuring that out won't work + +.. code:: python + + from openstack import cloud as openstack + openstack.enable_logging(debug=True) + + cloud = openstack.connect(cloud='rax', region_name='DFW') + print(cloud.has_service('network')) + +.. 
code:: yaml + + clouds: + rax: + profile: rackspace + auth: + username: mordred + project_id: 245018 + # This is already in profile: rackspace + has_network: false + +FIN +--- diff --git a/doc/source/user/proxies/accelerator.rst b/doc/source/user/proxies/accelerator.rst new file mode 100644 index 0000000000..d47933a4c5 --- /dev/null +++ b/doc/source/user/proxies/accelerator.rst @@ -0,0 +1,56 @@ +Accelerator API +=============== + +.. automodule:: openstack.accelerator.v2._proxy + +The Accelerator Class +--------------------- + +The accelerator high-level interface is available through the ``accelerator`` +member of a :class:`~openstack.connection.Connection` object. +The ``accelerator`` member will only be added if the service is detected. + +Deployable Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.accelerator.v2._proxy.Proxy + :noindex: + :members: deployables, get_deployable, update_deployable + +Device Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.accelerator.v2._proxy.Proxy + :noindex: + :members: devices, get_device + +Device Profile Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.accelerator.v2._proxy.Proxy + :noindex: + :members: device_profiles, get_device_profile, + create_device_profile, delete_device_profile + +Accelerator Request Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.accelerator.v2._proxy.Proxy + :noindex: + :members: accelerator_requests, get_accelerator_request, + create_accelerator_request, delete_accelerator_request, + update_accelerator_request + +Attribute Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.accelerator.v2._proxy.Proxy + :noindex: + :members: attributes, create_attribute, delete_attribute, get_attribute + +Helpers +^^^^^^^ + +.. 
autoclass:: openstack.accelerator.v2._proxy.Proxy + :noindex: + :members: wait_for_status, wait_for_delete diff --git a/doc/source/user/proxies/baremetal.rst b/doc/source/user/proxies/baremetal.rst new file mode 100644 index 0000000000..1fc657ad75 --- /dev/null +++ b/doc/source/user/proxies/baremetal.rst @@ -0,0 +1,129 @@ +Baremetal API +============= + +For details on how to use baremetal, see :doc:`/user/guides/baremetal` + +.. automodule:: openstack.baremetal.v1._proxy + +The Baremetal Class +------------------- + +The baremetal high-level interface is available through the ``baremetal`` +member of a :class:`~openstack.connection.Connection` object. +The ``baremetal`` member will only be added if the service is detected. + +Node Operations +^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: nodes, create_node, find_node, get_node, update_node, patch_node, delete_node, + set_node_provision_state, get_node_boot_device, + set_node_boot_device, get_node_supported_boot_devices, + set_node_boot_mode, + set_node_secure_boot, inject_nmi_to_node, wait_for_nodes_provision_state, + set_node_power_state, wait_for_node_power_state, + wait_for_node_reservation, validate_node, set_node_maintenance, + unset_node_maintenance, delete_node, list_node_vendor_passthru, + get_node_console, enable_node_console, disable_node_console + +Node Trait Operations +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: add_node_trait, remove_node_trait, set_node_traits + +Port Operations +^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: ports, find_port, get_port, create_port, update_port, delete_port, patch_port + +Port Group Operations +^^^^^^^^^^^^^^^^^^^^^ +.. 
autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: port_groups, find_port_group, get_port_group, + create_port_group, update_port_group, delete_port_group, patch_port_group + +Driver Operations +^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: drivers, get_driver + +Chassis Operations +^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: chassis, find_chassis, get_chassis, + create_chassis, update_chassis, patch_chassis, delete_chassis + +Virtual Media Operations +^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: attach_vmedia_to_node, detach_vmedia_from_node + +VIF Operations +^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: list_node_vifs, attach_vif_to_node, detach_vif_from_node + +Allocation Operations +^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: allocations, get_allocation, create_allocation, + update_allocation, patch_allocation, delete_allocation, + wait_for_allocation + +Volume Connector Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: volume_connectors, find_volume_connector, get_volume_connector, + create_volume_connector, update_volume_connector, + patch_volume_connector, delete_volume_connector + +Volume Target Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: volume_targets, find_volume_target, get_volume_target, + create_volume_target, update_volume_target, + patch_volume_target, delete_volume_target + +Deploy Template Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. 
autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: deploy_templates, get_deploy_template, + create_deploy_template, update_deploy_template, + patch_deploy_template, delete_deploy_template + +Runbook Operations +^^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: runbooks, get_runbook, + create_runbook, update_runbook, + patch_runbook, delete_runbook + +Inspection Rule Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ +.. autoclass:: openstack.baremetal.v1._proxy.Proxy + :noindex: + :members: inspection_rules, get_inspection_rule, + create_inspection_rule, update_inspection_rule, + patch_inspection_rule, delete_inspection_rule + +Utilities +--------- + +Building config drives +^^^^^^^^^^^^^^^^^^^^^^ + +.. automodule:: openstack.baremetal.configdrive + :noindex: + :members: diff --git a/doc/source/user/proxies/baremetal_introspection.rst b/doc/source/user/proxies/baremetal_introspection.rst new file mode 100644 index 0000000000..b21aba0c49 --- /dev/null +++ b/doc/source/user/proxies/baremetal_introspection.rst @@ -0,0 +1,21 @@ +Baremetal Introspection API +=========================== + +.. automodule:: openstack.baremetal_introspection.v1._proxy + +The Baremetal Introspection Proxy +--------------------------------- + +The baremetal introspection high-level interface is available through +the ``baremetal_introspection`` member of a +:class:`~openstack.connection.Connection` object. +The ``baremetal_introspection`` member will only be added if the service is +detected. + +Introspection Process Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.baremetal_introspection.v1._proxy.Proxy + :noindex: + :members: introspections, get_introspection, get_introspection_data, + start_introspection, wait_for_introspection, abort_introspection diff --git a/doc/source/user/proxies/block_storage_v2.rst b/doc/source/user/proxies/block_storage_v2.rst new file mode 100644 index 0000000000..0ef865c28e --- /dev/null +++ b/doc/source/user/proxies/block_storage_v2.rst @@ -0,0 +1,108 @@ +Block Storage API +================= + +For details on how to use block_storage, see :doc:`/user/guides/block_storage` + +.. automodule:: openstack.block_storage.v2._proxy + +The BlockStorage Class +---------------------- + +The block_storage high-level interface is available through the +``block_storage`` member of a :class:`~openstack.connection.Connection` object. +The ``block_storage`` member will only be added if the service is detected. + +Backup Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: create_backup, delete_backup, get_backup, backups, restore_backup, + reset_backup_status + +Capabilities Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: get_capabilities + +Limits Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: get_limits + +QuotaClassSet Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: get_quota_class_set, update_quota_class_set + +QuotaSet Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: get_quota_set, get_quota_set_defaults, + revert_quota_set, update_quota_set + +Service Operations +^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: find_service, services, enable_service, disable_service, + thaw_service, freeze_service, failover_service + +Snapshot Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: create_snapshot, delete_snapshot, get_snapshot, snapshots, + reset_snapshot_status + +Stats Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: backend_pools + +Transfer Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: create_transfer, delete_transfer, find_transfer, + get_transfer, transfers, accept_transfer + +Type Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: create_type, delete_type, get_type, types + +Volume Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: create_volume, delete_volume, get_volume, + find_volume, volumes, get_volume_metadata, set_volume_metadata, + delete_volume_metadata, extend_volume, + retype_volume, set_volume_bootable_status, reset_volume_status, + set_volume_image_metadata, delete_volume_image_metadata, + attach_volume, detach_volume, + unmanage_volume, migrate_volume, complete_volume_migration + +Helpers +^^^^^^^ + +.. autoclass:: openstack.block_storage.v2._proxy.Proxy + :noindex: + :members: wait_for_status, wait_for_delete diff --git a/doc/source/user/proxies/block_storage_v3.rst b/doc/source/user/proxies/block_storage_v3.rst new file mode 100644 index 0000000000..16bd1aa02c --- /dev/null +++ b/doc/source/user/proxies/block_storage_v3.rst @@ -0,0 +1,185 @@ +Block Storage API +================= + +For details on how to use block_storage, see :doc:`/user/guides/block_storage` + +.. 
automodule:: openstack.block_storage.v3._proxy + +The BlockStorage Class +---------------------- + +The block_storage high-level interface is available through the +``block_storage`` member of a :class:`~openstack.connection.Connection` object. +The ``block_storage`` member will only be added if the service is detected. + +Attachments +^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_attachment, get_attachment, attachments, + delete_attachment, update_attachment, complete_attachment + +Availability Zone Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: availability_zones + +Backend Pools Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: backend_pools + +Backup Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_backup, delete_backup, get_backup, find_backup, backups, + restore_backup, reset_backup_status + +BlockStorageSummary Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: summary + +Capabilities Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: get_capabilities + +Default Volume Types +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: default_types, show_default_type, set_default_type, + unset_default_type + +Limits Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: get_limits + +Group Operations +^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_group, create_group_from_source, delete_group, update_group, + get_group, find_group, groups, reset_group_status + +Group Snapshot Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_group_snapshot, delete_group_snapshot, get_group_snapshot, + find_group_snapshot, group_snapshots, reset_group_snapshot_status + +Group Type Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_group_type, delete_group_type, update_group_type, + get_group_type, find_group_type, group_types, + fetch_group_type_group_specs, create_group_type_group_specs, + get_group_type_group_specs_property, + update_group_type_group_specs_property, + delete_group_type_group_specs_property + +QuotaClassSet Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: get_quota_class_set, update_quota_class_set + +QuotaSet Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: get_quota_set, get_quota_set_defaults, + revert_quota_set, update_quota_set + +Service Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: find_service, services, enable_service, disable_service, + thaw_service, freeze_service, failover_service, + get_service_log_levels, set_service_log_levels + +Snapshot Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_snapshot, delete_snapshot, update_snapshot, get_snapshot, + find_snapshot, snapshots, get_snapshot_metadata, + set_snapshot_metadata, delete_snapshot_metadata, + reset_snapshot_status, set_snapshot_status, manage_snapshot, + unmanage_snapshot + +Stats Operations +^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: backend_pools + +Transfer Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_transfer, delete_transfer, find_transfer, + get_transfer, transfers, accept_transfer + +Type Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_type, delete_type, update_type, get_type, find_type, types, + update_type_extra_specs, delete_type_extra_specs, get_type_access, + add_type_access, remove_type_access, get_type_encryption, + create_type_encryption, delete_type_encryption, + update_type_encryption + +Volume Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: create_volume, delete_volume, update_volume, get_volume, + find_volume, volumes, get_volume_metadata, set_volume_metadata, + delete_volume_metadata, extend_volume, set_volume_readonly, + retype_volume, set_volume_bootable_status, reset_volume_status, + set_volume_image_metadata, delete_volume_image_metadata, + revert_volume_to_snapshot, attach_volume, detach_volume, + unmanage_volume, migrate_volume, complete_volume_migration, + upload_volume_to_image, reserve_volume, unreserve_volume, + begin_volume_detaching, abort_volume_detaching, + init_volume_attachment, terminate_volume_attachment, + manage_volume, + +Helpers +^^^^^^^ + +.. autoclass:: openstack.block_storage.v3._proxy.Proxy + :noindex: + :members: wait_for_status, wait_for_delete diff --git a/doc/source/user/proxies/clustering.rst b/doc/source/user/proxies/clustering.rst new file mode 100644 index 0000000000..46bf497532 --- /dev/null +++ b/doc/source/user/proxies/clustering.rst @@ -0,0 +1,119 @@ +Cluster API +=========== + +.. 
automodule:: openstack.clustering.v1._proxy + +The Cluster Class +----------------- + +The cluster high-level interface is available through the ``cluster`` +member of a :class:`~openstack.connection.Connection` object. The +``cluster`` member will only be added if the service is detected. + + +Build Info Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: get_build_info + + +Profile Type Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: profile_types, get_profile_type + + +Profile Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: create_profile, update_profile, delete_profile, get_profile, + find_profile, profiles, validate_profile + + +Policy Type Operations +^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: policy_types, get_policy_type + + +Policy Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: create_policy, update_policy, delete_policy, get_policy, + find_policy, policies, validate_policy + + +Cluster Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: create_cluster, update_cluster, delete_cluster, get_cluster, + find_cluster, clusters, check_cluster, recover_cluster, + resize_cluster, scale_in_cluster, scale_out_cluster, + collect_cluster_attrs, perform_operation_on_cluster, + add_nodes_to_cluster, remove_nodes_from_cluster, + replace_nodes_in_cluster, attach_policy_to_cluster, + update_cluster_policy, detach_policy_from_cluster, + get_cluster_policy, cluster_policies + +Node Operations +^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: create_node, update_node, delete_node, get_node, find_node, nodes, + check_node, recover_node, perform_operation_on_node, adopt_node + + +Receiver Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: create_receiver, update_receiver, delete_receiver, + get_receiver, find_receiver, receivers + + +Action Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: get_action, actions + + +Event Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: get_event, events + + +Helper Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: wait_for_delete, wait_for_status + + +Service Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.clustering.v1._proxy.Proxy + :noindex: + :members: services diff --git a/doc/source/user/proxies/compute.rst b/doc/source/user/proxies/compute.rst new file mode 100644 index 0000000000..c82a990e2a --- /dev/null +++ b/doc/source/user/proxies/compute.rst @@ -0,0 +1,204 @@ +Compute API +=========== + +For details on how to use compute, see :doc:`/user/guides/compute` + +.. automodule:: openstack.compute.v2._proxy + +The Compute Class +----------------- + +The compute high-level interface is available through the ``compute`` +member of a :class:`~openstack.connection.Connection` object. The +``compute`` member will only be added if the service is detected. + + +Server Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: create_server, update_server, delete_server, get_server, + find_server, servers, get_server_metadata, set_server_metadata, + delete_server_metadata, wait_for_server, create_server_image, + backup_server + +Network Actions +*************** + +.. 
autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: add_fixed_ip_to_server, remove_fixed_ip_from_server, + add_floating_ip_to_server, remove_floating_ip_from_server, + fetch_server_security_groups, add_security_group_to_server, + remove_security_group_from_server + +Starting, Stopping, etc. +************************ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: start_server, stop_server, suspend_server, resume_server, + reboot_server, restore_server, shelve_server, unshelve_server, + lock_server, unlock_server, pause_server, unpause_server, + rescue_server, unrescue_server, evacuate_server, migrate_server, + live_migrate_server + +Modifying a Server +****************** + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: resize_server, confirm_server_resize, revert_server_resize, + rebuild_server, reset_server_state, change_server_password, + get_server_password + +Image Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: images, get_image, find_image, delete_image, get_image_metadata, + set_image_metadata, delete_image_metadata + +Flavor Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: create_flavor, delete_flavor, get_flavor, find_flavor, flavors, + flavor_add_tenant_access, flavor_remove_tenant_access, + get_flavor_access, fetch_flavor_extra_specs, + create_flavor_extra_specs, get_flavor_extra_specs_property, + update_flavor_extra_specs_property, + delete_flavor_extra_specs_property + +Service Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: services, enable_service, disable_service, update_service_forced_down, + delete_service, update_service, find_service + +Volume Attachment Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: create_volume_attachment, update_volume_attachment, + delete_volume_attachment, get_volume_attachment, + volume_attachments + +Keypair Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: create_keypair, delete_keypair, get_keypair, find_keypair, + keypairs + +Server IPs +^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: server_ips + +Server Group Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: create_server_group, delete_server_group, get_server_group, + find_server_group, server_groups + +Server Interface Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: create_server_interface, delete_server_interface, + get_server_interface, server_interfaces, + +Server Tag Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: add_tag_to_server, remove_tag_from_server, remove_tags_from_server + +Availability Zone Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: availability_zones + +Limits Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: get_limits + +Hypervisor Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: get_hypervisor, find_hypervisor, hypervisors, + get_hypervisor_uptime + +Extension Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: find_extension, extensions + +QuotaClassSet Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: get_quota_class_set, update_quota_class_set + +QuotaSet Operations +^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: get_quota_set, get_quota_set_defaults, + revert_quota_set, update_quota_set + +Server Migration Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: abort_server_migration, force_complete_server_migration, + get_server_migration, server_migrations + +Migration Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: migrations + +Interactive Consoles +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: create_server_remote_console, get_server_console_url, + validate_console_auth_token, get_server_console_output, + create_console + +Helpers +^^^^^^^ + +.. autoclass:: openstack.compute.v2._proxy.Proxy + :noindex: + :members: wait_for_delete diff --git a/doc/source/user/proxies/container_infrastructure_management.rst b/doc/source/user/proxies/container_infrastructure_management.rst new file mode 100644 index 0000000000..99e33063a0 --- /dev/null +++ b/doc/source/user/proxies/container_infrastructure_management.rst @@ -0,0 +1,35 @@ +Container Infrastructure Management +=================================== + +.. automodule:: openstack.container_infrastructure_management.v1._proxy + +Cluster Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy + :noindex: + :members: create_cluster, delete_cluster, update_cluster, get_cluster, + find_cluster, clusters + +Cluster Certificates Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy + :noindex: + :members: create_cluster_certificate, get_cluster_certificate + +Cluster Templates Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy + :noindex: + :members: create_cluster_template, delete_cluster_template, + find_cluster_template, + get_cluster_template, cluster_templates, update_cluster_template + +Service Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.container_infrastructure_management.v1._proxy.Proxy + :noindex: + :members: services diff --git a/doc/source/user/proxies/database.rst b/doc/source/user/proxies/database.rst new file mode 100644 index 0000000000..3752b88fb9 --- /dev/null +++ b/doc/source/user/proxies/database.rst @@ -0,0 +1,43 @@ +Database API +============ + +For details on how to use database, see :doc:`/user/guides/database` + +.. automodule:: openstack.database.v1._proxy + +The Database Class +------------------ + +The database high-level interface is available through the ``database`` +member of a :class:`~openstack.connection.Connection` object. The +``database`` member will only be added if the service is detected. + +Database Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.database.v1._proxy.Proxy + :noindex: + :members: create_database, delete_database, get_database, find_database, + databases + +Flavor Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.database.v1._proxy.Proxy + :noindex: + :members: get_flavor, find_flavor, flavors + +Instance Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.database.v1._proxy.Proxy + :noindex: + :members: create_instance, update_instance, delete_instance, get_instance, + find_instance, instances + +User Operations +^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.database.v1._proxy.Proxy + :noindex: + :members: create_user, delete_user, get_user, find_user, users diff --git a/doc/source/user/proxies/dns.rst b/doc/source/user/proxies/dns.rst new file mode 100644 index 0000000000..0229eb4ef9 --- /dev/null +++ b/doc/source/user/proxies/dns.rst @@ -0,0 +1,107 @@ +DNS API +======= + +For details on how to use dns, see :doc:`/user/guides/dns` + +.. automodule:: openstack.dns.v2._proxy + +The DNS Class +------------- + +The dns high-level interface is available through the ``dns`` +member of a :class:`~openstack.connection.Connection` object. The +``dns`` member will only be added if the service is detected. + +DNS Zone Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: create_zone, delete_zone, get_zone, find_zone, zones, + abandon_zone, xfr_zone + +Recordset Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: create_recordset, update_recordset, get_recordset, + delete_recordset, recordsets + +Zone Import Operations +^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: zone_imports, create_zone_import, get_zone_import, + delete_zone_import + +Zone Export Operations +^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: zone_exports, create_zone_export, get_zone_export, + get_zone_export_text, delete_zone_export + +FloatingIP Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: floating_ips, get_floating_ip, update_floating_ip + +TLD Operations +^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: create_tld, delete_tld, get_tld, find_tld, tlds + +Zone Transfer Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: zone_transfer_requests, get_zone_transfer_request, + create_zone_transfer_request, update_zone_transfer_request, + delete_zone_transfer_request, zone_transfer_accepts, + get_zone_transfer_accept, create_zone_transfer_accept + +Zone Share Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: create_zone_share, delete_zone_share, get_zone_share, + find_zone_share, zone_shares + +Limit Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: limits + +Quota Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: quotas, get_quota, update_quota, delete_quota + +Service Status Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: service_statuses, get_service_status + + +Blacklist Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.dns.v2._proxy.Proxy + :noindex: + :members: blacklists, get_blacklist, create_blacklist, + update_blacklist, delete_blacklist diff --git a/doc/source/user/proxies/identity_v2.rst b/doc/source/user/proxies/identity_v2.rst new file mode 100644 index 0000000000..fac47fa2a1 --- /dev/null +++ b/doc/source/user/proxies/identity_v2.rst @@ -0,0 +1,42 @@ +Identity API v2 +=============== + +For details on how to use identity, see :doc:`/user/guides/identity` + +.. automodule:: openstack.identity.v2._proxy + +The Identity v2 Class +--------------------- + +The identity high-level interface is available through the ``identity`` +member of a :class:`~openstack.connection.Connection` object. The +``identity`` member will only be added if the service is detected. + +Extension Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v2._proxy.Proxy + :noindex: + :members: get_extension, extensions + +User Operations +^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.identity.v2._proxy.Proxy + :noindex: + :members: create_user, update_user, delete_user, get_user, find_user, users + +Role Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v2._proxy.Proxy + :noindex: + :members: create_role, update_role, delete_role, get_role, find_role, roles + +Tenant Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v2._proxy.Proxy + :noindex: + :members: create_tenant, update_tenant, delete_tenant, get_tenant, + find_tenant, tenants diff --git a/doc/source/user/proxies/identity_v3.rst b/doc/source/user/proxies/identity_v3.rst new file mode 100644 index 0000000000..e51f3c94ea --- /dev/null +++ b/doc/source/user/proxies/identity_v3.rst @@ -0,0 +1,198 @@ +Identity API v3 +=============== + +For details on how to use identity, see :doc:`/user/guides/identity` + +.. automodule:: openstack.identity.v3._proxy + +The Identity v3 Class +--------------------- + +The identity high-level interface is available through the ``identity`` +member of a :class:`~openstack.connection.Connection` object. The +``identity`` member will only be added if the service is detected. + +Credential Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_credential, update_credential, delete_credential, + get_credential, find_credential, credentials + +Domain Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_domain, update_domain, delete_domain, get_domain, + find_domain, domains + +Domain Config Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_domain_config, delete_domain_config, get_domain_config, + update_domain_config + +Endpoint Operations +^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_endpoint, update_endpoint, delete_endpoint, get_endpoint, + find_endpoint, endpoints, project_endpoints, + associate_endpoint_with_project, disassociate_endpoint_from_project + +Group Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_group, update_group, delete_group, get_group, find_group, + groups, add_user_to_group, remove_user_from_group, + check_user_in_group, group_users + +Policy Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_policy, update_policy, delete_policy, get_policy, + find_policy, policies + +Project Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_project, update_project, delete_project, get_project, + find_project, projects, user_projects, endpoint_projects + +Service Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_service, update_service, delete_service, get_service, + find_service, services + +User Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_user, update_user, delete_user, get_user, find_user, users, + user_groups + +Token Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: validate_token, check_token, revoke_token + +Trust Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_trust, delete_trust, get_trust, find_trust, trusts + +Region Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_region, update_region, delete_region, get_region, + find_region, regions + +Role Operations +^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_role, update_role, delete_role, get_role, find_role, roles + +Role Assignment Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: role_assignments, role_assignments_filter, + assign_project_role_to_user, unassign_project_role_from_user, + validate_user_has_project_role, assign_project_role_to_group, + unassign_project_role_from_group, validate_group_has_project_role, + assign_domain_role_to_user, unassign_domain_role_from_user, + validate_user_has_domain_role, assign_domain_role_to_group, + unassign_domain_role_from_group, validate_group_has_domain_role, + assign_system_role_to_user, unassign_system_role_from_user, + validate_user_has_system_role, assign_system_role_to_group, + unassign_system_role_from_group, validate_group_has_system_role + +Registered Limit Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: registered_limits, get_registered_limit, create_registered_limit, + update_registered_limit, delete_registered_limit + +Limit Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: limits, get_limit, create_limit, update_limit, delete_limit + +Application Credential Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: application_credentials, get_application_credential, + create_application_credential, find_application_credential, + delete_application_credential + +Federation Protocol Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_federation_protocol, delete_federation_protocol, + find_federation_protocol, get_federation_protocol, + federation_protocols, update_federation_protocol + +Mapping Operations +^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_mapping, delete_mapping, find_mapping, get_mapping, + mappings, update_mapping + +Identity Provider Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_identity_provider, delete_identity_provider, + find_identity_provider, get_identity_provider, identity_providers, + update_identity_provider + +Access Rule Operations +^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: access_rules, get_access_rule, delete_access_rule + +Service Provider Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.identity.v3._proxy.Proxy + :noindex: + :members: create_service_provider, delete_service_provider, + find_service_provider, get_service_provider, service_providers, + update_service_provider diff --git a/doc/source/user/proxies/image_v1.rst b/doc/source/user/proxies/image_v1.rst new file mode 100644 index 0000000000..10911a3df3 --- /dev/null +++ b/doc/source/user/proxies/image_v1.rst @@ -0,0 +1,18 @@ +Image API v1 +============ + +For details on how to use image, see :doc:`/user/guides/image` + +.. automodule:: openstack.image.v1._proxy + +The Image v1 Class +------------------ + +The image high-level interface is available through the ``image`` member of a +:class:`~openstack.connection.Connection` object. The ``image`` member will +only be added if the service is detected. + +.. autoclass:: openstack.image.v1._proxy.Proxy + :noindex: + :members: upload_image, update_image, delete_image, get_image, find_image, + images diff --git a/doc/source/user/proxies/image_v2.rst b/doc/source/user/proxies/image_v2.rst new file mode 100644 index 0000000000..2d3fd442f1 --- /dev/null +++ b/doc/source/user/proxies/image_v2.rst @@ -0,0 +1,117 @@ +Image API v2 +============ + +For details on how to use image, see :doc:`/user/guides/image` + +.. 
automodule:: openstack.image.v2._proxy + +The Image v2 Class +------------------ + +The image high-level interface is available through the ``image`` member of a +:class:`~openstack.connection.Connection` object. The ``image`` member will +only be added if the service is detected. + +Image Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: create_image, import_image, upload_image, download_image, + update_image, delete_image, get_image, find_image, images, + deactivate_image, reactivate_image, stage_image, + add_tag, remove_tag + +Image Task Operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: image_tasks + +Member Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: add_member, remove_member, update_member, get_member, find_member, + members + +Task Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: tasks, create_task, get_task, wait_for_task + +Schema Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: get_images_schema, get_image_schema, get_members_schema, + get_member_schema, get_tasks_schema, get_task_schema, + get_metadef_namespace_schema, get_metadef_namespaces_schema, + get_metadef_resource_type_schema, get_metadef_resource_types_schema, + get_metadef_object_schema, get_metadef_objects_schema, + get_metadef_property_schema, get_metadef_properties_schema, + get_metadef_tag_schema, get_metadef_tags_schema + +Service Info Discovery Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: stores, get_import_info + + +Metadef Namespace Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: create_metadef_namespace, delete_metadef_namespace, + get_metadef_namespace, metadef_namespaces, update_metadef_namespace + + +Metadef Object Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: create_metadef_object, delete_metadef_object, + get_metadef_object, metadef_objects, update_metadef_object + +Metadef Resource Type Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: metadef_resource_types, metadef_resource_type_associations, + create_metadef_resource_type_association, + delete_metadef_resource_type_association + + +Metadef Property Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: create_metadef_property, update_metadef_property, + delete_metadef_property, get_metadef_property + + +Helpers +^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: wait_for_delete + + +Cache Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.image.v2._proxy.Proxy + :noindex: + :members: cache_delete_image, queue_image, get_image_cache, clear_cache diff --git a/doc/source/user/proxies/key_manager.rst b/doc/source/user/proxies/key_manager.rst new file mode 100644 index 0000000000..9210e7a3c5 --- /dev/null +++ b/doc/source/user/proxies/key_manager.rst @@ -0,0 +1,54 @@ +KeyManager API +============== + +For details on how to use key_management, see +:doc:`/user/guides/key_manager` + +.. automodule:: openstack.key_manager.v1._proxy + +The KeyManager Class +-------------------- + +The key_management high-level interface is available through the +``key_manager`` member of a :class:`~openstack.connection.Connection` +object. The ``key_manager`` member will only be added if the service is +detected. + +Secret Operations +^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.key_manager.v1._proxy.Proxy + :noindex: + :members: create_secret, update_secret, delete_secret, get_secret, + find_secret, secrets + +Container Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.key_manager.v1._proxy.Proxy + :noindex: + :members: create_container, update_container, delete_container, + get_container, find_container, containers + +Order Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.key_manager.v1._proxy.Proxy + :noindex: + :members: create_order, update_order, delete_order, get_order, + find_order, orders + +Secret Store Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.key_manager.v1._proxy.Proxy + :noindex: + :members: secret_stores, get_global_default_secret_store, + get_preferred_secret_store + +ProjectQuota Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.key_manager.v1._proxy.Proxy + :noindex: + :members: update_project_quota, delete_project_quota, get_project_quota diff --git a/doc/source/user/proxies/load_balancer_v2.rst b/doc/source/user/proxies/load_balancer_v2.rst new file mode 100644 index 0000000000..711d80375f --- /dev/null +++ b/doc/source/user/proxies/load_balancer_v2.rst @@ -0,0 +1,123 @@ +Load Balancer v2 API +==================== + +.. automodule:: openstack.load_balancer.v2._proxy + +The LoadBalancer Class +---------------------- + +The load_balancer high-level interface is available through the +``load_balancer`` member of a :class:`~openstack.connection.Connection` object. +The ``load_balancer`` member will only be added if the service is detected. + +Load Balancer Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_load_balancer, delete_load_balancer, find_load_balancer, + get_load_balancer, get_load_balancer_statistics, load_balancers, + update_load_balancer, failover_load_balancer, wait_for_load_balancer + +Listener Operations +^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_listener, delete_listener, find_listener, get_listener, + get_listener_statistics, listeners, update_listener + +Pool Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_pool, delete_pool, find_pool, get_pool, pools, update_pool + +Member Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_member, delete_member, find_member, get_member, members, + update_member + +Health Monitor Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_health_monitor, delete_health_monitor, find_health_monitor, + get_health_monitor, health_monitors, update_health_monitor + +L7 Policy Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_l7_policy, delete_l7_policy, find_l7_policy, + get_l7_policy, l7_policies, update_l7_policy + +L7 Rule Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_l7_rule, delete_l7_rule, find_l7_rule, + get_l7_rule, l7_rules, update_l7_rule + +Provider Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: providers, provider_flavor_capabilities + +Flavor Profile Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_flavor_profile, get_flavor_profile, flavor_profiles, + delete_flavor_profile, find_flavor_profile, update_flavor_profile + +Flavor Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_flavor, get_flavor, flavors, delete_flavor, + find_flavor, update_flavor + +Quota Operations +^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: update_quota, delete_quota, quotas, get_quota, get_quota_default + +Amphora Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: amphorae, get_amphora, find_amphora, configure_amphora, + failover_amphora + +Availability Zone Profile Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_availability_zone_profile, get_availability_zone_profile, + availability_zone_profiles, delete_availability_zone_profile, + find_availability_zone_profile, update_availability_zone_profile + +Availability Zone Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.load_balancer.v2._proxy.Proxy + :noindex: + :members: create_availability_zone, get_availability_zone, + availability_zones, delete_availability_zone, + find_availability_zone, update_availability_zone diff --git a/doc/source/user/proxies/message_v2.rst b/doc/source/user/proxies/message_v2.rst new file mode 100644 index 0000000000..361842c50d --- /dev/null +++ b/doc/source/user/proxies/message_v2.rst @@ -0,0 +1,42 @@ +Message API v2 +============== + +For details on how to use message, see :doc:`/user/guides/message` + +.. automodule:: openstack.message.v2._proxy + +The Message v2 Class +-------------------- + +The message high-level interface is available through the ``message`` member +of a :class:`~openstack.connection.Connection` object. The ``message`` +member will only be added if the service is detected. + +Message Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.message.v2._proxy.Proxy + :noindex: + :members: post_message, delete_message, get_message, messages + +Queue Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.message.v2._proxy.Proxy + :noindex: + :members: create_queue, delete_queue, get_queue, queues + +Claim Operations +^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.message.v2._proxy.Proxy + :noindex: + :members: create_claim, update_claim, delete_claim, get_claim + +Subscription Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.message.v2._proxy.Proxy + :noindex: + :members: create_subscription, delete_subscription, get_subscription, + subscriptions diff --git a/doc/source/user/proxies/network.rst b/doc/source/user/proxies/network.rst new file mode 100644 index 0000000000..0cd04b2204 --- /dev/null +++ b/doc/source/user/proxies/network.rst @@ -0,0 +1,372 @@ +Network API +=========== + +For details on how to use network, see :doc:`/user/guides/network` + +.. automodule:: openstack.network.v2._proxy + +The Network Class +----------------- + +The network high-level interface is available through the ``network`` +member of a :class:`~openstack.connection.Connection` object. The +``network`` member will only be added if the service is detected. + +Network Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_network, update_network, delete_network, get_network, + find_network, networks, get_network_ip_availability, + find_network_ip_availability, network_ip_availabilities, + add_dhcp_agent_to_network, remove_dhcp_agent_from_network, + dhcp_agent_hosting_networks, + +Port Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_port, create_ports, update_port, delete_port, get_port, + find_port, ports, add_ip_to_port, remove_ip_from_port + +Router Operations +^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_router, update_router, delete_router, get_router, + find_router, routers, + add_gateway_to_router, remove_gateway_from_router, + add_external_gateways, update_external_gateways, + remove_external_gateways, + add_interface_to_router, remove_interface_from_router, + add_extra_routes_to_router, remove_extra_routes_from_router, + create_conntrack_helper, update_conntrack_helper, + delete_conntrack_helper, get_conntrack_helper, conntrack_helpers + + + +Floating IP Operations +^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_ip, update_ip, delete_ip, get_ip, find_ip, + find_available_ip, ips + +Pool Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_pool, update_pool, delete_pool, get_pool, find_pool, pools, + create_pool_member, update_pool_member, delete_pool_member, + get_pool_member, find_pool_member, pool_members + +Auto Allocated Topology Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: delete_auto_allocated_topology, get_auto_allocated_topology, + validate_auto_allocated_topology + +Default Security Group Rules Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_default_security_group_rule, + find_default_security_group_rule, get_default_security_group_rule, + delete_default_security_group_rule, default_security_group_rules + +Security Group Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_security_group, update_security_group, + delete_security_group, get_security_group, + get_security_group_rule, find_security_group, + find_security_group_rule, security_group_rules, + security_groups, create_security_group_rule, + create_security_group_rules, delete_security_group_rule + +Address Group Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_address_group, delete_address_group, find_address_group, + get_address_group, address_groups, update_address_group, + add_addresses_to_address_group, remove_addresses_from_address_group + +Availability Zone Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: availability_zones + +Address Scope Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_address_scope, update_address_scope, delete_address_scope, + get_address_scope, find_address_scope, address_scopes + +Quota Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: update_quota, delete_quota, get_quota, get_quota_default, quotas + +QoS Operations +^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_qos_policy, update_qos_policy, delete_qos_policy, + get_qos_policy, find_qos_policy, qos_policies, get_qos_rule_type, + find_qos_rule_type, qos_rule_types, + create_qos_minimum_bandwidth_rule, + update_qos_minimum_bandwidth_rule, + delete_qos_minimum_bandwidth_rule, + get_qos_minimum_bandwidth_rule, + find_qos_minimum_bandwidth_rule, + qos_minimum_bandwidth_rules, + create_qos_minimum_packet_rate_rule, + update_qos_minimum_packet_rate_rule, + delete_qos_minimum_packet_rate_rule, + get_qos_minimum_packet_rate_rule, + find_qos_minimum_packet_rate_rule, + qos_minimum_packet_rate_rules, + create_qos_bandwidth_limit_rule, + update_qos_bandwidth_limit_rule, + delete_qos_bandwidth_limit_rule, + get_qos_bandwidth_limit_rule, find_qos_bandwidth_limit_rule, + qos_bandwidth_limit_rules, + create_qos_dscp_marking_rule, update_qos_dscp_marking_rule, + delete_qos_dscp_marking_rule, get_qos_dscp_marking_rule, + find_qos_dscp_marking_rule, qos_dscp_marking_rules, + create_qos_packet_rate_limit_rule, + update_qos_packet_rate_limit_rule, + delete_qos_packet_rate_limit_rule, + get_qos_packet_rate_limit_rule, + find_qos_packet_rate_limit_rule, + +Agent Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: delete_agent, update_agent, get_agent, agents, + agent_hosted_routers, routers_hosting_l3_agents, + network_hosting_dhcp_agents, add_router_to_agent, + remove_router_from_agent + +RBAC Operations +^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_rbac_policy, update_rbac_policy, delete_rbac_policy, + get_rbac_policy, find_rbac_policy, rbac_policies + +Listener Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_listener, update_listener, delete_listener, + get_listener, find_listener, listeners + +Subnet Operations +^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_subnet, update_subnet, delete_subnet, get_subnet, + get_subnet_ports, find_subnet, subnets, create_subnet_pool, + update_subnet_pool, delete_subnet_pool, get_subnet_pool, + find_subnet_pool, subnet_pools + +Load Balancer Operations +^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_load_balancer, update_load_balancer, delete_load_balancer, + get_load_balancer, find_load_balancer, load_balancers + +Health Monitor Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_health_monitor, update_health_monitor, + delete_health_monitor, get_health_monitor, find_health_monitor, + health_monitors + +Metering Label Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_metering_label, update_metering_label, + delete_metering_label, get_metering_label, find_metering_label, + metering_labels, create_metering_label_rule, + update_metering_label_rule, delete_metering_label_rule, + get_metering_label_rule, find_metering_label_rule, + metering_label_rules + +Segment Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_segment, update_segment, delete_segment, get_segment, + find_segment, segments + +Flavor Operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_flavor, update_flavor, delete_flavor, get_flavor, + find_flavor, flavors + +Service Profile Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_service_profile, update_service_profile, + delete_service_profile, get_service_profile, find_service_profile, + service_profiles, associate_flavor_with_service_profile, + disassociate_flavor_from_service_profile + +Tag Operations +^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: set_tags + +VPNaaS Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_vpn_service, update_vpn_service, delete_vpn_service, + get_vpn_service, find_vpn_service, vpn_services, create_vpn_endpoint_group, + update_vpn_endpoint_group, delete_vpn_endpoint_group, + get_vpn_endpoint_group, find_vpn_endpoint_group, vpn_endpoint_groups, + create_vpn_ipsec_site_connection, update_vpn_ipsec_site_connection, + delete_vpn_ipsec_site_connection, get_vpn_ipsec_site_connection, + find_vpn_ipsec_site_connection, vpn_ipsec_site_connections, + create_vpn_ike_policy, update_vpn_ike_policy, delete_vpn_ike_policy, + get_vpn_ike_policy, find_vpn_ike_policy, vpn_ike_policies, + create_vpn_ipsec_policy, update_vpn_ipsec_policy, delete_vpn_ipsec_policy, + get_vpn_ipsec_policy, find_vpn_ipsec_policy, vpn_ipsec_policies + +Extension Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: find_extension, extensions + +Service Provider Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: service_providers + +Local IP Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_local_ip, delete_local_ip, find_local_ip, get_local_ip, + local_ips, update_local_ip, create_local_ip_association, + delete_local_ip_association, find_local_ip_association, + get_local_ip_association, local_ip_associations + +Ndp Proxy Operations +^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_ndp_proxy, get_ndp_proxy, find_ndp_proxy, delete_ndp_proxy, + ndp_proxies, update_ndp_proxy + +BGP Operations +^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_bgp_peer, delete_bgp_peer, find_bgp_peer, get_bgp_peer, + update_bgp_peer, bgp_peers, create_bgp_speaker, + delete_bgp_speaker, find_bgp_speaker, get_bgp_speaker, + update_bgp_speaker, bgp_speakers, add_bgp_peer_to_speaker, + remove_bgp_peer_from_speaker, add_gateway_network_to_speaker, + remove_gateway_network_from_speaker, + get_advertised_routes_of_speaker, + get_bgp_dragents_hosting_speaker, add_bgp_speaker_to_dragent, + get_bgp_speakers_hosted_by_dragent, + remove_bgp_speaker_from_dragent + +Tap As A Service Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_tap_flow, delete_tap_flow, find_tap_flow, get_tap_flow, + update_tap_flow, tap_flows, create_tap_service, delete_tap_service, + find_tap_service, update_tap_service, tap_services + +BGPVPN operations +^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_bgpvpn, delete_bgpvpn, find_bgpvpn, get_bgpvpn, + update_bgpvpn, bgpvpns, create_bgpvpn_network_association, + delete_bgpvpn_network_association, get_bgpvpn_network_association, + bgpvpn_network_associations, create_bgpvpn_port_association, + delete_bgpvpn_port_association, find_bgpvpn_port_association, + get_bgpvpn_port_association, update_bgpvpn_port_association, + bgpvpn_port_associations, create_bgpvpn_router_association, + delete_bgpvpn_router_association, get_bgpvpn_router_association, + update_bgpvpn_router_association, bgpvpn_router_associations + +SFC operations +^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_sfc_flow_classifier, delete_sfc_flow_classifier, + find_sfc_flow_classifier, get_sfc_flow_classifier, + update_sfc_flow_classifier, create_sfc_port_chain, + delete_sfc_port_chain, find_sfc_port_chain, get_sfc_port_chain, + update_sfc_port_chain, create_sfc_port_pair, delete_sfc_port_pair, + find_sfc_port_pair, get_sfc_port_pair, update_sfc_port_pair, + create_sfc_port_pair_group, delete_sfc_port_pair_group, + find_sfc_port_pair_group, get_sfc_port_pair_group, + update_sfc_port_pair_group, create_sfc_service_graph, + delete_sfc_service_graph, find_sfc_service_graph, + get_sfc_service_graph, update_sfc_service_graph + +Tap Mirror operations +^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.network.v2._proxy.Proxy + :noindex: + :members: create_tap_mirror, delete_tap_mirror, find_tap_mirror, + get_tap_mirror, tap_mirrors, update_tap_mirror diff --git a/doc/source/user/proxies/object_store.rst b/doc/source/user/proxies/object_store.rst new file mode 100644 index 0000000000..fa286d8e37 --- /dev/null +++ b/doc/source/user/proxies/object_store.rst @@ -0,0 +1,37 @@ +Object Store API +================ + +For details on how to use this API, see :doc:`/user/guides/object_store` + +.. automodule:: openstack.object_store.v1._proxy + +The Object Store Class +---------------------- + +The Object Store high-level interface is exposed as the ``object_store`` +object on :class:`~openstack.connection.Connection` objects. + +Account Operations +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.object_store.v1._proxy.Proxy + :noindex: + :members: get_account_metadata, set_account_metadata, delete_account_metadata + +Container Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.object_store.v1._proxy.Proxy + :noindex: + :members: create_container, delete_container, containers, + get_container_metadata, set_container_metadata, + delete_container_metadata + +Object Operations +^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.object_store.v1._proxy.Proxy + :noindex: + :members: upload_object, download_object, copy_object, delete_object, + get_object, objects, get_object_metadata, set_object_metadata, + delete_object_metadata diff --git a/doc/source/user/proxies/orchestration.rst b/doc/source/user/proxies/orchestration.rst new file mode 100644 index 0000000000..76e555e931 --- /dev/null +++ b/doc/source/user/proxies/orchestration.rst @@ -0,0 +1,68 @@ +Orchestration API +================= + +For details on how to use orchestration, see :doc:`/user/guides/orchestration` + +.. automodule:: openstack.orchestration.v1._proxy + +The Orchestration Class +----------------------- + +The orchestration high-level interface is available through the +``orchestration`` member of a :class:`~openstack.connection.Connection` +object. The ``orchestration`` member will only be added if the service +is detected. + +Stack Operations +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.orchestration.v1._proxy.Proxy + :noindex: + :members: create_stack, stacks, find_stack, update_stack, delete_stack, + get_stack, export_stack, + get_stack_template, get_stack_environment + +Stack Resource Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.orchestration.v1._proxy.Proxy + :noindex: + :members: resources + +Stack Action Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.orchestration.v1._proxy.Proxy + :noindex: + :members: suspend_stack, resume_stack, check_stack + +Stack Event Operations +^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.orchestration.v1._proxy.Proxy + :noindex: + :members: stack_events + +Stack Template Operations +^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.orchestration.v1._proxy.Proxy + :noindex: + :members: validate_template + +Software Configuration Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. 
autoclass:: openstack.orchestration.v1._proxy.Proxy + :noindex: + :members: create_software_config, delete_software_config, + get_software_config, software_configs + +Software Deployment Operations +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.orchestration.v1._proxy.Proxy + :noindex: + :members: create_software_deployment, update_software_deployment, + delete_software_deployment, get_software_deployment, + software_deployments diff --git a/doc/source/user/proxies/placement.rst b/doc/source/user/proxies/placement.rst new file mode 100644 index 0000000000..864b41204d --- /dev/null +++ b/doc/source/user/proxies/placement.rst @@ -0,0 +1,49 @@ +Placement API +============= + +.. automodule:: openstack.placement.v1._proxy + +The Placement Class +------------------- + +The placement high-level interface is available through the ``placement`` +member of a :class:`~openstack.connection.Connection` object. +The ``placement`` member will only be added if the service is detected. + +Resource Classes +^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.placement.v1._proxy.Proxy + :noindex: + :members: create_resource_class, update_resource_class, + delete_resource_class, get_resource_class, + resource_classes + +Resource Providers +^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.placement.v1._proxy.Proxy + :noindex: + :members: create_resource_provider, update_resource_provider, + delete_resource_provider, get_resource_provider, + find_resource_provider, resource_providers, + get_resource_provider_aggregates, + set_resource_provider_aggregates + +Resource Provider Inventories +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.placement.v1._proxy.Proxy + :noindex: + :members: create_resource_provider_inventory, + update_resource_provider_inventory, + delete_resource_provider_inventory, + get_resource_provider_inventory, + resource_provider_inventories + +Traits +^^^^^^ + +.. 
autoclass:: openstack.placement.v1._proxy.Proxy + :noindex: + :members: create_trait, delete_trait, get_trait, traits diff --git a/doc/source/user/proxies/shared_file_system.rst b/doc/source/user/proxies/shared_file_system.rst new file mode 100644 index 0000000000..2a3a756a06 --- /dev/null +++ b/doc/source/user/proxies/shared_file_system.rst @@ -0,0 +1,194 @@ +Shared File System API +====================== + +.. automodule:: openstack.shared_file_system.v2._proxy + +The Shared File System Class +---------------------------- + +The high-level interface for accessing the shared file systems service API is +available through the ``shared_file_system`` member of a :class:`~openstack +.connection.Connection` object. The ``shared_file_system`` member will only +be added if the service is detected. ``share`` is an alias of the +``shared_file_system`` member. + + +Shared File System Availability Zones +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interact with Availability Zones supported by the Shared File Systems +service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: availability_zones + + +Shared File System Shares +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interact with Shares supported by the Shared File Systems +service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: shares, get_share, delete_share, update_share, create_share, + revert_share_to_snapshot, resize_share, find_share, manage_share, + unmanage_share + + +Shared File System Storage Pools +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interact with the storage pool statistics exposed by the Shared File +Systems Service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: storage_pools + + +Shared File System User Messages +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +View and manipulate asynchronous user messages emitted by the Shared +File Systems service. + +.. 
autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: user_messages, get_user_message, delete_user_message + + +Shared File System Limits +^^^^^^^^^^^^^^^^^^^^^^^^^ + +Get absolute limits of resources supported by the Shared File Systems +service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: limits + + +Shared File System Snapshots +^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interact with Share Snapshots supported by the Shared File Systems +service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: share_snapshots, get_share_snapshot, delete_share_snapshot, + update_share_snapshot, create_share_snapshot + +Shared File System Share Snapshot Instances +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interact with Share Snapshot Instances supported by the +Shared File Systems service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: share_snapshot_instances, get_share_snapshot_instance + + +Shared File System Share Networks +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Create and manipulate Share Networks with the Shared File Systems service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: share_networks, get_share_network, delete_share_network, + update_share_network, create_share_network + + +Shared File System Share Instances +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Administrators can list, show information for, explicitly set the +state of, and force-delete share instances within the Shared File +Systems Service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: share_instances, get_share_instance, + reset_share_instance_status, + delete_share_instance + + +Shared File System Share Network Subnets +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Create and manipulate Share Network Subnets with the Shared File Systems +service. + +.. 
autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: share_network_subnets, get_share_network_subnet, + create_share_network_subnet, delete_share_network_subnet + + +Shared File System Share Access Rules +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Create, View, and Delete access rules for shares from the +Shared File Systems service. Access rules can also have their deletion +and visibility restricted during creation. A lock reason can also be +specified. The deletion restriction can be removed during the access removal. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: access_rules, get_access_rule, create_access_rule, + delete_access_rule + + +Shared File System Share Groups +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interact with Share groups supported by the Shared File Systems +service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: share_groups, get_share_group, delete_share_group, + update_share_group, create_share_group, find_share_group + + +Shared File System Share Group Snapshots +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Interact with Share Group Snapshots by the Shared File Systems +service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: share_group_snapshots, get_share_group_snapshot, create_share_group_snapshot, + reset_share_group_snapshot_status, update_share_group_snapshot, + delete_share_group_snapshot + + +Shared File System Share Metadata +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +List, Get, Create, Update, and Delete metadata for shares from the +Shared File Systems service. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: get_share_metadata, get_share_metadata_item, + create_share_metadata, update_share_metadata, + delete_share_metadata + + +Shared File System Resource Locks +^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + +Create, list, update and delete locks for resources. 
When a resource is +locked, it means that it can be deleted only by services, admins or +the user that created the lock. + +.. autoclass:: openstack.shared_file_system.v2._proxy.Proxy + :noindex: + :members: resource_locks, get_resource_lock, update_resource_lock, + delete_resource_lock, create_resource_lock diff --git a/doc/source/user/proxies/workflow.rst b/doc/source/user/proxies/workflow.rst new file mode 100644 index 0000000000..f47baa55d0 --- /dev/null +++ b/doc/source/user/proxies/workflow.rst @@ -0,0 +1,35 @@ +Workflow API +============ + +.. automodule:: openstack.workflow.v2._proxy + +The Workflow Class +------------------ + +The workflow high-level interface is available through the ``workflow`` +member of a :class:`~openstack.connection.Connection` object. +The ``workflow`` member will only be added if the service is detected. + +Workflow Operations +^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.workflow.v2._proxy.Proxy + :noindex: + :members: create_workflow, update_workflow, delete_workflow, + get_workflow, find_workflow, workflows + +Execution Operations +^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.workflow.v2._proxy.Proxy + :noindex: + :members: create_execution, delete_execution, get_execution, + find_execution, executions + +Cron Trigger Operations +^^^^^^^^^^^^^^^^^^^^^^^ + +.. autoclass:: openstack.workflow.v2._proxy.Proxy + :noindex: + :members: create_cron_trigger, delete_cron_trigger, get_cron_trigger, + find_cron_trigger, cron_triggers diff --git a/doc/source/user/resource.rst b/doc/source/user/resource.rst new file mode 100644 index 0000000000..c7b5ae2d97 --- /dev/null +++ b/doc/source/user/resource.rst @@ -0,0 +1,22 @@ +Resource +======== +.. automodule:: openstack.resource + +Components +---------- + +.. autoclass:: openstack.resource.Body + :members: + +.. autoclass:: openstack.resource.Header + :members: + +.. autoclass:: openstack.resource.URI + :members: + +The Resource class +------------------ + +.. 
autoclass:: openstack.resource.Resource + :members: + :member-order: bysource diff --git a/doc/source/user/resources/accelerator/index.rst b/doc/source/user/resources/accelerator/index.rst new file mode 100644 index 0000000000..74bd93df6a --- /dev/null +++ b/doc/source/user/resources/accelerator/index.rst @@ -0,0 +1,11 @@ +Accelerator v2 Resources +======================== + +.. toctree:: + :maxdepth: 1 + + v2/attribute + v2/accelerator_request + v2/deployable + v2/device + v2/device_profile diff --git a/doc/source/user/resources/accelerator/v2/accelerator_request.rst b/doc/source/user/resources/accelerator/v2/accelerator_request.rst new file mode 100644 index 0000000000..172511407d --- /dev/null +++ b/doc/source/user/resources/accelerator/v2/accelerator_request.rst @@ -0,0 +1,13 @@ +openstack.accelerator.v2.accelerator_request +============================================ + +.. automodule:: openstack.accelerator.v2.accelerator_request + +The AcceleratorRequest Class +---------------------------- + +The ``AcceleratorRequest`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.accelerator.v2.accelerator_request.AcceleratorRequest + :members: diff --git a/doc/source/user/resources/accelerator/v2/attribute.rst b/doc/source/user/resources/accelerator/v2/attribute.rst new file mode 100644 index 0000000000..92ac0efdaa --- /dev/null +++ b/doc/source/user/resources/accelerator/v2/attribute.rst @@ -0,0 +1,12 @@ +openstack.accelerator.v2.attribute +================================== + +.. automodule:: openstack.accelerator.v2.attribute + +The Attribute Class +------------------- + +The ``Attribute`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.accelerator.v2.attribute.Attribute + :members: diff --git a/doc/source/user/resources/accelerator/v2/deployable.rst b/doc/source/user/resources/accelerator/v2/deployable.rst new file mode 100644 index 0000000000..4044c261d6 --- /dev/null +++ b/doc/source/user/resources/accelerator/v2/deployable.rst @@ -0,0 +1,13 @@ +openstack.accelerator.v2.deployable +=================================== + +.. automodule:: openstack.accelerator.v2.deployable + +The Deployable Class +-------------------- + +The ``Deployable`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.accelerator.v2.deployable.Deployable + :members: + diff --git a/doc/source/user/resources/accelerator/v2/device.rst b/doc/source/user/resources/accelerator/v2/device.rst new file mode 100644 index 0000000000..97e370c43f --- /dev/null +++ b/doc/source/user/resources/accelerator/v2/device.rst @@ -0,0 +1,13 @@ +openstack.accelerator.v2.device +=============================== + +.. automodule:: openstack.accelerator.v2.device + +The Device Class +---------------- + +The ``Device`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.accelerator.v2.device.Device + :members: + diff --git a/doc/source/user/resources/accelerator/v2/device_profile.rst b/doc/source/user/resources/accelerator/v2/device_profile.rst new file mode 100644 index 0000000000..71e05d1949 --- /dev/null +++ b/doc/source/user/resources/accelerator/v2/device_profile.rst @@ -0,0 +1,14 @@ +openstack.accelerator.v2.device_profile +======================================= + +.. automodule:: openstack.accelerator.v2.device_profile + +The DeviceProfile Class +----------------------- + +The ``DeviceProfile`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.accelerator.v2.device_profile.DeviceProfile + :members: + diff --git a/doc/source/user/resources/baremetal/index.rst b/doc/source/user/resources/baremetal/index.rst new file mode 100644 index 0000000000..4e0a5f1965 --- /dev/null +++ b/doc/source/user/resources/baremetal/index.rst @@ -0,0 +1,18 @@ +Baremetal Resources +=================== + +.. toctree:: + :maxdepth: 1 + + v1/driver + v1/chassis + v1/node + v1/port + v1/port_group + v1/allocation + v1/volume_connector + v1/volume_target + v1/deploy_templates + v1/conductor + v1/runbooks + v1/inspection_rules diff --git a/doc/source/user/resources/baremetal/v1/allocation.rst b/doc/source/user/resources/baremetal/v1/allocation.rst new file mode 100644 index 0000000000..0135180830 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/allocation.rst @@ -0,0 +1,12 @@ +openstack.baremetal.v1.Allocation +================================= + +.. automodule:: openstack.baremetal.v1.allocation + +The Allocation Class +-------------------- + +The ``Allocation`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.allocation.Allocation + :members: diff --git a/doc/source/user/resources/baremetal/v1/chassis.rst b/doc/source/user/resources/baremetal/v1/chassis.rst new file mode 100644 index 0000000000..87a6063894 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/chassis.rst @@ -0,0 +1,12 @@ +openstack.baremetal.v1.chassis +============================== + +.. automodule:: openstack.baremetal.v1.chassis + +The Chassis Class +----------------- + +The ``Chassis`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.baremetal.v1.chassis.Chassis + :members: diff --git a/doc/source/user/resources/baremetal/v1/conductor.rst b/doc/source/user/resources/baremetal/v1/conductor.rst new file mode 100644 index 0000000000..9e1bd1176c --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/conductor.rst @@ -0,0 +1,13 @@ +openstack.baremetal.v1.conductor +================================ + +.. automodule:: openstack.baremetal.v1.conductor + +The Conductor Class +------------------- + +The ``Conductor`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.conductor.Conductor + :members: diff --git a/doc/source/user/resources/baremetal/v1/deploy_templates.rst b/doc/source/user/resources/baremetal/v1/deploy_templates.rst new file mode 100644 index 0000000000..e55e63afad --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/deploy_templates.rst @@ -0,0 +1,13 @@ +openstack.baremetal.v1.deploy_templates +======================================= + +.. automodule:: openstack.baremetal.v1.deploy_templates + +The DeployTemplate Class +------------------------- + +The ``DeployTemplate`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.deploy_templates.DeployTemplate + :members: diff --git a/doc/source/user/resources/baremetal/v1/driver.rst b/doc/source/user/resources/baremetal/v1/driver.rst new file mode 100644 index 0000000000..f987d08613 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/driver.rst @@ -0,0 +1,12 @@ +openstack.baremetal.v1.driver +============================= + +.. automodule:: openstack.baremetal.v1.driver + +The Driver Class +---------------- + +The ``Driver`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.baremetal.v1.driver.Driver + :members: diff --git a/doc/source/user/resources/baremetal/v1/inspection_rules.rst b/doc/source/user/resources/baremetal/v1/inspection_rules.rst new file mode 100644 index 0000000000..dcfdc6320b --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/inspection_rules.rst @@ -0,0 +1,13 @@ +openstack.baremetal.v1.inspection_rules +======================================= + +.. automodule:: openstack.baremetal.v1.inspection_rules + +The InspectionRule Class +------------------------- + +The ``InspectionRule`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.inspection_rules.InspectionRule + :members: diff --git a/doc/source/user/resources/baremetal/v1/node.rst b/doc/source/user/resources/baremetal/v1/node.rst new file mode 100644 index 0000000000..14e691ed20 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/node.rst @@ -0,0 +1,35 @@ +openstack.baremetal.v1.Node +=========================== + +.. automodule:: openstack.baremetal.v1.node + +The Node Class +-------------- + +The ``Node`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.node.Node + :members: + +The PowerAction Class +^^^^^^^^^^^^^^^^^^^^^ + +The ``PowerAction`` enumeration represents known power actions. + +.. autoclass:: openstack.baremetal.v1.node.PowerAction + :members: + +The ValidationResult Class +^^^^^^^^^^^^^^^^^^^^^^^^^^ + +The ``ValidationResult`` class represents the result of a validation. + +.. autoclass:: openstack.baremetal.v1.node.ValidationResult + :members: + +The WaitResult Class +^^^^^^^^^^^^^^^^^^^^ + +The ``WaitResult`` class represents the result of waiting for several nodes. + +.. 
autoclass:: openstack.baremetal.v1.node.WaitResult diff --git a/doc/source/user/resources/baremetal/v1/port.rst b/doc/source/user/resources/baremetal/v1/port.rst new file mode 100644 index 0000000000..2bf0e18656 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/port.rst @@ -0,0 +1,12 @@ +openstack.baremetal.v1.port +=========================== + +.. automodule:: openstack.baremetal.v1.port + +The Port Class +-------------- + +The ``Port`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.port.Port + :members: diff --git a/doc/source/user/resources/baremetal/v1/port_group.rst b/doc/source/user/resources/baremetal/v1/port_group.rst new file mode 100644 index 0000000000..45c2bd91ab --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/port_group.rst @@ -0,0 +1,12 @@ +openstack.baremetal.v1.port_group +================================= + +.. automodule:: openstack.baremetal.v1.port_group + +The PortGroup Class +------------------- + +The ``PortGroup`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.port_group.PortGroup + :members: diff --git a/doc/source/user/resources/baremetal/v1/runbooks.rst b/doc/source/user/resources/baremetal/v1/runbooks.rst new file mode 100644 index 0000000000..f462171412 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/runbooks.rst @@ -0,0 +1,13 @@ +openstack.baremetal.v1.runbooks +=============================== + +.. automodule:: openstack.baremetal.v1.runbooks + +The Runbook Class +----------------- + +The ``Runbook`` class inherits +from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.baremetal.v1.runbooks.Runbook + :members: diff --git a/doc/source/user/resources/baremetal/v1/volume_connector.rst b/doc/source/user/resources/baremetal/v1/volume_connector.rst new file mode 100644 index 0000000000..3ffae22125 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/volume_connector.rst @@ -0,0 +1,13 @@ +openstack.baremetal.v1.volume_connector +======================================= + +.. automodule:: openstack.baremetal.v1.volume_connector + +The VolumeConnector Class +------------------------- + +The ``VolumeConnector`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.volume_connector.VolumeConnector + :members: diff --git a/doc/source/user/resources/baremetal/v1/volume_target.rst b/doc/source/user/resources/baremetal/v1/volume_target.rst new file mode 100644 index 0000000000..93525a80f5 --- /dev/null +++ b/doc/source/user/resources/baremetal/v1/volume_target.rst @@ -0,0 +1,13 @@ +openstack.baremetal.v1.volume_target +======================================= + +.. automodule:: openstack.baremetal.v1.volume_target + +The VolumeTarget Class +------------------------- + +The ``VolumeTarget`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal.v1.volume_target.VolumeTarget + :members: diff --git a/doc/source/user/resources/baremetal_introspection/index.rst b/doc/source/user/resources/baremetal_introspection/index.rst new file mode 100644 index 0000000000..b1da156385 --- /dev/null +++ b/doc/source/user/resources/baremetal_introspection/index.rst @@ -0,0 +1,8 @@ +Baremetal Introspection Resources +================================= + +.. 
toctree:: + :maxdepth: 1 + + v1/introspection + v1/introspection_rule diff --git a/doc/source/user/resources/baremetal_introspection/v1/introspection.rst b/doc/source/user/resources/baremetal_introspection/v1/introspection.rst new file mode 100644 index 0000000000..6275e254b5 --- /dev/null +++ b/doc/source/user/resources/baremetal_introspection/v1/introspection.rst @@ -0,0 +1,13 @@ +openstack.baremetal_introspection.v1.Introspection +================================================== + +.. automodule:: openstack.baremetal_introspection.v1.introspection + +The Introspection Class +----------------------- + +The ``Introspection`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal_introspection.v1.introspection.Introspection + :members: diff --git a/doc/source/user/resources/baremetal_introspection/v1/introspection_rule.rst b/doc/source/user/resources/baremetal_introspection/v1/introspection_rule.rst new file mode 100644 index 0000000000..ba8bc28565 --- /dev/null +++ b/doc/source/user/resources/baremetal_introspection/v1/introspection_rule.rst @@ -0,0 +1,13 @@ +openstack.baremetal_introspection.v1.introspection_rule +======================================================== + +.. automodule:: openstack.baremetal_introspection.v1.introspection_rule + +The IntrospectionRule Class +---------------------------- + +The ``IntrospectionRule`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.baremetal_introspection.v1.introspection_rule.IntrospectionRule + :members: diff --git a/doc/source/user/resources/block_storage/index.rst b/doc/source/user/resources/block_storage/index.rst new file mode 100644 index 0000000000..18c2c98c7e --- /dev/null +++ b/doc/source/user/resources/block_storage/index.rst @@ -0,0 +1,20 @@ +Block Storage Resources +======================= + +Block Storage v2 Resources +-------------------------- + +.. 
toctree:: + :maxdepth: 1 + :glob: + + v2/* + +Block Storage v3 Resources +-------------------------- + +.. toctree:: + :maxdepth: 1 + :glob: + + v3/* diff --git a/doc/source/user/resources/block_storage/v2/backup.rst b/doc/source/user/resources/block_storage/v2/backup.rst new file mode 100644 index 0000000000..a291fa5c08 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/backup.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v2.backup +================================= + +.. automodule:: openstack.block_storage.v2.backup + +The Backup Class +---------------- + +The ``Backup`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.backup.Backup + :members: diff --git a/doc/source/user/resources/block_storage/v2/capabilities.rst b/doc/source/user/resources/block_storage/v2/capabilities.rst new file mode 100644 index 0000000000..5835928ac5 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/capabilities.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v2.capabilities +======================================= + +.. automodule:: openstack.block_storage.v2.capabilities + +The Capabilities Class +---------------------- + +The ``Capabilities`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.capabilities.Capabilities + :members: diff --git a/doc/source/user/resources/block_storage/v2/limits.rst b/doc/source/user/resources/block_storage/v2/limits.rst new file mode 100644 index 0000000000..37a925a721 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/limits.rst @@ -0,0 +1,37 @@ +openstack.block_storage.v2.limits +================================= + +.. automodule:: openstack.block_storage.v2.limits + +The AbsoluteLimit Class +----------------------- + +The ``AbsoluteLimit`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.block_storage.v2.limits.AbsoluteLimit + :members: + +The Limits Class +---------------- + +The ``Limits`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.limits.Limits + :members: + +The RateLimit Class +------------------- + +The ``RateLimit`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.limits.RateLimit + :members: + +The RateLimits Class +-------------------- + +The ``RateLimits`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.limits.RateLimits + :members: diff --git a/doc/source/user/resources/block_storage/v2/quota_set.rst b/doc/source/user/resources/block_storage/v2/quota_set.rst new file mode 100644 index 0000000000..35211aec44 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/quota_set.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v2.quota_set +==================================== + +.. automodule:: openstack.block_storage.v2.quota_set + +The QuotaSet Class +------------------ + +The ``QuotaSet`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.quota_set.QuotaSet + :members: diff --git a/doc/source/user/resources/block_storage/v2/service.rst b/doc/source/user/resources/block_storage/v2/service.rst new file mode 100644 index 0000000000..60a451de1f --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/service.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v2.service +================================== + +.. automodule:: openstack.block_storage.v2.service + +The Service Class +----------------- + +The ``Service`` class inherits from :class:`~openstack.resource.Resource`. + +..
autoclass:: openstack.block_storage.v2.service.Service + :members: diff --git a/doc/source/user/resources/block_storage/v2/snapshot.rst b/doc/source/user/resources/block_storage/v2/snapshot.rst new file mode 100644 index 0000000000..5b2eea172d --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/snapshot.rst @@ -0,0 +1,21 @@ +openstack.block_storage.v2.snapshot +=================================== + +.. automodule:: openstack.block_storage.v2.snapshot + +The Snapshot Class +------------------ + +The ``Snapshot`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.snapshot.Snapshot + :members: + +The SnapshotDetail Class +------------------------ + +The ``SnapshotDetail`` class inherits from +:class:`~openstack.block_storage.v2.snapshot.Snapshot`. + +.. autoclass:: openstack.block_storage.v2.snapshot.SnapshotDetail + :members: diff --git a/doc/source/user/resources/block_storage/v2/stats.rst b/doc/source/user/resources/block_storage/v2/stats.rst new file mode 100644 index 0000000000..78717e2029 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/stats.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v2.stats +================================ + +.. automodule:: openstack.block_storage.v2.stats + +The Pools Class +--------------- + +The ``Pools`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.stats.Pools + :members: diff --git a/doc/source/user/resources/block_storage/v2/transfer.rst b/doc/source/user/resources/block_storage/v2/transfer.rst new file mode 100644 index 0000000000..b51f769910 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/transfer.rst @@ -0,0 +1,13 @@ +openstack.block_storage.v2.transfer +=================================== + +.. 
automodule:: openstack.block_storage.v2.transfer + +The Volume Transfer Class +------------------------- + +The ``Volume Transfer`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.transfer.Transfer + :members: diff --git a/doc/source/user/resources/block_storage/v2/type.rst b/doc/source/user/resources/block_storage/v2/type.rst new file mode 100644 index 0000000000..963f235db6 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/type.rst @@ -0,0 +1,13 @@ +openstack.block_storage.v2.type +=============================== + +.. automodule:: openstack.block_storage.v2.type + +The Type Class +-------------- + +The ``Type`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.type.Type + :members: + diff --git a/doc/source/user/resources/block_storage/v2/volume.rst b/doc/source/user/resources/block_storage/v2/volume.rst new file mode 100644 index 0000000000..ba4b9db575 --- /dev/null +++ b/doc/source/user/resources/block_storage/v2/volume.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v2.volume +================================= + +.. automodule:: openstack.block_storage.v2.volume + +The Volume Class +---------------- + +The ``Volume`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v2.volume.Volume + :members: diff --git a/doc/source/user/resources/block_storage/v3/attachment.rst b/doc/source/user/resources/block_storage/v3/attachment.rst new file mode 100644 index 0000000000..113ecd32e0 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/attachment.rst @@ -0,0 +1,13 @@ +openstack.block_storage.v3.attachment +===================================== + +.. automodule:: openstack.block_storage.v3.attachment + +The Volume Attachment Class +--------------------------- + +The ``Volume Attachment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.block_storage.v3.attachment.Attachment + :members: create, update, complete diff --git a/doc/source/user/resources/block_storage/v3/availability_zone.rst b/doc/source/user/resources/block_storage/v3/availability_zone.rst new file mode 100644 index 0000000000..83e8f0b2e8 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/availability_zone.rst @@ -0,0 +1,13 @@ +openstack.block_storage.v3.availability_zone +============================================ + +.. automodule:: openstack.block_storage.v3.availability_zone + +The AvailabilityZone Class +-------------------------- + +The ``AvailabilityZone`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.availability_zone.AvailabilityZone + :members: diff --git a/doc/source/user/resources/block_storage/v3/backup.rst b/doc/source/user/resources/block_storage/v3/backup.rst new file mode 100644 index 0000000000..2382ca978c --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/backup.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.backup +================================= + +.. automodule:: openstack.block_storage.v3.backup + +The Backup Class +---------------- + +The ``Backup`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.backup.Backup + :members: diff --git a/doc/source/user/resources/block_storage/v3/block_storage_summary.rst b/doc/source/user/resources/block_storage/v3/block_storage_summary.rst new file mode 100644 index 0000000000..2215db8f06 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/block_storage_summary.rst @@ -0,0 +1,14 @@ +openstack.block_storage.v3.block_storage_summary +================================================ + +.. automodule:: openstack.block_storage.v3.block_storage_summary + +The Block Storage Summary Class +------------------------------- + +The ``Block Storage Summary`` class inherits from +:class:`~openstack.resource.Resource`. 
+ +.. autoclass:: openstack.block_storage.v3.block_storage_summary.BlockStorageSummary + :members: + diff --git a/doc/source/user/resources/block_storage/v3/capabilities.rst b/doc/source/user/resources/block_storage/v3/capabilities.rst new file mode 100644 index 0000000000..9f4c79f146 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/capabilities.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.capabilities +======================================= + +.. automodule:: openstack.block_storage.v3.capabilities + +The Capabilities Class +---------------------- + +The ``Capabilities`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.capabilities.Capabilities + :members: diff --git a/doc/source/user/resources/block_storage/v3/extension.rst b/doc/source/user/resources/block_storage/v3/extension.rst new file mode 100644 index 0000000000..c72ec408a6 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/extension.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.extension +==================================== + +.. automodule:: openstack.block_storage.v3.extension + +The Extension Class +------------------- + +The ``Extension`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.extension.Extension + :members: diff --git a/doc/source/user/resources/block_storage/v3/group.rst b/doc/source/user/resources/block_storage/v3/group.rst new file mode 100644 index 0000000000..3d1d6abc07 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/group.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.group +================================ + +.. automodule:: openstack.block_storage.v3.group + +The Group Class +--------------- + +The ``Group`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.block_storage.v3.group.Group + :members: diff --git a/doc/source/user/resources/block_storage/v3/group_snapshot.rst b/doc/source/user/resources/block_storage/v3/group_snapshot.rst new file mode 100644 index 0000000000..f8135e3e15 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/group_snapshot.rst @@ -0,0 +1,13 @@ +openstack.block_storage.v3.group_snapshot +========================================= + +.. automodule:: openstack.block_storage.v3.group_snapshot + +The GroupSnapshot Class +----------------------- + +The ``GroupSnapshot`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.group_snapshot.GroupSnapshot + :members: diff --git a/doc/source/user/resources/block_storage/v3/group_type.rst b/doc/source/user/resources/block_storage/v3/group_type.rst new file mode 100644 index 0000000000..064b9d2472 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/group_type.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.group_type +===================================== + +.. automodule:: openstack.block_storage.v3.group_type + +The GroupType Class +------------------- + +The ``GroupType`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.group_type.GroupType + :members: diff --git a/doc/source/user/resources/block_storage/v3/limits.rst b/doc/source/user/resources/block_storage/v3/limits.rst new file mode 100644 index 0000000000..d313373252 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/limits.rst @@ -0,0 +1,37 @@ +openstack.block_storage.v3.limits +================================= + +.. automodule:: openstack.block_storage.v3.limits + +The AbsoluteLimit Class +----------------------- + +The ``AbsoluteLimit`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.block_storage.v3.limits.AbsoluteLimit + :members: + +The Limits Class +---------------- + +The ``Limits`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.limits.Limits + :members: + +The RateLimit Class +------------------- + +The ``RateLimit`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.limits.RateLimit + :members: + +The RateLimits Class +-------------------- + +The ``RateLimits`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.limits.RateLimits + :members: diff --git a/doc/source/user/resources/block_storage/v3/quota_set.rst b/doc/source/user/resources/block_storage/v3/quota_set.rst new file mode 100644 index 0000000000..69a287b9cc --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/quota_set.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.quota_set +==================================== + +.. automodule:: openstack.block_storage.v3.quota_set + +The QuotaSet Class +------------------ + +The ``QuotaSet`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.quota_set.QuotaSet + :members: diff --git a/doc/source/user/resources/block_storage/v3/resource_filter.rst b/doc/source/user/resources/block_storage/v3/resource_filter.rst new file mode 100644 index 0000000000..eaef7b3b45 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/resource_filter.rst @@ -0,0 +1,13 @@ +openstack.block_storage.v3.resource_filter +========================================== + +.. automodule:: openstack.block_storage.v3.resource_filter + +The ResourceFilter Class +------------------------ + +The ``ResourceFilter`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.block_storage.v3.resource_filter.ResourceFilter + :members: diff --git a/doc/source/user/resources/block_storage/v3/service.rst b/doc/source/user/resources/block_storage/v3/service.rst new file mode 100644 index 0000000000..433880a861 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/service.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.service +================================== + +.. automodule:: openstack.block_storage.v3.service + +The Service Class +----------------- + +The ``Service`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.service.Service + :members: diff --git a/doc/source/user/resources/block_storage/v3/snapshot.rst b/doc/source/user/resources/block_storage/v3/snapshot.rst new file mode 100644 index 0000000000..2185f58ba6 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/snapshot.rst @@ -0,0 +1,21 @@ +openstack.block_storage.v3.snapshot +=================================== + +.. automodule:: openstack.block_storage.v3.snapshot + +The Snapshot Class +------------------ + +The ``Snapshot`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.snapshot.Snapshot + :members: + +The SnapshotDetail Class +------------------------ + +The ``SnapshotDetail`` class inherits from +:class:`~openstack.block_storage.v3.snapshot.Snapshot`. + +.. autoclass:: openstack.block_storage.v3.snapshot.SnapshotDetail + :members: diff --git a/doc/source/user/resources/block_storage/v3/stats.rst b/doc/source/user/resources/block_storage/v3/stats.rst new file mode 100644 index 0000000000..b8e802e620 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/stats.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.stats +================================ + +.. automodule:: openstack.block_storage.v3.stats + +The Pools Class +--------------- + +The ``Pools`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.block_storage.v3.stats.Pools + :members: diff --git a/doc/source/user/resources/block_storage/v3/transfer.rst b/doc/source/user/resources/block_storage/v3/transfer.rst new file mode 100644 index 0000000000..e738c75295 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/transfer.rst @@ -0,0 +1,13 @@ +openstack.block_storage.v3.transfer +=================================== + +.. automodule:: openstack.block_storage.v3.transfer + +The Volume Transfer Class +------------------------- + +The ``Volume Transfer`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.transfer.Transfer + :members: diff --git a/doc/source/user/resources/block_storage/v3/type.rst b/doc/source/user/resources/block_storage/v3/type.rst new file mode 100644 index 0000000000..32ce79968c --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/type.rst @@ -0,0 +1,21 @@ +openstack.block_storage.v3.type +=============================== + +.. automodule:: openstack.block_storage.v3.type + +The Type Class +-------------- + +The ``Type`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.type.Type + :members: + +The TypeEncryption Class +------------------------ + +The ``TypeEncryption`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.block_storage.v3.type.TypeEncryption + :members: diff --git a/doc/source/user/resources/block_storage/v3/volume.rst b/doc/source/user/resources/block_storage/v3/volume.rst new file mode 100644 index 0000000000..edb734f2a8 --- /dev/null +++ b/doc/source/user/resources/block_storage/v3/volume.rst @@ -0,0 +1,12 @@ +openstack.block_storage.v3.volume +================================= + +.. automodule:: openstack.block_storage.v3.volume + +The Volume Class +---------------- + +The ``Volume`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.block_storage.v3.volume.Volume + :members: diff --git a/doc/source/users/resources/cluster/index.rst b/doc/source/user/resources/clustering/index.rst similarity index 100% rename from doc/source/users/resources/cluster/index.rst rename to doc/source/user/resources/clustering/index.rst diff --git a/doc/source/user/resources/clustering/v1/action.rst b/doc/source/user/resources/clustering/v1/action.rst new file mode 100644 index 0000000000..a12aa284b3 --- /dev/null +++ b/doc/source/user/resources/clustering/v1/action.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.action +============================== + +.. automodule:: openstack.clustering.v1.action + +The Action Class +---------------- + +The ``Action`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.action.Action + :members: diff --git a/doc/source/user/resources/clustering/v1/build_info.rst b/doc/source/user/resources/clustering/v1/build_info.rst new file mode 100644 index 0000000000..d84754f279 --- /dev/null +++ b/doc/source/user/resources/clustering/v1/build_info.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.build_info +================================== + +.. automodule:: openstack.clustering.v1.build_info + +The BuildInfo Class +------------------- + +The ``BuildInfo`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.build_info.BuildInfo + :members: diff --git a/doc/source/user/resources/clustering/v1/cluster.rst b/doc/source/user/resources/clustering/v1/cluster.rst new file mode 100644 index 0000000000..779c7d6ce0 --- /dev/null +++ b/doc/source/user/resources/clustering/v1/cluster.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.Cluster +=============================== + +.. automodule:: openstack.clustering.v1.cluster + +The Cluster Class +----------------- + +The ``Cluster`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.clustering.v1.cluster.Cluster + :members: diff --git a/doc/source/user/resources/clustering/v1/cluster_policy.rst b/doc/source/user/resources/clustering/v1/cluster_policy.rst new file mode 100644 index 0000000000..58ae94374e --- /dev/null +++ b/doc/source/user/resources/clustering/v1/cluster_policy.rst @@ -0,0 +1,13 @@ +openstack.clustering.v1.cluster_policy +====================================== + +.. automodule:: openstack.clustering.v1.cluster_policy + +The ClusterPolicy Class +----------------------- + +The ``ClusterPolicy`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.cluster_policy.ClusterPolicy + :members: diff --git a/doc/source/user/resources/clustering/v1/event.rst b/doc/source/user/resources/clustering/v1/event.rst new file mode 100644 index 0000000000..decc992e29 --- /dev/null +++ b/doc/source/user/resources/clustering/v1/event.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.event +============================= + +.. automodule:: openstack.clustering.v1.event + +The Event Class +--------------- + +The ``Event`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.event.Event + :members: diff --git a/doc/source/user/resources/clustering/v1/node.rst b/doc/source/user/resources/clustering/v1/node.rst new file mode 100644 index 0000000000..3cab1ec47a --- /dev/null +++ b/doc/source/user/resources/clustering/v1/node.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.Node +============================ + +.. automodule:: openstack.clustering.v1.node + +The Node Class +-------------- + +The ``Node`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.clustering.v1.node.Node + :members: diff --git a/doc/source/user/resources/clustering/v1/policy.rst b/doc/source/user/resources/clustering/v1/policy.rst new file mode 100644 index 0000000000..00b832ed04 --- /dev/null +++ b/doc/source/user/resources/clustering/v1/policy.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.policy +============================== + +.. automodule:: openstack.clustering.v1.policy + +The Policy Class +---------------- + +The ``Policy`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.policy.Policy + :members: diff --git a/doc/source/user/resources/clustering/v1/policy_type.rst b/doc/source/user/resources/clustering/v1/policy_type.rst new file mode 100644 index 0000000000..ad665f9eda --- /dev/null +++ b/doc/source/user/resources/clustering/v1/policy_type.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.policy_type +=================================== + +.. automodule:: openstack.clustering.v1.policy_type + +The PolicyType Class +-------------------- + +The ``PolicyType`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.policy_type.PolicyType + :members: diff --git a/doc/source/user/resources/clustering/v1/profile.rst b/doc/source/user/resources/clustering/v1/profile.rst new file mode 100644 index 0000000000..c114e0c36c --- /dev/null +++ b/doc/source/user/resources/clustering/v1/profile.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.profile +=============================== + +.. automodule:: openstack.clustering.v1.profile + +The Profile Class +----------------- + +The ``Profile`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.clustering.v1.profile.Profile + :members: diff --git a/doc/source/user/resources/clustering/v1/profile_type.rst b/doc/source/user/resources/clustering/v1/profile_type.rst new file mode 100644 index 0000000000..d8534c0d9f --- /dev/null +++ b/doc/source/user/resources/clustering/v1/profile_type.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.profile_type +==================================== + +.. automodule:: openstack.clustering.v1.profile_type + +The ProfileType Class +--------------------- + +The ``ProfileType`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.profile_type.ProfileType + :members: diff --git a/doc/source/user/resources/clustering/v1/receiver.rst b/doc/source/user/resources/clustering/v1/receiver.rst new file mode 100644 index 0000000000..9cdd4e5d7b --- /dev/null +++ b/doc/source/user/resources/clustering/v1/receiver.rst @@ -0,0 +1,12 @@ +openstack.clustering.v1.receiver +================================ + +.. automodule:: openstack.clustering.v1.receiver + +The Receiver Class +------------------ + +The ``Receiver`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.clustering.v1.receiver.Receiver + :members: diff --git a/doc/source/user/resources/compute/index.rst b/doc/source/user/resources/compute/index.rst new file mode 100644 index 0000000000..b982b0e67b --- /dev/null +++ b/doc/source/user/resources/compute/index.rst @@ -0,0 +1,9 @@ +Compute Resources +================= + +.. toctree:: + :maxdepth: 1 + :glob: + + v2/* + version diff --git a/doc/source/user/resources/compute/v2/aggregate.rst b/doc/source/user/resources/compute/v2/aggregate.rst new file mode 100644 index 0000000000..0d786887f3 --- /dev/null +++ b/doc/source/user/resources/compute/v2/aggregate.rst @@ -0,0 +1,12 @@ +openstack.compute.v2.aggregate +============================== + +.. 
automodule:: openstack.compute.v2.aggregate + +The Aggregate Class +------------------- + +The ``Aggregate`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.aggregate.Aggregate + :members: diff --git a/doc/source/user/resources/compute/v2/availability_zone.rst b/doc/source/user/resources/compute/v2/availability_zone.rst new file mode 100644 index 0000000000..8d58bb7324 --- /dev/null +++ b/doc/source/user/resources/compute/v2/availability_zone.rst @@ -0,0 +1,13 @@ +openstack.compute.v2.availability_zone +====================================== + +.. automodule:: openstack.compute.v2.availability_zone + +The AvailabilityZone Class +-------------------------- + +The ``AvailabilityZone`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.availability_zone.AvailabilityZone + :members: diff --git a/doc/source/user/resources/compute/v2/console_auth_token.rst b/doc/source/user/resources/compute/v2/console_auth_token.rst new file mode 100644 index 0000000000..86b0718fda --- /dev/null +++ b/doc/source/user/resources/compute/v2/console_auth_token.rst @@ -0,0 +1,13 @@ +openstack.compute.v2.console_auth_token +======================================= + +.. automodule:: openstack.compute.v2.console_auth_token + +The ConsoleAuthToken Class +-------------------------- + +The ``ConsoleAuthToken`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.compute.v2.console_auth_token.ConsoleAuthToken + :members: diff --git a/doc/source/users/resources/compute/v2/extension.rst b/doc/source/user/resources/compute/v2/extension.rst similarity index 100% rename from doc/source/users/resources/compute/v2/extension.rst rename to doc/source/user/resources/compute/v2/extension.rst diff --git a/doc/source/user/resources/compute/v2/flavor.rst b/doc/source/user/resources/compute/v2/flavor.rst new file mode 100644 index 0000000000..45fee1b1eb --- /dev/null +++ b/doc/source/user/resources/compute/v2/flavor.rst @@ -0,0 +1,21 @@ +openstack.compute.v2.flavor +=========================== + +.. automodule:: openstack.compute.v2.flavor + +The Flavor Class +---------------- + +The ``Flavor`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.flavor.Flavor + :members: + +The FlavorDetail Class +---------------------- + +The ``FlavorDetail`` class inherits from +:class:`~openstack.compute.v2.flavor.Flavor`. + +.. autoclass:: openstack.compute.v2.flavor.FlavorDetail + :members: diff --git a/doc/source/user/resources/compute/v2/hypervisor.rst b/doc/source/user/resources/compute/v2/hypervisor.rst new file mode 100644 index 0000000000..6959db4ac2 --- /dev/null +++ b/doc/source/user/resources/compute/v2/hypervisor.rst @@ -0,0 +1,12 @@ +openstack.compute.v2.hypervisor +=============================== + +.. automodule:: openstack.compute.v2.hypervisor + +The Hypervisor Class +-------------------- + +The ``Hypervisor`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.compute.v2.hypervisor.Hypervisor + :members: diff --git a/doc/source/users/resources/compute/v2/image.rst b/doc/source/user/resources/compute/v2/image.rst similarity index 100% rename from doc/source/users/resources/compute/v2/image.rst rename to doc/source/user/resources/compute/v2/image.rst diff --git a/doc/source/users/resources/compute/v2/keypair.rst b/doc/source/user/resources/compute/v2/keypair.rst similarity index 100% rename from doc/source/users/resources/compute/v2/keypair.rst rename to doc/source/user/resources/compute/v2/keypair.rst diff --git a/doc/source/user/resources/compute/v2/limits.rst b/doc/source/user/resources/compute/v2/limits.rst new file mode 100644 index 0000000000..f30ff2ffc8 --- /dev/null +++ b/doc/source/user/resources/compute/v2/limits.rst @@ -0,0 +1,29 @@ +openstack.compute.v2.limits +=========================== + +.. automodule:: openstack.compute.v2.limits + +The Limits Class +---------------- + +The ``Limits`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.limits.Limits + :members: + +The AbsoluteLimits Class +------------------------ + +The ``AbsoluteLimits`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.limits.AbsoluteLimits + :members: + +The RateLimit Class +------------------- + +The ``RateLimit`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.limits.RateLimit + :members: diff --git a/doc/source/user/resources/compute/v2/migration.rst b/doc/source/user/resources/compute/v2/migration.rst new file mode 100644 index 0000000000..ba02f3e177 --- /dev/null +++ b/doc/source/user/resources/compute/v2/migration.rst @@ -0,0 +1,12 @@ +openstack.compute.v2.migration +============================== + +.. 
automodule:: openstack.compute.v2.migration + +The Migration Class +------------------- + +The ``Migration`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.migration.Migration + :members: diff --git a/doc/source/user/resources/compute/v2/quota_set.rst b/doc/source/user/resources/compute/v2/quota_set.rst new file mode 100644 index 0000000000..8a5d91dfc1 --- /dev/null +++ b/doc/source/user/resources/compute/v2/quota_set.rst @@ -0,0 +1,12 @@ +openstack.compute.v2.quota_set +============================== + +.. automodule:: openstack.compute.v2.quota_set + +The QuotaSet Class +------------------ + +The ``QuotaSet`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.quota_set.QuotaSet + :members: diff --git a/doc/source/users/resources/compute/v2/server.rst b/doc/source/user/resources/compute/v2/server.rst similarity index 89% rename from doc/source/users/resources/compute/v2/server.rst rename to doc/source/user/resources/compute/v2/server.rst index 6f0ddbb7d1..5dc072c75a 100644 --- a/doc/source/users/resources/compute/v2/server.rst +++ b/doc/source/user/resources/compute/v2/server.rst @@ -1,5 +1,5 @@ openstack.compute.v2.server -============================ +=========================== .. automodule:: openstack.compute.v2.server diff --git a/doc/source/user/resources/compute/v2/server_action.rst b/doc/source/user/resources/compute/v2/server_action.rst new file mode 100644 index 0000000000..a3afefe895 --- /dev/null +++ b/doc/source/user/resources/compute/v2/server_action.rst @@ -0,0 +1,22 @@ +openstack.compute.v2.server_action +================================== + +.. automodule:: openstack.compute.v2.server_action + +The ServerAction Class +---------------------- + +The ``ServerAction`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.compute.v2.server_action.ServerAction + :members: + +The ServerActionEvent Class +--------------------------- + +The ``ServerActionEvent`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.server_action.ServerActionEvent + :members: diff --git a/doc/source/user/resources/compute/v2/server_diagnostics.rst b/doc/source/user/resources/compute/v2/server_diagnostics.rst new file mode 100644 index 0000000000..7b00e93bf6 --- /dev/null +++ b/doc/source/user/resources/compute/v2/server_diagnostics.rst @@ -0,0 +1,13 @@ +openstack.compute.v2.server_diagnostics +======================================= + +.. automodule:: openstack.compute.v2.server_diagnostics + +The ServerDiagnostics Class +--------------------------- + +The ``ServerDiagnostics`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.server_diagnostics.ServerDiagnostics + :members: diff --git a/doc/source/user/resources/compute/v2/server_group.rst b/doc/source/user/resources/compute/v2/server_group.rst new file mode 100644 index 0000000000..be84fe6daf --- /dev/null +++ b/doc/source/user/resources/compute/v2/server_group.rst @@ -0,0 +1,13 @@ +openstack.compute.v2.server_group +================================= + +.. automodule:: openstack.compute.v2.server_group + +The ServerGroup Class +--------------------- + +The ``ServerGroup`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.compute.v2.server_group.ServerGroup + :members: diff --git a/doc/source/users/resources/compute/v2/server_interface.rst b/doc/source/user/resources/compute/v2/server_interface.rst similarity index 76% rename from doc/source/users/resources/compute/v2/server_interface.rst rename to doc/source/user/resources/compute/v2/server_interface.rst index e67d9964a9..922606fb45 100644 --- a/doc/source/users/resources/compute/v2/server_interface.rst +++ b/doc/source/user/resources/compute/v2/server_interface.rst @@ -6,7 +6,8 @@ openstack.compute.v2.server_interface The ServerInterface Class ------------------------- -The ``ServerInterface`` class inherits from :class:`~openstack.resource.Resource`. +The ``ServerInterface`` class inherits from +:class:`~openstack.resource.Resource`. .. autoclass:: openstack.compute.v2.server_interface.ServerInterface :members: diff --git a/doc/source/users/resources/compute/v2/server_ip.rst b/doc/source/user/resources/compute/v2/server_ip.rst similarity index 100% rename from doc/source/users/resources/compute/v2/server_ip.rst rename to doc/source/user/resources/compute/v2/server_ip.rst diff --git a/doc/source/user/resources/compute/v2/server_migration.rst b/doc/source/user/resources/compute/v2/server_migration.rst new file mode 100644 index 0000000000..6587f596fb --- /dev/null +++ b/doc/source/user/resources/compute/v2/server_migration.rst @@ -0,0 +1,13 @@ +openstack.compute.v2.server_migration +===================================== + +.. automodule:: openstack.compute.v2.server_migration + +The ServerMigration Class +------------------------- + +The ``ServerMigration`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.compute.v2.server_migration.ServerMigration + :members: diff --git a/doc/source/user/resources/compute/v2/server_remote_console.rst b/doc/source/user/resources/compute/v2/server_remote_console.rst new file mode 100644 index 0000000000..9ce7c0851e --- /dev/null +++ b/doc/source/user/resources/compute/v2/server_remote_console.rst @@ -0,0 +1,13 @@ +openstack.compute.v2.server_remote_console +========================================== + +.. automodule:: openstack.compute.v2.server_remote_console + +The ServerRemoteConsole Class +----------------------------- + +The ``ServerRemoteConsole`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.server_remote_console.ServerRemoteConsole + :members: diff --git a/doc/source/user/resources/compute/v2/service.rst b/doc/source/user/resources/compute/v2/service.rst new file mode 100644 index 0000000000..ac0fdef664 --- /dev/null +++ b/doc/source/user/resources/compute/v2/service.rst @@ -0,0 +1,12 @@ +openstack.compute.v2.service +============================ + +.. automodule:: openstack.compute.v2.service + +The Service Class +----------------- + +The ``Service`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.service.Service + :members: diff --git a/doc/source/user/resources/compute/v2/usage.rst b/doc/source/user/resources/compute/v2/usage.rst new file mode 100644 index 0000000000..1be8f6be8c --- /dev/null +++ b/doc/source/user/resources/compute/v2/usage.rst @@ -0,0 +1,20 @@ +openstack.compute.v2.usage +========================== + +.. automodule:: openstack.compute.v2.usage + +The Usage Class +--------------- + +The ``Usage`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.usage.Usage + :members: + +The ServerUsage Class +--------------------- + +The ``ServerUsage`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.compute.v2.usage.ServerUsage + :members: diff --git a/doc/source/user/resources/compute/v2/volume_attachment.rst b/doc/source/user/resources/compute/v2/volume_attachment.rst new file mode 100644 index 0000000000..74d4be3f58 --- /dev/null +++ b/doc/source/user/resources/compute/v2/volume_attachment.rst @@ -0,0 +1,13 @@ +openstack.compute.v2.volume_attachment +====================================== + +.. automodule:: openstack.compute.v2.volume_attachment + +The VolumeAttachment Class +-------------------------- + +The ``VolumeAttachment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.v2.volume_attachment.VolumeAttachment + :members: diff --git a/doc/source/user/resources/compute/version.rst b/doc/source/user/resources/compute/version.rst new file mode 100644 index 0000000000..0191df12b7 --- /dev/null +++ b/doc/source/user/resources/compute/version.rst @@ -0,0 +1,12 @@ +openstack.compute.version +========================= + +.. automodule:: openstack.compute.version + +The Version Class +----------------- + +The ``Version`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.compute.version.Version + :members: diff --git a/doc/source/user/resources/container_infrastructure_management/cluster.rst b/doc/source/user/resources/container_infrastructure_management/cluster.rst new file mode 100644 index 0000000000..df44b6fade --- /dev/null +++ b/doc/source/user/resources/container_infrastructure_management/cluster.rst @@ -0,0 +1,12 @@ +openstack.container_infrastructure_management.v1.cluster +======================================================== + +.. automodule:: openstack.container_infrastructure_management.v1.cluster + +The Cluster Class +------------------ + +The ``Cluster`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.container_infrastructure_management.v1.cluster.Cluster + :members: diff --git a/doc/source/user/resources/container_infrastructure_management/cluster_certificate.rst b/doc/source/user/resources/container_infrastructure_management/cluster_certificate.rst new file mode 100644 index 0000000000..6dbb3b1021 --- /dev/null +++ b/doc/source/user/resources/container_infrastructure_management/cluster_certificate.rst @@ -0,0 +1,13 @@ +openstack.container_infrastructure_management.v1.cluster_certificate +==================================================================== + +.. automodule:: openstack.container_infrastructure_management.v1.cluster_certificate + +The Cluster Certificate Class +----------------------------- + +The ``ClusterCertificate`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate + :members: diff --git a/doc/source/user/resources/container_infrastructure_management/cluster_template.rst b/doc/source/user/resources/container_infrastructure_management/cluster_template.rst new file mode 100644 index 0000000000..fbf17725a6 --- /dev/null +++ b/doc/source/user/resources/container_infrastructure_management/cluster_template.rst @@ -0,0 +1,13 @@ +openstack.container_infrastructure_management.v1.cluster_template +================================================================= + +.. automodule:: openstack.container_infrastructure_management.v1.cluster_template + +The Cluster Template Class +-------------------------- + +The ``ClusterTemplate`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate + :members: diff --git a/doc/source/user/resources/container_infrastructure_management/index.rst b/doc/source/user/resources/container_infrastructure_management/index.rst new file mode 100644 index 0000000000..90fdb8558c --- /dev/null +++ b/doc/source/user/resources/container_infrastructure_management/index.rst @@ -0,0 +1,10 @@ +Container Infrastructure Management Resources +============================================= + +.. toctree:: + :maxdepth: 1 + + cluster + cluster_certificate + cluster_template + service diff --git a/doc/source/user/resources/container_infrastructure_management/service.rst b/doc/source/user/resources/container_infrastructure_management/service.rst new file mode 100644 index 0000000000..2be9147d46 --- /dev/null +++ b/doc/source/user/resources/container_infrastructure_management/service.rst @@ -0,0 +1,12 @@ +openstack.container_infrastructure_management.v1.service +======================================================== + +.. automodule:: openstack.container_infrastructure_management.v1.service + +The Service Class +----------------- + +The ``Service`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.container_infrastructure_management.v1.service.Service + :members: diff --git a/doc/source/user/resources/database/index.rst b/doc/source/user/resources/database/index.rst new file mode 100644 index 0000000000..3218b40a78 --- /dev/null +++ b/doc/source/user/resources/database/index.rst @@ -0,0 +1,10 @@ +Database Resources +================== + +.. 
toctree:: + :maxdepth: 1 + + v1/database + v1/flavor + v1/instance + v1/user diff --git a/doc/source/users/resources/database/v1/database.rst b/doc/source/user/resources/database/v1/database.rst similarity index 100% rename from doc/source/users/resources/database/v1/database.rst rename to doc/source/user/resources/database/v1/database.rst diff --git a/doc/source/users/resources/database/v1/flavor.rst b/doc/source/user/resources/database/v1/flavor.rst similarity index 100% rename from doc/source/users/resources/database/v1/flavor.rst rename to doc/source/user/resources/database/v1/flavor.rst diff --git a/doc/source/users/resources/database/v1/instance.rst b/doc/source/user/resources/database/v1/instance.rst similarity index 100% rename from doc/source/users/resources/database/v1/instance.rst rename to doc/source/user/resources/database/v1/instance.rst diff --git a/doc/source/users/resources/database/v1/user.rst b/doc/source/user/resources/database/v1/user.rst similarity index 100% rename from doc/source/users/resources/database/v1/user.rst rename to doc/source/user/resources/database/v1/user.rst diff --git a/doc/source/user/resources/dns/index.rst b/doc/source/user/resources/dns/index.rst new file mode 100644 index 0000000000..576c16c9fd --- /dev/null +++ b/doc/source/user/resources/dns/index.rst @@ -0,0 +1,18 @@ +DNS Resources +============= + +.. toctree:: + :maxdepth: 1 + + v2/zone + v2/zone_transfer + v2/zone_export + v2/zone_import + v2/zone_share + v2/floating_ip + v2/tld + v2/recordset + v2/limit + v2/quota + v2/service_status + v2/blacklist diff --git a/doc/source/user/resources/dns/v2/blacklist.rst b/doc/source/user/resources/dns/v2/blacklist.rst new file mode 100644 index 0000000000..d483eb9a14 --- /dev/null +++ b/doc/source/user/resources/dns/v2/blacklist.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.blacklist +========================== + +.. 
automodule:: openstack.dns.v2.blacklist + +The Blacklist Class +------------------- + +The ``Blacklist`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.blacklist.Blacklist + :members: diff --git a/doc/source/user/resources/dns/v2/floating_ip.rst b/doc/source/user/resources/dns/v2/floating_ip.rst new file mode 100644 index 0000000000..d616e71a92 --- /dev/null +++ b/doc/source/user/resources/dns/v2/floating_ip.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.floating_ip +============================ + +.. automodule:: openstack.dns.v2.floating_ip + +The FloatingIP Class +-------------------- + +The ``FloatingIP`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.floating_ip.FloatingIP + :members: diff --git a/doc/source/user/resources/dns/v2/limit.rst b/doc/source/user/resources/dns/v2/limit.rst new file mode 100644 index 0000000000..b5af05ea26 --- /dev/null +++ b/doc/source/user/resources/dns/v2/limit.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.limit +====================== + +.. automodule:: openstack.dns.v2.limit + +The Limit Class +--------------- + +The ``Limit`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.limit.Limit + :members: diff --git a/doc/source/user/resources/dns/v2/quota.rst b/doc/source/user/resources/dns/v2/quota.rst new file mode 100644 index 0000000000..9ad1f63041 --- /dev/null +++ b/doc/source/user/resources/dns/v2/quota.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.quota +====================== + +.. automodule:: openstack.dns.v2.quota + +The Quota Class +--------------- + +The ``Quota`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.dns.v2.quota.Quota + :members: diff --git a/doc/source/user/resources/dns/v2/recordset.rst b/doc/source/user/resources/dns/v2/recordset.rst new file mode 100644 index 0000000000..c02302f2d5 --- /dev/null +++ b/doc/source/user/resources/dns/v2/recordset.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.recordset +========================== + +.. automodule:: openstack.dns.v2.recordset + +The Recordset Class +------------------- + +The ``Recordset`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.recordset.Recordset + :members: diff --git a/doc/source/user/resources/dns/v2/service_status.rst b/doc/source/user/resources/dns/v2/service_status.rst new file mode 100644 index 0000000000..492f9182a3 --- /dev/null +++ b/doc/source/user/resources/dns/v2/service_status.rst @@ -0,0 +1,13 @@ +openstack.dns.v2.service_status +=============================== + +.. automodule:: openstack.dns.v2.service_status + +The ServiceStatus Class +----------------------- + +The ``ServiceStatus`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.service_status.ServiceStatus + :members: diff --git a/doc/source/user/resources/dns/v2/tld.rst b/doc/source/user/resources/dns/v2/tld.rst new file mode 100644 index 0000000000..bcafc5cc9c --- /dev/null +++ b/doc/source/user/resources/dns/v2/tld.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.tld +==================== + +.. automodule:: openstack.dns.v2.tld + +The TLD Class +------------- + +The ``TLD`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.tld.TLD + :members: diff --git a/doc/source/user/resources/dns/v2/zone.rst b/doc/source/user/resources/dns/v2/zone.rst new file mode 100644 index 0000000000..634bd8f3f4 --- /dev/null +++ b/doc/source/user/resources/dns/v2/zone.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.zone +===================== + +.. 
automodule:: openstack.dns.v2.zone + +The Zone Class +-------------- + +The ``Zone`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.zone.Zone + :members: diff --git a/doc/source/user/resources/dns/v2/zone_export.rst b/doc/source/user/resources/dns/v2/zone_export.rst new file mode 100644 index 0000000000..2c2baa3eee --- /dev/null +++ b/doc/source/user/resources/dns/v2/zone_export.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.zone_export +============================ + +.. automodule:: openstack.dns.v2.zone_export + +The ZoneExport Class +-------------------- + +The ``ZoneExport`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.zone_export.ZoneExport + :members: diff --git a/doc/source/user/resources/dns/v2/zone_import.rst b/doc/source/user/resources/dns/v2/zone_import.rst new file mode 100644 index 0000000000..5836f539db --- /dev/null +++ b/doc/source/user/resources/dns/v2/zone_import.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.zone_import +============================ + +.. automodule:: openstack.dns.v2.zone_import + +The ZoneImport Class +-------------------- + +The ``ZoneImport`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.zone_import.ZoneImport + :members: diff --git a/doc/source/user/resources/dns/v2/zone_share.rst b/doc/source/user/resources/dns/v2/zone_share.rst new file mode 100644 index 0000000000..5b0d02836b --- /dev/null +++ b/doc/source/user/resources/dns/v2/zone_share.rst @@ -0,0 +1,12 @@ +openstack.dns.v2.zone_share +=========================== + +.. automodule:: openstack.dns.v2.zone_share + +The ZoneShare Class +------------------- + +The ``ZoneShare`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.dns.v2.zone_share.ZoneShare + :members: diff --git a/doc/source/user/resources/dns/v2/zone_transfer.rst b/doc/source/user/resources/dns/v2/zone_transfer.rst new file mode 100644 index 0000000000..9f5c2c4c43 --- /dev/null +++ b/doc/source/user/resources/dns/v2/zone_transfer.rst @@ -0,0 +1,20 @@ +openstack.dns.v2.zone_transfer +============================== + +.. automodule:: openstack.dns.v2.zone_transfer + +The ZoneTransferRequest Class +----------------------------- + +The ``ZoneTransferRequest`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.zone_transfer.ZoneTransferRequest + :members: + +The ZoneTransferAccept Class +---------------------------- + +The ``ZoneTransferAccept`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.dns.v2.zone_transfer.ZoneTransferAccept + :members: diff --git a/doc/source/user/resources/identity/index.rst b/doc/source/user/resources/identity/index.rst new file mode 100644 index 0000000000..82d7df6319 --- /dev/null +++ b/doc/source/user/resources/identity/index.rst @@ -0,0 +1,27 @@ +Identity Resources +================== + +Identity v2 Resources +--------------------- +.. toctree:: + :maxdepth: 1 + :glob: + + v2/* + +Identity v3 Resources +--------------------- + +.. toctree:: + :maxdepth: 1 + :glob: + + v3/* + +Other Resources +--------------- + +.. 
toctree:: + :maxdepth: 1 + + version diff --git a/doc/source/users/resources/identity/v2/extension.rst b/doc/source/user/resources/identity/v2/extension.rst similarity index 100% rename from doc/source/users/resources/identity/v2/extension.rst rename to doc/source/user/resources/identity/v2/extension.rst diff --git a/doc/source/users/resources/identity/v2/role.rst b/doc/source/user/resources/identity/v2/role.rst similarity index 100% rename from doc/source/users/resources/identity/v2/role.rst rename to doc/source/user/resources/identity/v2/role.rst diff --git a/doc/source/users/resources/identity/v2/tenant.rst b/doc/source/user/resources/identity/v2/tenant.rst similarity index 100% rename from doc/source/users/resources/identity/v2/tenant.rst rename to doc/source/user/resources/identity/v2/tenant.rst diff --git a/doc/source/users/resources/identity/v2/user.rst b/doc/source/user/resources/identity/v2/user.rst similarity index 100% rename from doc/source/users/resources/identity/v2/user.rst rename to doc/source/user/resources/identity/v2/user.rst diff --git a/doc/source/user/resources/identity/v3/application_credential.rst b/doc/source/user/resources/identity/v3/application_credential.rst new file mode 100644 index 0000000000..ee7b1dc14b --- /dev/null +++ b/doc/source/user/resources/identity/v3/application_credential.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.application_credential +============================================ + +.. automodule:: openstack.identity.v3.application_credential + +The ApplicationCredential Class +------------------------------- + +The ``ApplicationCredential`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.identity.v3.application_credential.ApplicationCredential + :members: diff --git a/doc/source/users/resources/identity/v3/credential.rst b/doc/source/user/resources/identity/v3/credential.rst similarity index 100% rename from doc/source/users/resources/identity/v3/credential.rst rename to doc/source/user/resources/identity/v3/credential.rst diff --git a/doc/source/users/resources/identity/v3/domain.rst b/doc/source/user/resources/identity/v3/domain.rst similarity index 100% rename from doc/source/users/resources/identity/v3/domain.rst rename to doc/source/user/resources/identity/v3/domain.rst diff --git a/doc/source/user/resources/identity/v3/domain_config.rst b/doc/source/user/resources/identity/v3/domain_config.rst new file mode 100644 index 0000000000..28defa2374 --- /dev/null +++ b/doc/source/user/resources/identity/v3/domain_config.rst @@ -0,0 +1,12 @@ +openstack.identity.v3.domain_config +=================================== + +.. automodule:: openstack.identity.v3.domain_config + +The DomainConfig Class +---------------------- + +The ``DomainConfig`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.domain_config.DomainConfig + :members: diff --git a/doc/source/users/resources/identity/v3/endpoint.rst b/doc/source/user/resources/identity/v3/endpoint.rst similarity index 100% rename from doc/source/users/resources/identity/v3/endpoint.rst rename to doc/source/user/resources/identity/v3/endpoint.rst diff --git a/doc/source/user/resources/identity/v3/federation_protocol.rst b/doc/source/user/resources/identity/v3/federation_protocol.rst new file mode 100644 index 0000000000..4cd8a97129 --- /dev/null +++ b/doc/source/user/resources/identity/v3/federation_protocol.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.federation_protocol +========================================= + +.. 
automodule:: openstack.identity.v3.federation_protocol + +The FederationProtocol Class +---------------------------- + +The ``FederationProtocol`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.federation_protocol.FederationProtocol + :members: diff --git a/doc/source/user/resources/identity/v3/group.rst b/doc/source/user/resources/identity/v3/group.rst new file mode 100644 index 0000000000..d1215113d9 --- /dev/null +++ b/doc/source/user/resources/identity/v3/group.rst @@ -0,0 +1,21 @@ +openstack.identity.v3.group +=========================== + +.. automodule:: openstack.identity.v3.group + +The Group Class +--------------- + +The ``Group`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.group.Group + :members: + +The UserGroup Class +------------------- + +The ``UserGroup`` class inherits from +:class:`~openstack.identity.v3.group.Group` + +.. autoclass:: openstack.identity.v3.group.UserGroup + :members: diff --git a/doc/source/user/resources/identity/v3/identity_provider.rst b/doc/source/user/resources/identity/v3/identity_provider.rst new file mode 100644 index 0000000000..1b1bc7642b --- /dev/null +++ b/doc/source/user/resources/identity/v3/identity_provider.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.identity_provider +======================================= + +.. automodule:: openstack.identity.v3.identity_provider + +The IdentityProvider Class +-------------------------- + +The ``IdentityProvider`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.identity_provider.IdentityProvider + :members: diff --git a/doc/source/user/resources/identity/v3/limit.rst b/doc/source/user/resources/identity/v3/limit.rst new file mode 100644 index 0000000000..f8a8174e4c --- /dev/null +++ b/doc/source/user/resources/identity/v3/limit.rst @@ -0,0 +1,12 @@ +openstack.identity.v3.limit +=========================== + +.. 
automodule:: openstack.identity.v3.limit + +The Limit Class +--------------- + +The ``Limit`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.limit.Limit + :members: diff --git a/doc/source/user/resources/identity/v3/mapping.rst b/doc/source/user/resources/identity/v3/mapping.rst new file mode 100644 index 0000000000..dd242f9585 --- /dev/null +++ b/doc/source/user/resources/identity/v3/mapping.rst @@ -0,0 +1,12 @@ +openstack.identity.v3.mapping +============================= + +.. automodule:: openstack.identity.v3.mapping + +The Mapping Class +----------------- + +The ``Mapping`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.mapping.Mapping + :members: diff --git a/doc/source/users/resources/identity/v3/policy.rst b/doc/source/user/resources/identity/v3/policy.rst similarity index 100% rename from doc/source/users/resources/identity/v3/policy.rst rename to doc/source/user/resources/identity/v3/policy.rst diff --git a/doc/source/users/resources/identity/v3/project.rst b/doc/source/user/resources/identity/v3/project.rst similarity index 100% rename from doc/source/users/resources/identity/v3/project.rst rename to doc/source/user/resources/identity/v3/project.rst diff --git a/doc/source/user/resources/identity/v3/region.rst b/doc/source/user/resources/identity/v3/region.rst new file mode 100644 index 0000000000..ba334a76ae --- /dev/null +++ b/doc/source/user/resources/identity/v3/region.rst @@ -0,0 +1,12 @@ +openstack.identity.v3.region +============================ + +.. automodule:: openstack.identity.v3.region + +The Region Class +---------------- + +The ``Region`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.identity.v3.region.Region + :members: diff --git a/doc/source/user/resources/identity/v3/registered_limit.rst b/doc/source/user/resources/identity/v3/registered_limit.rst new file mode 100644 index 0000000000..8ecdfe3f5a --- /dev/null +++ b/doc/source/user/resources/identity/v3/registered_limit.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.registered_limit +====================================== + +.. automodule:: openstack.identity.v3.registered_limit + +The RegisteredLimit Class +------------------------- + +The ``RegisteredLimit`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.registered_limit.RegisteredLimit + :members: diff --git a/doc/source/user/resources/identity/v3/role.rst b/doc/source/user/resources/identity/v3/role.rst new file mode 100644 index 0000000000..baa85e87dc --- /dev/null +++ b/doc/source/user/resources/identity/v3/role.rst @@ -0,0 +1,12 @@ +openstack.identity.v3.role +========================== + +.. automodule:: openstack.identity.v3.role + +The Role Class +-------------- + +The ``Role`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.role.Role + :members: diff --git a/doc/source/user/resources/identity/v3/role_assignment.rst b/doc/source/user/resources/identity/v3/role_assignment.rst new file mode 100644 index 0000000000..abdfcdc9e8 --- /dev/null +++ b/doc/source/user/resources/identity/v3/role_assignment.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.role_assignment +===================================== + +.. automodule:: openstack.identity.v3.role_assignment + +The RoleAssignment Class +------------------------ + +The ``RoleAssignment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.identity.v3.role_assignment.RoleAssignment + :members: diff --git a/doc/source/user/resources/identity/v3/role_domain_group_assignment.rst b/doc/source/user/resources/identity/v3/role_domain_group_assignment.rst new file mode 100644 index 0000000000..8ef6ef9902 --- /dev/null +++ b/doc/source/user/resources/identity/v3/role_domain_group_assignment.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.role_domain_group_assignment +================================================== + +.. automodule:: openstack.identity.v3.role_domain_group_assignment + +The RoleDomainGroupAssignment Class +----------------------------------- + +The ``RoleDomainGroupAssignment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.role_domain_group_assignment.RoleDomainGroupAssignment + :members: diff --git a/doc/source/user/resources/identity/v3/role_domain_user_assignment.rst b/doc/source/user/resources/identity/v3/role_domain_user_assignment.rst new file mode 100644 index 0000000000..063a3d7ec5 --- /dev/null +++ b/doc/source/user/resources/identity/v3/role_domain_user_assignment.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.role_domain_user_assignment +================================================= + +.. automodule:: openstack.identity.v3.role_domain_user_assignment + +The RoleDomainUserAssignment Class +---------------------------------- + +The ``RoleDomainUserAssignment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.identity.v3.role_domain_user_assignment.RoleDomainUserAssignment + :members: diff --git a/doc/source/user/resources/identity/v3/role_project_group_assignment.rst b/doc/source/user/resources/identity/v3/role_project_group_assignment.rst new file mode 100644 index 0000000000..2824e0c5ae --- /dev/null +++ b/doc/source/user/resources/identity/v3/role_project_group_assignment.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.role_project_group_assignment +=================================================== + +.. automodule:: openstack.identity.v3.role_project_group_assignment + +The RoleProjectGroupAssignment Class +------------------------------------ + +The ``RoleProjectGroupAssignment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.role_project_group_assignment.RoleProjectGroupAssignment + :members: diff --git a/doc/source/user/resources/identity/v3/role_project_user_assignment.rst b/doc/source/user/resources/identity/v3/role_project_user_assignment.rst new file mode 100644 index 0000000000..d9f44c9631 --- /dev/null +++ b/doc/source/user/resources/identity/v3/role_project_user_assignment.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.role_project_user_assignment +================================================== + +.. automodule:: openstack.identity.v3.role_project_user_assignment + +The RoleProjectUserAssignment Class +----------------------------------- + +The ``RoleProjectUserAssignment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.identity.v3.role_project_user_assignment.RoleProjectUserAssignment + :members: diff --git a/doc/source/user/resources/identity/v3/role_system_group_assignment.rst b/doc/source/user/resources/identity/v3/role_system_group_assignment.rst new file mode 100644 index 0000000000..5e9771c930 --- /dev/null +++ b/doc/source/user/resources/identity/v3/role_system_group_assignment.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.role_system_group_assignment +================================================== + +.. automodule:: openstack.identity.v3.role_system_group_assignment + +The RoleSystemGroupAssignment Class +----------------------------------- + +The ``RoleSystemGroupAssignment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.role_system_group_assignment.RoleSystemGroupAssignment + :members: diff --git a/doc/source/user/resources/identity/v3/role_system_user_assignment.rst b/doc/source/user/resources/identity/v3/role_system_user_assignment.rst new file mode 100644 index 0000000000..7346d00587 --- /dev/null +++ b/doc/source/user/resources/identity/v3/role_system_user_assignment.rst @@ -0,0 +1,13 @@ +openstack.identity.v3.role_system_user_assignment +================================================= + +.. automodule:: openstack.identity.v3.role_system_user_assignment + +The RoleSystemUserAssignment Class +---------------------------------- + +The ``RoleSystemUserAssignment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.identity.v3.role_system_user_assignment.RoleSystemUserAssignment + :members: diff --git a/doc/source/users/resources/identity/v3/service.rst b/doc/source/user/resources/identity/v3/service.rst similarity index 100% rename from doc/source/users/resources/identity/v3/service.rst rename to doc/source/user/resources/identity/v3/service.rst diff --git a/doc/source/user/resources/identity/v3/system.rst b/doc/source/user/resources/identity/v3/system.rst new file mode 100644 index 0000000000..dcdda5be66 --- /dev/null +++ b/doc/source/user/resources/identity/v3/system.rst @@ -0,0 +1,12 @@ +openstack.identity.v3.system +============================ + +.. automodule:: openstack.identity.v3.system + +The System Class +---------------- + +The ``System`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.v3.system.System + :members: diff --git a/doc/source/user/resources/identity/v3/token.rst b/doc/source/user/resources/identity/v3/token.rst new file mode 100644 index 0000000000..106390c57e --- /dev/null +++ b/doc/source/user/resources/identity/v3/token.rst @@ -0,0 +1,12 @@ +openstack.identity.v3.token +=========================== + +.. automodule:: openstack.identity.v3.token + +The Token Class +--------------- + +The ``Token`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.identity.v3.token.Token + :members: diff --git a/doc/source/users/resources/identity/v3/trust.rst b/doc/source/user/resources/identity/v3/trust.rst similarity index 100% rename from doc/source/users/resources/identity/v3/trust.rst rename to doc/source/user/resources/identity/v3/trust.rst diff --git a/doc/source/users/resources/identity/v3/user.rst b/doc/source/user/resources/identity/v3/user.rst similarity index 100% rename from doc/source/users/resources/identity/v3/user.rst rename to doc/source/user/resources/identity/v3/user.rst diff --git a/doc/source/user/resources/identity/version.rst b/doc/source/user/resources/identity/version.rst new file mode 100644 index 0000000000..1fe3bfb741 --- /dev/null +++ b/doc/source/user/resources/identity/version.rst @@ -0,0 +1,12 @@ +openstack.identity.version +========================== + +.. automodule:: openstack.identity.version + +The Version Class +----------------- + +The ``Version`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.identity.version.Version + :members: diff --git a/doc/source/user/resources/image/index.rst b/doc/source/user/resources/image/index.rst new file mode 100644 index 0000000000..c6ef3d78fc --- /dev/null +++ b/doc/source/user/resources/image/index.rst @@ -0,0 +1,26 @@ +Image Resources +=============== + +Image v1 Resources +------------------ + +.. toctree:: + :maxdepth: 1 + + v1/image + +Image v2 Resources +------------------ + +.. 
toctree:: + :maxdepth: 1 + + v2/image + v2/member + v2/metadef_namespace + v2/metadef_object + v2/metadef_resource_type + v2/metadef_property + v2/metadef_schema + v2/task + v2/service_info diff --git a/doc/source/users/resources/image/v1/image.rst b/doc/source/user/resources/image/v1/image.rst similarity index 100% rename from doc/source/users/resources/image/v1/image.rst rename to doc/source/user/resources/image/v1/image.rst diff --git a/doc/source/users/resources/image/v2/image.rst b/doc/source/user/resources/image/v2/image.rst similarity index 100% rename from doc/source/users/resources/image/v2/image.rst rename to doc/source/user/resources/image/v2/image.rst diff --git a/doc/source/users/resources/image/v2/member.rst b/doc/source/user/resources/image/v2/member.rst similarity index 100% rename from doc/source/users/resources/image/v2/member.rst rename to doc/source/user/resources/image/v2/member.rst diff --git a/doc/source/user/resources/image/v2/metadef_namespace.rst b/doc/source/user/resources/image/v2/metadef_namespace.rst new file mode 100644 index 0000000000..de2c0f5520 --- /dev/null +++ b/doc/source/user/resources/image/v2/metadef_namespace.rst @@ -0,0 +1,13 @@ +openstack.image.v2.metadef_namespace +===================================== + +.. automodule:: openstack.image.v2.metadef_namespace + +The MetadefNamespace Class +---------------------------- + +The ``MetadefNamespace`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.image.v2.metadef_namespace.MetadefNamespace + :members: diff --git a/doc/source/user/resources/image/v2/metadef_object.rst b/doc/source/user/resources/image/v2/metadef_object.rst new file mode 100644 index 0000000000..2f9548a0c4 --- /dev/null +++ b/doc/source/user/resources/image/v2/metadef_object.rst @@ -0,0 +1,13 @@ +openstack.image.v2.metadef_object +================================== + +.. 
automodule:: openstack.image.v2.metadef_object + +The MetadefObject Class +------------------------ + +The ``MetadefObject`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.image.v2.metadef_object.MetadefObject + :members: diff --git a/doc/source/user/resources/image/v2/metadef_property.rst b/doc/source/user/resources/image/v2/metadef_property.rst new file mode 100644 index 0000000000..e70fff6eb9 --- /dev/null +++ b/doc/source/user/resources/image/v2/metadef_property.rst @@ -0,0 +1,13 @@ +openstack.image.v2.metadef_property +=================================== + +.. automodule:: openstack.image.v2.metadef_property + +The MetadefProperty Class +------------------------- + +The ``MetadefProperty`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.image.v2.metadef_property.MetadefProperty + :members: diff --git a/doc/source/user/resources/image/v2/metadef_resource_type.rst b/doc/source/user/resources/image/v2/metadef_resource_type.rst new file mode 100644 index 0000000000..c20ba943e8 --- /dev/null +++ b/doc/source/user/resources/image/v2/metadef_resource_type.rst @@ -0,0 +1,24 @@ +openstack.image.v2.metadef_resource_type +======================================== + +.. automodule:: openstack.image.v2.metadef_resource_type + +The MetadefResourceType Class +----------------------------- + +The ``MetadefResourceType`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.image.v2.metadef_resource_type.MetadefResourceType + :members: + + +The MetadefResourceTypeAssociation Class +---------------------------------------- + +The ``MetadefResourceTypeAssociation`` class inherits +from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: + openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation + :members: diff --git a/doc/source/user/resources/image/v2/metadef_schema.rst b/doc/source/user/resources/image/v2/metadef_schema.rst new file mode 100644 index 0000000000..8332ee48d2 --- /dev/null +++ b/doc/source/user/resources/image/v2/metadef_schema.rst @@ -0,0 +1,13 @@ +openstack.image.v2.metadef_schema +================================= + +.. automodule:: openstack.image.v2.metadef_schema + +The MetadefSchema Class +----------------------- + +The ``MetadefSchema`` class inherits +from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.image.v2.metadef_schema.MetadefSchema + :members: diff --git a/doc/source/user/resources/image/v2/service_info.rst b/doc/source/user/resources/image/v2/service_info.rst new file mode 100644 index 0000000000..92bae8988e --- /dev/null +++ b/doc/source/user/resources/image/v2/service_info.rst @@ -0,0 +1,20 @@ +openstack.image.v2.service_info +=============================== + +.. automodule:: openstack.image.v2.service_info + +The Store Class +---------------- + +The ``Store`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.image.v2.service_info.Store + :members: + +The Import Info Class +--------------------- + +The ``Import`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.image.v2.service_info.Import + :members: diff --git a/doc/source/user/resources/image/v2/task.rst b/doc/source/user/resources/image/v2/task.rst new file mode 100644 index 0000000000..3e6652e995 --- /dev/null +++ b/doc/source/user/resources/image/v2/task.rst @@ -0,0 +1,12 @@ +openstack.image.v2.task +======================= + +.. automodule:: openstack.image.v2.task + +The Task Class +-------------- + +The ``Task`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.image.v2.task.Task + :members: diff --git a/doc/source/user/resources/key_manager/index.rst b/doc/source/user/resources/key_manager/index.rst new file mode 100644 index 0000000000..4f28d3a64d --- /dev/null +++ b/doc/source/user/resources/key_manager/index.rst @@ -0,0 +1,11 @@ +KeyManager Resources +==================== + +.. toctree:: + :maxdepth: 1 + + v1/container + v1/order + v1/project_quota + v1/secret + v1/secret_store diff --git a/doc/source/users/resources/key_manager/v1/container.rst b/doc/source/user/resources/key_manager/v1/container.rst similarity index 88% rename from doc/source/users/resources/key_manager/v1/container.rst rename to doc/source/user/resources/key_manager/v1/container.rst index ef09035dc7..601e7b1811 100644 --- a/doc/source/users/resources/key_manager/v1/container.rst +++ b/doc/source/user/resources/key_manager/v1/container.rst @@ -1,5 +1,5 @@ openstack.key_manager.v1.container -===================================== +================================== .. automodule:: openstack.key_manager.v1.container diff --git a/doc/source/users/resources/key_manager/v1/order.rst b/doc/source/user/resources/key_manager/v1/order.rst similarity index 100% rename from doc/source/users/resources/key_manager/v1/order.rst rename to doc/source/user/resources/key_manager/v1/order.rst diff --git a/doc/source/user/resources/key_manager/v1/project_quota.rst b/doc/source/user/resources/key_manager/v1/project_quota.rst new file mode 100644 index 0000000000..7f1f0ad9da --- /dev/null +++ b/doc/source/user/resources/key_manager/v1/project_quota.rst @@ -0,0 +1,12 @@ +openstack.key_manager.v1.project_quota +====================================== + +.. automodule:: openstack.key_manager.v1.project_quota + +The ProjectQuota Class +---------------------- + +The ``ProjectQuota`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.key_manager.v1.project_quota.ProjectQuota + :members: diff --git a/doc/source/users/resources/key_manager/v1/secret.rst b/doc/source/user/resources/key_manager/v1/secret.rst similarity index 100% rename from doc/source/users/resources/key_manager/v1/secret.rst rename to doc/source/user/resources/key_manager/v1/secret.rst diff --git a/doc/source/user/resources/key_manager/v1/secret_store.rst b/doc/source/user/resources/key_manager/v1/secret_store.rst new file mode 100644 index 0000000000..f40902b503 --- /dev/null +++ b/doc/source/user/resources/key_manager/v1/secret_store.rst @@ -0,0 +1,12 @@ +openstack.key_manager.v1.secret_store +===================================== + +.. automodule:: openstack.key_manager.v1.secret_store + +The SecretStore Class +--------------------- + +The ``SecretStore`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.key_manager.v1.secret_store.SecretStore + :members: diff --git a/doc/source/user/resources/load_balancer/index.rst b/doc/source/user/resources/load_balancer/index.rst new file mode 100644 index 0000000000..1eebf3787e --- /dev/null +++ b/doc/source/user/resources/load_balancer/index.rst @@ -0,0 +1,20 @@ +Load Balancer Resources +======================= + +.. toctree:: + :maxdepth: 1 + + v2/load_balancer + v2/listener + v2/pool + v2/member + v2/health_monitor + v2/l7_policy + v2/l7_rule + v2/provider + v2/flavor_profile + v2/flavor + v2/quota + v2/amphora + v2/availability_zone_profile + v2/availability_zone diff --git a/doc/source/user/resources/load_balancer/v2/amphora.rst b/doc/source/user/resources/load_balancer/v2/amphora.rst new file mode 100644 index 0000000000..c89d1ee86e --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/amphora.rst @@ -0,0 +1,30 @@ +openstack.load_balancer.v2.amphora +================================== + +.. 
automodule:: openstack.load_balancer.v2.amphora + +The Amphora Class +----------------- + +The ``Amphora`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.amphora.Amphora + :members: + +The AmphoraConfig Class +----------------------- + +The ``AmphoraConfig`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.amphora.AmphoraConfig + :members: + +The AmphoraFailover Class +------------------------- + +The ``AmphoraFailover`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.amphora.AmphoraFailover + :members: diff --git a/doc/source/user/resources/load_balancer/v2/availability_zone.rst b/doc/source/user/resources/load_balancer/v2/availability_zone.rst new file mode 100644 index 0000000000..8f8889b50e --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/availability_zone.rst @@ -0,0 +1,13 @@ +openstack.load_balancer.v2.availability_zone +============================================ + +.. automodule:: openstack.load_balancer.v2.availability_zone + +The AvailabilityZone Class +-------------------------- + +The ``AvailabilityZone`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.availability_zone.AvailabilityZone + :members: diff --git a/doc/source/user/resources/load_balancer/v2/availability_zone_profile.rst b/doc/source/user/resources/load_balancer/v2/availability_zone_profile.rst new file mode 100644 index 0000000000..0aa2c6a3b4 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/availability_zone_profile.rst @@ -0,0 +1,13 @@ +openstack.load_balancer.v2.availability_zone_profile +==================================================== + +.. 
automodule:: openstack.load_balancer.v2.availability_zone_profile + +The AvailabilityZoneProfile Class +--------------------------------- + +The ``AvailabilityZoneProfile`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile + :members: diff --git a/doc/source/user/resources/load_balancer/v2/flavor.rst b/doc/source/user/resources/load_balancer/v2/flavor.rst new file mode 100644 index 0000000000..57b97ba0bd --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/flavor.rst @@ -0,0 +1,12 @@ +openstack.load_balancer.v2.flavor +================================= + +.. automodule:: openstack.load_balancer.v2.flavor + +The Flavor Class +---------------- + +The ``Flavor`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.flavor.Flavor + :members: diff --git a/doc/source/user/resources/load_balancer/v2/flavor_profile.rst b/doc/source/user/resources/load_balancer/v2/flavor_profile.rst new file mode 100644 index 0000000000..8a702f9a92 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/flavor_profile.rst @@ -0,0 +1,13 @@ +openstack.load_balancer.v2.flavor_profile +========================================= + +.. automodule:: openstack.load_balancer.v2.flavor_profile + +The FlavorProfile Class +----------------------- + +The ``FlavorProfile`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.flavor_profile.FlavorProfile + :members: diff --git a/doc/source/user/resources/load_balancer/v2/health_monitor.rst b/doc/source/user/resources/load_balancer/v2/health_monitor.rst new file mode 100644 index 0000000000..7f90f42612 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/health_monitor.rst @@ -0,0 +1,13 @@ +openstack.load_balancer.v2.health_monitor +========================================= + +.. 
automodule:: openstack.load_balancer.v2.health_monitor + +The HealthMonitor Class +----------------------- + +The ``HealthMonitor`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.health_monitor.HealthMonitor + :members: diff --git a/doc/source/user/resources/load_balancer/v2/l7_policy.rst b/doc/source/user/resources/load_balancer/v2/l7_policy.rst new file mode 100644 index 0000000000..2a5e6f01c7 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/l7_policy.rst @@ -0,0 +1,12 @@ +openstack.load_balancer.v2.l7_policy +==================================== + +.. automodule:: openstack.load_balancer.v2.l7_policy + +The L7Policy Class +------------------ + +The ``L7Policy`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.l7_policy.L7Policy + :members: diff --git a/doc/source/user/resources/load_balancer/v2/l7_rule.rst b/doc/source/user/resources/load_balancer/v2/l7_rule.rst new file mode 100644 index 0000000000..c661cd676e --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/l7_rule.rst @@ -0,0 +1,12 @@ +openstack.load_balancer.v2.l7_rule +================================== + +.. automodule:: openstack.load_balancer.v2.l7_rule + +The L7Rule Class +---------------- + +The ``L7Rule`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.l7_rule.L7Rule + :members: diff --git a/doc/source/user/resources/load_balancer/v2/listener.rst b/doc/source/user/resources/load_balancer/v2/listener.rst new file mode 100644 index 0000000000..b94bc73b2e --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/listener.rst @@ -0,0 +1,21 @@ +openstack.load_balancer.v2.listener +=================================== + +.. automodule:: openstack.load_balancer.v2.listener + +The Listener Class +------------------ + +The ``Listener`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.load_balancer.v2.listener.Listener + :members: + +The ListenerStats Class +----------------------- + +The ``ListenerStats`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.listener.ListenerStats + :members: diff --git a/doc/source/user/resources/load_balancer/v2/load_balancer.rst b/doc/source/user/resources/load_balancer/v2/load_balancer.rst new file mode 100644 index 0000000000..9e14560299 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/load_balancer.rst @@ -0,0 +1,30 @@ +openstack.load_balancer.v2.load_balancer +======================================== + +.. automodule:: openstack.load_balancer.v2.load_balancer + +The LoadBalancer Class +---------------------- + +The ``LoadBalancer`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.load_balancer.LoadBalancer + :members: + +The LoadBalancerStats Class +--------------------------- + +The ``LoadBalancerStats`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.load_balancer.LoadBalancerStats + :members: + +The LoadBalancerFailover Class +------------------------------ + +The ``LoadBalancerFailover`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.load_balancer.LoadBalancerFailover + :members: diff --git a/doc/source/user/resources/load_balancer/v2/member.rst b/doc/source/user/resources/load_balancer/v2/member.rst new file mode 100644 index 0000000000..1fff01f075 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/member.rst @@ -0,0 +1,12 @@ +openstack.load_balancer.v2.member +================================= + +.. automodule:: openstack.load_balancer.v2.member + +The Member Class +---------------- + +The ``Member`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.load_balancer.v2.member.Member + :members: diff --git a/doc/source/user/resources/load_balancer/v2/pool.rst b/doc/source/user/resources/load_balancer/v2/pool.rst new file mode 100644 index 0000000000..e67f97c29d --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/pool.rst @@ -0,0 +1,12 @@ +openstack.load_balancer.v2.pool +=============================== + +.. automodule:: openstack.load_balancer.v2.pool + +The Pool Class +-------------- + +The ``Pool`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.pool.Pool + :members: diff --git a/doc/source/user/resources/load_balancer/v2/provider.rst b/doc/source/user/resources/load_balancer/v2/provider.rst new file mode 100644 index 0000000000..b9c5af91a9 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/provider.rst @@ -0,0 +1,21 @@ +openstack.load_balancer.v2.provider +=================================== + +.. automodule:: openstack.load_balancer.v2.provider + +The Provider Class +------------------ + +The ``Provider`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.provider.Provider + :members: + +The Provider Flavor Capabilities Class +-------------------------------------- + +The ``ProviderFlavorCapabilities`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.load_balancer.v2.provider.ProviderFlavorCapabilities + :members: diff --git a/doc/source/user/resources/load_balancer/v2/quota.rst b/doc/source/user/resources/load_balancer/v2/quota.rst new file mode 100644 index 0000000000..1bf0335f29 --- /dev/null +++ b/doc/source/user/resources/load_balancer/v2/quota.rst @@ -0,0 +1,12 @@ +openstack.load_balancer.v2.quota +================================ + +.. automodule:: openstack.load_balancer.v2.quota + +The Quota Class +--------------- + +The ``Quota`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.load_balancer.v2.quota.Quota + :members: diff --git a/doc/source/user/resources/network/index.rst b/doc/source/user/resources/network/index.rst new file mode 100644 index 0000000000..ff063779c6 --- /dev/null +++ b/doc/source/user/resources/network/index.rst @@ -0,0 +1,9 @@ +Network Resources +================= + +.. toctree:: + :maxdepth: 1 + :glob: + + v2/* + v2/vpn/index diff --git a/doc/source/user/resources/network/v2/address_group.rst b/doc/source/user/resources/network/v2/address_group.rst new file mode 100644 index 0000000000..34360fc59d --- /dev/null +++ b/doc/source/user/resources/network/v2/address_group.rst @@ -0,0 +1,12 @@ +openstack.network.v2.address_group +================================== + +.. automodule:: openstack.network.v2.address_group + +The AddressGroup Class +---------------------- + +The ``AddressGroup`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.address_group.AddressGroup + :members: diff --git a/doc/source/users/resources/network/v2/address_scope.rst b/doc/source/user/resources/network/v2/address_scope.rst similarity index 100% rename from doc/source/users/resources/network/v2/address_scope.rst rename to doc/source/user/resources/network/v2/address_scope.rst diff --git a/doc/source/user/resources/network/v2/agent.rst b/doc/source/user/resources/network/v2/agent.rst new file mode 100644 index 0000000000..5593e6d94b --- /dev/null +++ b/doc/source/user/resources/network/v2/agent.rst @@ -0,0 +1,12 @@ +openstack.network.v2.agent +========================== + +.. automodule:: openstack.network.v2.agent + +The Agent Class +--------------- + +The ``Agent`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.agent.Agent + :members: diff --git a/doc/source/user/resources/network/v2/auto_allocated_topology.rst b/doc/source/user/resources/network/v2/auto_allocated_topology.rst new file mode 100644 index 0000000000..a3700d0875 --- /dev/null +++ b/doc/source/user/resources/network/v2/auto_allocated_topology.rst @@ -0,0 +1,13 @@ +openstack.network.v2.auto_allocated_topology +============================================ + +.. automodule:: openstack.network.v2.auto_allocated_topology + +The Auto Allocated Topology Class +--------------------------------- + +The ``Auto Allocated Topology`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.auto_allocated_topology.AutoAllocatedTopology + :members: diff --git a/doc/source/user/resources/network/v2/availability_zone.rst b/doc/source/user/resources/network/v2/availability_zone.rst new file mode 100644 index 0000000000..da386ce6d0 --- /dev/null +++ b/doc/source/user/resources/network/v2/availability_zone.rst @@ -0,0 +1,13 @@ +openstack.network.v2.availability_zone +====================================== + +.. automodule:: openstack.network.v2.availability_zone + +The AvailabilityZone Class +-------------------------- + +The ``AvailabilityZone`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.availability_zone.AvailabilityZone + :members: diff --git a/doc/source/user/resources/network/v2/bgp_peer.rst b/doc/source/user/resources/network/v2/bgp_peer.rst new file mode 100644 index 0000000000..35fd5a61a0 --- /dev/null +++ b/doc/source/user/resources/network/v2/bgp_peer.rst @@ -0,0 +1,12 @@ +openstack.network.v2.bgp_peer +============================= + +.. automodule:: openstack.network.v2.bgp_peer + +The BgpPeer Class +----------------- + +The ``BgpPeer`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.bgp_peer.BgpPeer + :members: diff --git a/doc/source/user/resources/network/v2/bgp_speaker.rst b/doc/source/user/resources/network/v2/bgp_speaker.rst new file mode 100644 index 0000000000..147d6b54f5 --- /dev/null +++ b/doc/source/user/resources/network/v2/bgp_speaker.rst @@ -0,0 +1,12 @@ +openstack.network.v2.bgp_speaker +================================ + +.. automodule:: openstack.network.v2.bgp_speaker + +The BgpSpeaker Class +-------------------- + +The ``BgpSpeaker`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.bgp_speaker.BgpSpeaker + :members: diff --git a/doc/source/user/resources/network/v2/bgpvpn.rst b/doc/source/user/resources/network/v2/bgpvpn.rst new file mode 100644 index 0000000000..4c5d8af035 --- /dev/null +++ b/doc/source/user/resources/network/v2/bgpvpn.rst @@ -0,0 +1,12 @@ +openstack.network.v2.bgpvpn +============================= + +.. automodule:: openstack.network.v2.bgpvpn + +The BgpVpn Class +----------------- + +The ``BgpVpn`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.bgpvpn.BgpVpn + :members: diff --git a/doc/source/user/resources/network/v2/bgpvpn_network_association.rst b/doc/source/user/resources/network/v2/bgpvpn_network_association.rst new file mode 100644 index 0000000000..9d78df4361 --- /dev/null +++ b/doc/source/user/resources/network/v2/bgpvpn_network_association.rst @@ -0,0 +1,13 @@ +openstack.network.v2.bgpvpn_network_association +=============================================== + +.. automodule:: openstack.network.v2.bgpvpn_network_association + +The BgpVpnNetworkAssociation Class +---------------------------------- + +The ``BgpVpnNetworkAssociation`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.bgpvpn_network_association.BgpVpnNetworkAssociation + :members: diff --git a/doc/source/user/resources/network/v2/bgpvpn_port_association.rst b/doc/source/user/resources/network/v2/bgpvpn_port_association.rst new file mode 100644 index 0000000000..07584c1aae --- /dev/null +++ b/doc/source/user/resources/network/v2/bgpvpn_port_association.rst @@ -0,0 +1,13 @@ +openstack.network.v2.bgpvpn_port_association +============================================ + +.. automodule:: openstack.network.v2.bgpvpn_port_association + +The BgpVpnPortAssociation Class +------------------------------- + +The ``BgpVpnPortAssociation`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.bgpvpn_port_association.BgpVpnPortAssociation + :members: diff --git a/doc/source/user/resources/network/v2/bgpvpn_router_association.rst b/doc/source/user/resources/network/v2/bgpvpn_router_association.rst new file mode 100644 index 0000000000..4f046da7e3 --- /dev/null +++ b/doc/source/user/resources/network/v2/bgpvpn_router_association.rst @@ -0,0 +1,13 @@ +openstack.network.v2.bgpvpn_router_association +============================================== + +.. automodule:: openstack.network.v2.bgpvpn_router_association + +The BgpVpnRouterAssociation Class +--------------------------------- + +The ``BgpVpnRouterAssociation`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.bgpvpn_router_association.BgpVpnRouterAssociation + :members: diff --git a/doc/source/users/resources/network/v2/extension.rst b/doc/source/user/resources/network/v2/extension.rst similarity index 100% rename from doc/source/users/resources/network/v2/extension.rst rename to doc/source/user/resources/network/v2/extension.rst diff --git a/doc/source/users/resources/network/v2/flavor.rst b/doc/source/user/resources/network/v2/flavor.rst similarity index 100% rename from doc/source/users/resources/network/v2/flavor.rst rename to doc/source/user/resources/network/v2/flavor.rst diff --git a/doc/source/users/resources/network/v2/floating_ip.rst b/doc/source/user/resources/network/v2/floating_ip.rst similarity index 100% rename from doc/source/users/resources/network/v2/floating_ip.rst rename to doc/source/user/resources/network/v2/floating_ip.rst diff --git a/doc/source/user/resources/network/v2/health_monitor.rst b/doc/source/user/resources/network/v2/health_monitor.rst new file mode 100644 index 0000000000..76cf6a7e83 --- /dev/null +++ b/doc/source/user/resources/network/v2/health_monitor.rst @@ -0,0 +1,13 @@ +openstack.network.v2.health_monitor +=================================== + +.. automodule:: openstack.network.v2.health_monitor + +The HealthMonitor Class +----------------------- + +The ``HealthMonitor`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.health_monitor.HealthMonitor + :members: diff --git a/doc/source/users/resources/network/v2/listener.rst b/doc/source/user/resources/network/v2/listener.rst similarity index 100% rename from doc/source/users/resources/network/v2/listener.rst rename to doc/source/user/resources/network/v2/listener.rst diff --git a/doc/source/users/resources/network/v2/load_balancer.rst b/doc/source/user/resources/network/v2/load_balancer.rst similarity index 100% rename from doc/source/users/resources/network/v2/load_balancer.rst rename to doc/source/user/resources/network/v2/load_balancer.rst diff --git a/doc/source/user/resources/network/v2/local_ip.rst b/doc/source/user/resources/network/v2/local_ip.rst new file mode 100644 index 0000000000..30f846ad7d --- /dev/null +++ b/doc/source/user/resources/network/v2/local_ip.rst @@ -0,0 +1,12 @@ +openstack.network.v2.local_ip +============================= + +.. automodule:: openstack.network.v2.local_ip + +The LocalIP Class +----------------- + +The ``LocalIP`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.local_ip.LocalIP + :members: diff --git a/doc/source/user/resources/network/v2/local_ip_association.rst b/doc/source/user/resources/network/v2/local_ip_association.rst new file mode 100644 index 0000000000..12b59a9f20 --- /dev/null +++ b/doc/source/user/resources/network/v2/local_ip_association.rst @@ -0,0 +1,13 @@ +openstack.network.v2.local_ip_association +========================================= + +.. automodule:: openstack.network.v2.local_ip_association + +The LocalIPAssociation Class +---------------------------- + +The ``LocalIPAssociation`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.local_ip_association.LocalIPAssociation + :members: diff --git a/doc/source/users/resources/network/v2/metering_label.rst b/doc/source/user/resources/network/v2/metering_label.rst similarity index 75% rename from doc/source/users/resources/network/v2/metering_label.rst rename to doc/source/user/resources/network/v2/metering_label.rst index dacb3801bd..d07f025121 100644 --- a/doc/source/users/resources/network/v2/metering_label.rst +++ b/doc/source/user/resources/network/v2/metering_label.rst @@ -6,7 +6,8 @@ openstack.network.v2.metering_label The MeteringLabel Class ----------------------- -The ``MeteringLabel`` class inherits from :class:`~openstack.resource.Resource`. +The ``MeteringLabel`` class inherits from +:class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.metering_label.MeteringLabel :members: diff --git a/doc/source/users/resources/network/v2/metering_label_rule.rst b/doc/source/user/resources/network/v2/metering_label_rule.rst similarity index 77% rename from doc/source/users/resources/network/v2/metering_label_rule.rst rename to doc/source/user/resources/network/v2/metering_label_rule.rst index 390884933a..9bcf7840c3 100644 --- a/doc/source/users/resources/network/v2/metering_label_rule.rst +++ b/doc/source/user/resources/network/v2/metering_label_rule.rst @@ -6,7 +6,8 @@ openstack.network.v2.metering_label_rule The MeteringLabelRule Class --------------------------- -The ``MeteringLabelRule`` class inherits from :class:`~openstack.resource.Resource`. +The ``MeteringLabelRule`` class inherits from +:class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.metering_label_rule.MeteringLabelRule :members: diff --git a/doc/source/user/resources/network/v2/ndp_proxy.rst b/doc/source/user/resources/network/v2/ndp_proxy.rst new file mode 100644 index 0000000000..c5479e79b5 --- /dev/null +++ b/doc/source/user/resources/network/v2/ndp_proxy.rst @@ -0,0 +1,12 @@ +openstack.network.v2.ndp_proxy +============================== + +.. automodule:: openstack.network.v2.ndp_proxy + +The NDPProxy Class +------------------ + +The ``NDPProxy`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.ndp_proxy.NDPProxy + :members: diff --git a/doc/source/users/resources/network/v2/network.rst b/doc/source/user/resources/network/v2/network.rst similarity index 100% rename from doc/source/users/resources/network/v2/network.rst rename to doc/source/user/resources/network/v2/network.rst diff --git a/doc/source/user/resources/network/v2/network_ip_availability.rst b/doc/source/user/resources/network/v2/network_ip_availability.rst new file mode 100644 index 0000000000..900f2461b2 --- /dev/null +++ b/doc/source/user/resources/network/v2/network_ip_availability.rst @@ -0,0 +1,13 @@ +openstack.network.v2.network_ip_availability +============================================ + +.. automodule:: openstack.network.v2.network_ip_availability + +The NetworkIPAvailability Class +------------------------------- + +The ``NetworkIPAvailability`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.network_ip_availability.NetworkIPAvailability + :members: diff --git a/doc/source/user/resources/network/v2/network_segment_range.rst b/doc/source/user/resources/network/v2/network_segment_range.rst new file mode 100644 index 0000000000..1f2c55ddf0 --- /dev/null +++ b/doc/source/user/resources/network/v2/network_segment_range.rst @@ -0,0 +1,13 @@ +openstack.network.v2.network_segment_range +========================================== + +.. 
automodule:: openstack.network.v2.network_segment_range + +The NetworkSegmentRange Class +----------------------------- + +The ``NetworkSegmentRange`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.network_segment_range.NetworkSegmentRange + :members: diff --git a/doc/source/users/resources/network/v2/pool.rst b/doc/source/user/resources/network/v2/pool.rst similarity index 100% rename from doc/source/users/resources/network/v2/pool.rst rename to doc/source/user/resources/network/v2/pool.rst diff --git a/doc/source/users/resources/network/v2/pool_member.rst b/doc/source/user/resources/network/v2/pool_member.rst similarity index 100% rename from doc/source/users/resources/network/v2/pool_member.rst rename to doc/source/user/resources/network/v2/pool_member.rst diff --git a/doc/source/users/resources/network/v2/port.rst b/doc/source/user/resources/network/v2/port.rst similarity index 100% rename from doc/source/users/resources/network/v2/port.rst rename to doc/source/user/resources/network/v2/port.rst diff --git a/doc/source/users/resources/network/v2/qos_bandwidth_limit_rule.rst b/doc/source/user/resources/network/v2/qos_bandwidth_limit_rule.rst similarity index 78% rename from doc/source/users/resources/network/v2/qos_bandwidth_limit_rule.rst rename to doc/source/user/resources/network/v2/qos_bandwidth_limit_rule.rst index 115a55de5b..98d904027d 100644 --- a/doc/source/users/resources/network/v2/qos_bandwidth_limit_rule.rst +++ b/doc/source/user/resources/network/v2/qos_bandwidth_limit_rule.rst @@ -6,7 +6,8 @@ openstack.network.v2.qos_bandwidth_limit_rule The QoSBandwidthLimitRule Class ------------------------------- -The ``QoSBandwidthLimitRule`` class inherits from :class:`~openstack.resource.Resource`. +The ``QoSBandwidthLimitRule`` class inherits from +:class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule :members: diff --git a/doc/source/users/resources/network/v2/qos_dscp_marking_rule.rst b/doc/source/user/resources/network/v2/qos_dscp_marking_rule.rst similarity index 77% rename from doc/source/users/resources/network/v2/qos_dscp_marking_rule.rst rename to doc/source/user/resources/network/v2/qos_dscp_marking_rule.rst index 6d2cf9a457..c000236427 100644 --- a/doc/source/users/resources/network/v2/qos_dscp_marking_rule.rst +++ b/doc/source/user/resources/network/v2/qos_dscp_marking_rule.rst @@ -6,7 +6,8 @@ openstack.network.v2.qos_dscp_marking_rule The QoSDSCPMarkingRule Class ---------------------------- -The ``QoSDSCPMarkingRule`` class inherits from :class:`~openstack.resource.Resource`. +The ``QoSDSCPMarkingRule`` class inherits from +:class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule :members: diff --git a/doc/source/users/resources/network/v2/qos_minimum_bandwidth_rule.rst b/doc/source/user/resources/network/v2/qos_minimum_bandwidth_rule.rst similarity index 78% rename from doc/source/users/resources/network/v2/qos_minimum_bandwidth_rule.rst rename to doc/source/user/resources/network/v2/qos_minimum_bandwidth_rule.rst index 6ba004b4de..05c52768ba 100644 --- a/doc/source/users/resources/network/v2/qos_minimum_bandwidth_rule.rst +++ b/doc/source/user/resources/network/v2/qos_minimum_bandwidth_rule.rst @@ -6,7 +6,8 @@ openstack.network.v2.qos_minimum_bandwidth_rule The QoSMinimumBandwidthRule Class --------------------------------- -The ``QoSMinimumBandwidthRule`` class inherits from :class:`~openstack.resource.Resource`. +The ``QoSMinimumBandwidthRule`` class inherits from +:class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule :members: diff --git a/doc/source/user/resources/network/v2/qos_minimum_packet_rate_rule.rst b/doc/source/user/resources/network/v2/qos_minimum_packet_rate_rule.rst new file mode 100644 index 0000000000..19b7b01766 --- /dev/null +++ b/doc/source/user/resources/network/v2/qos_minimum_packet_rate_rule.rst @@ -0,0 +1,13 @@ +openstack.network.v2.qos_minimum_packet_rate_rule +================================================= + +.. automodule:: openstack.network.v2.qos_minimum_packet_rate_rule + +The QoSMinimumPacketRateRule Class +---------------------------------- + +The ``QoSMinimumPacketRateRule`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule + :members: diff --git a/doc/source/user/resources/network/v2/qos_packet_rate_limit_rule.rst b/doc/source/user/resources/network/v2/qos_packet_rate_limit_rule.rst new file mode 100644 index 0000000000..a4b527ca3a --- /dev/null +++ b/doc/source/user/resources/network/v2/qos_packet_rate_limit_rule.rst @@ -0,0 +1,13 @@ +openstack.network.v2.qos_packet_rate_limit_rule +=============================================== + +.. automodule:: openstack.network.v2.qos_packet_rate_limit_rule + +The QoSPacketRateLimitRule Class +---------------------------------- + +The ``QoSPacketRateLimitRule`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule + :members: diff --git a/doc/source/users/resources/network/v2/qos_policy.rst b/doc/source/user/resources/network/v2/qos_policy.rst similarity index 100% rename from doc/source/users/resources/network/v2/qos_policy.rst rename to doc/source/user/resources/network/v2/qos_policy.rst diff --git a/doc/source/users/resources/network/v2/qos_rule_type.rst b/doc/source/user/resources/network/v2/qos_rule_type.rst similarity index 100% rename from doc/source/users/resources/network/v2/qos_rule_type.rst rename to doc/source/user/resources/network/v2/qos_rule_type.rst diff --git a/doc/source/users/resources/network/v2/quota.rst b/doc/source/user/resources/network/v2/quota.rst similarity index 100% rename from doc/source/users/resources/network/v2/quota.rst rename to doc/source/user/resources/network/v2/quota.rst diff --git a/doc/source/users/resources/network/v2/rbac_policy.rst b/doc/source/user/resources/network/v2/rbac_policy.rst similarity index 95% rename from doc/source/users/resources/network/v2/rbac_policy.rst rename to doc/source/user/resources/network/v2/rbac_policy.rst index daf76007ad..062da1d7a4 100644 --- a/doc/source/users/resources/network/v2/rbac_policy.rst +++ b/doc/source/user/resources/network/v2/rbac_policy.rst @@ -9,4 +9,4 @@ The RBACPolicy Class The ``RBACPolicy`` class inherits from :class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.rbac_policy.RBACPolicy - :members: \ No newline at end of file + :members: diff --git a/doc/source/users/resources/network/v2/router.rst b/doc/source/user/resources/network/v2/router.rst similarity index 100% rename from doc/source/users/resources/network/v2/router.rst rename to doc/source/user/resources/network/v2/router.rst diff --git a/doc/source/users/resources/network/v2/security_group.rst b/doc/source/user/resources/network/v2/security_group.rst similarity index 75% rename from doc/source/users/resources/network/v2/security_group.rst rename to doc/source/user/resources/network/v2/security_group.rst index 5d47884365..2d0c860a9e 100644 --- a/doc/source/users/resources/network/v2/security_group.rst +++ b/doc/source/user/resources/network/v2/security_group.rst @@ -6,7 +6,8 @@ openstack.network.v2.security_group The SecurityGroup Class ----------------------- -The ``SecurityGroup`` class inherits from :class:`~openstack.resource.Resource`. +The ``SecurityGroup`` class inherits from +:class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.security_group.SecurityGroup :members: diff --git a/doc/source/users/resources/network/v2/security_group_rule.rst b/doc/source/user/resources/network/v2/security_group_rule.rst similarity index 77% rename from doc/source/users/resources/network/v2/security_group_rule.rst rename to doc/source/user/resources/network/v2/security_group_rule.rst index 2457200537..8af566ebc7 100644 --- a/doc/source/users/resources/network/v2/security_group_rule.rst +++ b/doc/source/user/resources/network/v2/security_group_rule.rst @@ -6,7 +6,8 @@ openstack.network.v2.security_group_rule The SecurityGroupRule Class --------------------------- -The ``SecurityGroupRule`` class inherits from :class:`~openstack.resource.Resource`. +The ``SecurityGroupRule`` class inherits from +:class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.security_group_rule.SecurityGroupRule :members: diff --git a/doc/source/users/resources/network/v2/segment.rst b/doc/source/user/resources/network/v2/segment.rst similarity index 100% rename from doc/source/users/resources/network/v2/segment.rst rename to doc/source/user/resources/network/v2/segment.rst diff --git a/doc/source/users/resources/network/v2/service_profile.rst b/doc/source/user/resources/network/v2/service_profile.rst similarity index 76% rename from doc/source/users/resources/network/v2/service_profile.rst rename to doc/source/user/resources/network/v2/service_profile.rst index c1c0130283..5d66c44735 100644 --- a/doc/source/users/resources/network/v2/service_profile.rst +++ b/doc/source/user/resources/network/v2/service_profile.rst @@ -6,7 +6,8 @@ openstack.network.v2.service_profile The ServiceProfile Class ------------------------ -The ``ServiceProfile`` class inherits from :class:`~openstack.resource.Resource`. +The ``ServiceProfile`` class inherits from +:class:`~openstack.resource.Resource`. .. autoclass:: openstack.network.v2.service_profile.ServiceProfile :members: diff --git a/doc/source/users/resources/network/v2/service_provider.rst b/doc/source/user/resources/network/v2/service_provider.rst similarity index 76% rename from doc/source/users/resources/network/v2/service_provider.rst rename to doc/source/user/resources/network/v2/service_provider.rst index e9d6678487..02f912deca 100644 --- a/doc/source/users/resources/network/v2/service_provider.rst +++ b/doc/source/user/resources/network/v2/service_provider.rst @@ -6,7 +6,8 @@ openstack.network.v2.service_provider The Service Provider Class -------------------------- -The ``Service Provider`` class inherits from :class:`~openstack.resource.Resource`. +The ``Service Provider`` class inherits from +:class:`~openstack.resource.Resource`. .. 
autoclass:: openstack.network.v2.service_provider.ServiceProvider :members: diff --git a/doc/source/user/resources/network/v2/sfc_flow_classifier.rst b/doc/source/user/resources/network/v2/sfc_flow_classifier.rst new file mode 100644 index 0000000000..da94f27560 --- /dev/null +++ b/doc/source/user/resources/network/v2/sfc_flow_classifier.rst @@ -0,0 +1,13 @@ +openstack.network.v2.sfc_flow_classifier +======================================== + +.. automodule:: openstack.network.v2.sfc_flow_classifier + +The SfcFlowClassifier Class +--------------------------- + +The ``SfcFlowClassifier`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier + :members: diff --git a/doc/source/user/resources/network/v2/sfc_port_chain.rst b/doc/source/user/resources/network/v2/sfc_port_chain.rst new file mode 100644 index 0000000000..3842f57099 --- /dev/null +++ b/doc/source/user/resources/network/v2/sfc_port_chain.rst @@ -0,0 +1,12 @@ +openstack.network.v2.sfc_port_chain +=================================== + +.. automodule:: openstack.network.v2.sfc_port_chain + +The SfcPortChain Class +---------------------- + +The ``SfcPortChain`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.sfc_port_chain.SfcPortChain + :members: diff --git a/doc/source/user/resources/network/v2/sfc_port_pair.rst b/doc/source/user/resources/network/v2/sfc_port_pair.rst new file mode 100644 index 0000000000..84e65000cf --- /dev/null +++ b/doc/source/user/resources/network/v2/sfc_port_pair.rst @@ -0,0 +1,12 @@ +openstack.network.v2.sfc_port_pair +================================== + +.. automodule:: openstack.network.v2.sfc_port_pair + +The SfcPortPair Class +--------------------- + +The ``SfcPortPair`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.sfc_port_pair.SfcPortPair + :members: diff --git a/doc/source/user/resources/network/v2/sfc_port_pair_group.rst b/doc/source/user/resources/network/v2/sfc_port_pair_group.rst new file mode 100644 index 0000000000..3333062bce --- /dev/null +++ b/doc/source/user/resources/network/v2/sfc_port_pair_group.rst @@ -0,0 +1,13 @@ +openstack.network.v2.sfc_port_pair_group +======================================== + +.. automodule:: openstack.network.v2.sfc_port_pair_group + +The SfcPortPairGroup Class +-------------------------- + +The ``SfcPortPairGroup`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup + :members: diff --git a/doc/source/user/resources/network/v2/sfc_service_graph.rst b/doc/source/user/resources/network/v2/sfc_service_graph.rst new file mode 100644 index 0000000000..718b7201e4 --- /dev/null +++ b/doc/source/user/resources/network/v2/sfc_service_graph.rst @@ -0,0 +1,13 @@ +openstack.network.v2.sfc_service_graph +====================================== + +.. automodule:: openstack.network.v2.sfc_service_graph + +The SfcServiceGraph Class +------------------------- + +The ``SfcServiceGraph`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.sfc_service_graph.SfcServiceGraph + :members: diff --git a/doc/source/users/resources/network/v2/subnet.rst b/doc/source/user/resources/network/v2/subnet.rst similarity index 100% rename from doc/source/users/resources/network/v2/subnet.rst rename to doc/source/user/resources/network/v2/subnet.rst diff --git a/doc/source/users/resources/network/v2/subnet_pool.rst b/doc/source/user/resources/network/v2/subnet_pool.rst similarity index 100% rename from doc/source/users/resources/network/v2/subnet_pool.rst rename to doc/source/user/resources/network/v2/subnet_pool.rst diff --git a/doc/source/user/resources/network/v2/tap_flow.rst b/doc/source/user/resources/network/v2/tap_flow.rst new file mode 100644 index 0000000000..21cbce3aa2 --- /dev/null +++ b/doc/source/user/resources/network/v2/tap_flow.rst @@ -0,0 +1,12 @@ +openstack.network.v2.tap_flow +============================= + +.. automodule:: openstack.network.v2.tap_flow + +The TapFlow Class +----------------- + +The ``TapFlow`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.tap_flow.TapFlow + :members: diff --git a/doc/source/user/resources/network/v2/tap_mirror.rst b/doc/source/user/resources/network/v2/tap_mirror.rst new file mode 100644 index 0000000000..8697b1973f --- /dev/null +++ b/doc/source/user/resources/network/v2/tap_mirror.rst @@ -0,0 +1,12 @@ +openstack.network.v2.tap_mirror +=============================== + +.. automodule:: openstack.network.v2.tap_mirror + +The TapMirror Class +------------------- + +The ``TapMirror`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.tap_mirror.TapMirror + :members: diff --git a/doc/source/user/resources/network/v2/tap_service.rst b/doc/source/user/resources/network/v2/tap_service.rst new file mode 100644 index 0000000000..74b51c8028 --- /dev/null +++ b/doc/source/user/resources/network/v2/tap_service.rst @@ -0,0 +1,12 @@ +openstack.network.v2.tap_service +================================ + +.. automodule:: openstack.network.v2.tap_service + +The TapService Class +-------------------- + +The ``TapService`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.tap_service.TapService + :members: diff --git a/doc/source/user/resources/network/v2/vpn/endpoint_group.rst b/doc/source/user/resources/network/v2/vpn/endpoint_group.rst new file mode 100644 index 0000000000..3e8478cc3c --- /dev/null +++ b/doc/source/user/resources/network/v2/vpn/endpoint_group.rst @@ -0,0 +1,13 @@ +openstack.network.v2.vpn_endpoint_group +======================================= + +.. automodule:: openstack.network.v2.vpn_endpoint_group + +The VpnEndpointGroup Class +-------------------------- + +The ``VpnEndpointGroup`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup + :members: diff --git a/doc/source/user/resources/network/v2/vpn/ike_policy.rst b/doc/source/user/resources/network/v2/vpn/ike_policy.rst new file mode 100644 index 0000000000..81bc6b0dd4 --- /dev/null +++ b/doc/source/user/resources/network/v2/vpn/ike_policy.rst @@ -0,0 +1,13 @@ +openstack.network.v2.vpn_ike_policy +=================================== + +.. automodule:: openstack.network.v2.vpn_ike_policy + +The VpnIkePolicy Class +---------------------- + +The ``VpnIkePolicy`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.vpn_ike_policy.VpnIkePolicy + :members: diff --git a/doc/source/user/resources/network/v2/vpn/index.rst b/doc/source/user/resources/network/v2/vpn/index.rst new file mode 100644 index 0000000000..fee6fe9316 --- /dev/null +++ b/doc/source/user/resources/network/v2/vpn/index.rst @@ -0,0 +1,8 @@ +VPNaaS Resources +================ + +.. toctree:: + :maxdepth: 1 + :glob: + + * diff --git a/doc/source/user/resources/network/v2/vpn/ipsec_policy.rst b/doc/source/user/resources/network/v2/vpn/ipsec_policy.rst new file mode 100644 index 0000000000..5a5d759307 --- /dev/null +++ b/doc/source/user/resources/network/v2/vpn/ipsec_policy.rst @@ -0,0 +1,13 @@ +openstack.network.v2.vpn_ipsec_policy +===================================== + +.. automodule:: openstack.network.v2.vpn_ipsec_policy + +The VpnIpsecPolicy Class +------------------------ + +The ``VpnIpsecPolicy`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy + :members: diff --git a/doc/source/user/resources/network/v2/vpn/ipsec_site_connection.rst b/doc/source/user/resources/network/v2/vpn/ipsec_site_connection.rst new file mode 100644 index 0000000000..f90e000b42 --- /dev/null +++ b/doc/source/user/resources/network/v2/vpn/ipsec_site_connection.rst @@ -0,0 +1,13 @@ +openstack.network.v2.vpn_ipsec_site_connection +============================================== + +.. automodule:: openstack.network.v2.vpn_ipsec_site_connection + +The VpnIPSecSiteConnection Class +-------------------------------- + +The ``VpnIPSecSiteConnection`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection + :members: diff --git a/doc/source/user/resources/network/v2/vpn/service.rst b/doc/source/user/resources/network/v2/vpn/service.rst new file mode 100644 index 0000000000..0a185ba636 --- /dev/null +++ b/doc/source/user/resources/network/v2/vpn/service.rst @@ -0,0 +1,13 @@ +openstack.network.v2.vpn_service +================================ + +.. automodule:: openstack.network.v2.vpn_service + +The VpnService Class +-------------------- + +The ``VpnService`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.network.v2.vpn_service.VpnService + :members: diff --git a/doc/source/users/resources/object_store/index.rst b/doc/source/user/resources/object_store/index.rst similarity index 100% rename from doc/source/users/resources/object_store/index.rst rename to doc/source/user/resources/object_store/index.rst diff --git a/doc/source/users/resources/object_store/v1/account.rst b/doc/source/user/resources/object_store/v1/account.rst similarity index 100% rename from doc/source/users/resources/object_store/v1/account.rst rename to doc/source/user/resources/object_store/v1/account.rst diff --git a/doc/source/users/resources/object_store/v1/container.rst b/doc/source/user/resources/object_store/v1/container.rst similarity index 100% rename from doc/source/users/resources/object_store/v1/container.rst rename to doc/source/user/resources/object_store/v1/container.rst diff --git a/doc/source/users/resources/object_store/v1/obj.rst b/doc/source/user/resources/object_store/v1/obj.rst similarity index 100% rename from doc/source/users/resources/object_store/v1/obj.rst rename to doc/source/user/resources/object_store/v1/obj.rst diff --git a/doc/source/user/resources/orchestration/index.rst b/doc/source/user/resources/orchestration/index.rst new file mode 100644 index 0000000000..4323a99ac9 --- /dev/null +++ b/doc/source/user/resources/orchestration/index.rst @@ -0,0 
+1,15 @@ +Orchestration Resources +======================= + +.. toctree:: + :maxdepth: 1 + + v1/resource + v1/software_config + v1/software_deployment + v1/stack + v1/stack_environment + v1/stack_event + v1/stack_files + v1/stack_template + v1/template diff --git a/doc/source/users/resources/orchestration/v1/resource.rst b/doc/source/user/resources/orchestration/v1/resource.rst similarity index 100% rename from doc/source/users/resources/orchestration/v1/resource.rst rename to doc/source/user/resources/orchestration/v1/resource.rst diff --git a/doc/source/user/resources/orchestration/v1/software_config.rst b/doc/source/user/resources/orchestration/v1/software_config.rst new file mode 100644 index 0000000000..b38adcc369 --- /dev/null +++ b/doc/source/user/resources/orchestration/v1/software_config.rst @@ -0,0 +1,13 @@ +openstack.orchestration.v1.software_config +========================================== + +.. automodule:: openstack.orchestration.v1.software_config + +The SoftwareConfig Class +------------------------ + +The ``SoftwareConfig`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.orchestration.v1.software_config.SoftwareConfig + :members: diff --git a/doc/source/user/resources/orchestration/v1/software_deployment.rst b/doc/source/user/resources/orchestration/v1/software_deployment.rst new file mode 100644 index 0000000000..d39bd89a32 --- /dev/null +++ b/doc/source/user/resources/orchestration/v1/software_deployment.rst @@ -0,0 +1,13 @@ +openstack.orchestration.v1.software_deployment +============================================== + +.. automodule:: openstack.orchestration.v1.software_deployment + +The SoftwareDeployment Class +---------------------------- + +The ``SoftwareDeployment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.orchestration.v1.software_deployment.SoftwareDeployment + :members: diff --git a/doc/source/users/resources/orchestration/v1/stack.rst b/doc/source/user/resources/orchestration/v1/stack.rst similarity index 100% rename from doc/source/users/resources/orchestration/v1/stack.rst rename to doc/source/user/resources/orchestration/v1/stack.rst diff --git a/doc/source/user/resources/orchestration/v1/stack_environment.rst b/doc/source/user/resources/orchestration/v1/stack_environment.rst new file mode 100644 index 0000000000..a159e04011 --- /dev/null +++ b/doc/source/user/resources/orchestration/v1/stack_environment.rst @@ -0,0 +1,13 @@ +openstack.orchestration.v1.stack_environment +============================================ + +.. automodule:: openstack.orchestration.v1.stack_environment + +The StackEnvironment Class +-------------------------- + +The ``StackEnvironment`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.orchestration.v1.stack_environment.StackEnvironment + :members: diff --git a/doc/source/user/resources/orchestration/v1/stack_event.rst b/doc/source/user/resources/orchestration/v1/stack_event.rst new file mode 100644 index 0000000000..8838f44f4e --- /dev/null +++ b/doc/source/user/resources/orchestration/v1/stack_event.rst @@ -0,0 +1,12 @@ +openstack.orchestration.v1.stack_event +====================================== + +.. automodule:: openstack.orchestration.v1.stack_event + +The StackEvent Class +-------------------- + +The ``StackEvent`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.orchestration.v1.stack_event.StackEvent + :members: diff --git a/doc/source/user/resources/orchestration/v1/stack_files.rst b/doc/source/user/resources/orchestration/v1/stack_files.rst new file mode 100644 index 0000000000..2144747e4f --- /dev/null +++ b/doc/source/user/resources/orchestration/v1/stack_files.rst @@ -0,0 +1,13 @@ +openstack.orchestration.v1.stack_files +====================================== + +.. automodule:: openstack.orchestration.v1.stack_files + +The StackFiles Class +-------------------- + +The ``StackFiles`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.orchestration.v1.stack_files.StackFiles + :members: diff --git a/doc/source/user/resources/orchestration/v1/stack_template.rst b/doc/source/user/resources/orchestration/v1/stack_template.rst new file mode 100644 index 0000000000..ac2af5c9b2 --- /dev/null +++ b/doc/source/user/resources/orchestration/v1/stack_template.rst @@ -0,0 +1,13 @@ +openstack.orchestration.v1.stack_template +========================================= + +.. automodule:: openstack.orchestration.v1.stack_template + +The StackTemplate Class +----------------------- + +The ``StackTemplate`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.orchestration.v1.stack_template.StackTemplate + :members: diff --git a/doc/source/user/resources/orchestration/v1/template.rst b/doc/source/user/resources/orchestration/v1/template.rst new file mode 100644 index 0000000000..ae9f2b453d --- /dev/null +++ b/doc/source/user/resources/orchestration/v1/template.rst @@ -0,0 +1,13 @@ +openstack.orchestration.v1.template +=================================== + +.. automodule:: openstack.orchestration.v1.template + +The Template Class +------------------ + +The ``Template`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.orchestration.v1.template.Template + :members: diff --git a/doc/source/user/resources/placement/index.rst b/doc/source/user/resources/placement/index.rst new file mode 100644 index 0000000000..b57b797404 --- /dev/null +++ b/doc/source/user/resources/placement/index.rst @@ -0,0 +1,10 @@ +Placement v1 Resources +====================== + +.. toctree:: + :maxdepth: 1 + + v1/resource_class + v1/resource_provider + v1/resource_provider_inventory + v1/trait diff --git a/doc/source/user/resources/placement/v1/resource_class.rst b/doc/source/user/resources/placement/v1/resource_class.rst new file mode 100644 index 0000000000..2ef5817b9a --- /dev/null +++ b/doc/source/user/resources/placement/v1/resource_class.rst @@ -0,0 +1,13 @@ +openstack.placement.v1.resource_class +===================================== + +.. automodule:: openstack.placement.v1.resource_class + +The ResourceClass Class +----------------------- + +The ``ResourceClass`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.placement.v1.resource_class.ResourceClass + :members: diff --git a/doc/source/user/resources/placement/v1/resource_provider.rst b/doc/source/user/resources/placement/v1/resource_provider.rst new file mode 100644 index 0000000000..8ab028b4c6 --- /dev/null +++ b/doc/source/user/resources/placement/v1/resource_provider.rst @@ -0,0 +1,13 @@ +openstack.placement.v1.resource_provider +======================================== + +.. automodule:: openstack.placement.v1.resource_provider + +The ResourceProvider Class +-------------------------- + +The ``ResourceProvider`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.placement.v1.resource_provider.ResourceProvider + :members: diff --git a/doc/source/user/resources/placement/v1/resource_provider_inventory.rst b/doc/source/user/resources/placement/v1/resource_provider_inventory.rst new file mode 100644 index 0000000000..1f3491a19f --- /dev/null +++ b/doc/source/user/resources/placement/v1/resource_provider_inventory.rst @@ -0,0 +1,13 @@ +openstack.placement.v1.resource_provider_inventory +================================================== + +.. automodule:: openstack.placement.v1.resource_provider_inventory + +The ResourceProviderInventory Class +----------------------------------- + +The ``ResourceProviderInventory`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory + :members: diff --git a/doc/source/user/resources/placement/v1/trait.rst b/doc/source/user/resources/placement/v1/trait.rst new file mode 100644 index 0000000000..e9c70a887f --- /dev/null +++ b/doc/source/user/resources/placement/v1/trait.rst @@ -0,0 +1,12 @@ +openstack.placement.v1.trait +============================ + +.. automodule:: openstack.placement.v1.trait + +The Trait Class +--------------- + +The ``Trait`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.placement.v1.trait.Trait + :members: diff --git a/doc/source/user/resources/shared_file_system/index.rst b/doc/source/user/resources/shared_file_system/index.rst new file mode 100644 index 0000000000..d34b4a0dd7 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/index.rst @@ -0,0 +1,21 @@ +Shared File System service resources +==================================== + +.. 
toctree:: + :maxdepth: 1 + + v2/availability_zone + v2/storage_pool + v2/limit + v2/share + v2/share_instance + v2/share_network_subnet + v2/share_snapshot + v2/share_snapshot_instance + v2/share_network + v2/user_message + v2/share_group + v2/share_access_rule + v2/share_group_snapshot + v2/resource_locks + v2/quota_class_set diff --git a/doc/source/user/resources/shared_file_system/v2/availability_zone.rst b/doc/source/user/resources/shared_file_system/v2/availability_zone.rst new file mode 100644 index 0000000000..9a518908c3 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/availability_zone.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.availability_zone +================================================= + +.. automodule:: openstack.shared_file_system.v2.availability_zone + +The AvailabilityZone Class +-------------------------- + +The ``AvailabilityZone`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.availability_zone.AvailabilityZone + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/limit.rst b/doc/source/user/resources/shared_file_system/v2/limit.rst new file mode 100644 index 0000000000..33342c125d --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/limit.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.limit +===================================== + +.. automodule:: openstack.shared_file_system.v2.limit + +The Limit Class +--------------- + +The ``Limit`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.shared_file_system.v2.limit.Limit + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/quota_class_set.rst b/doc/source/user/resources/shared_file_system/v2/quota_class_set.rst new file mode 100644 index 0000000000..4ad5311f82 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/quota_class_set.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.quota_class_set +=============================================== + +.. automodule:: openstack.shared_file_system.v2.quota_class_set + +The QuotaClassSet Class +----------------------- + +The ``QuotaClassSet`` class inherits from +:class:`~openstack.resource.Resource` and can be used to query quota class + +.. autoclass:: openstack.shared_file_system.v2.quota_class_set.QuotaClassSet + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/resource_locks.rst b/doc/source/user/resources/shared_file_system/v2/resource_locks.rst new file mode 100644 index 0000000000..6040bfa5a1 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/resource_locks.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.resource_locks +============================================== + +.. automodule:: openstack.shared_file_system.v2.resource_locks + +The Resource Locks Class +------------------------ + +The ``ResourceLock`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.resource_locks.ResourceLock + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share.rst b/doc/source/user/resources/shared_file_system/v2/share.rst new file mode 100644 index 0000000000..bac5e96025 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share +===================================== + +.. 
automodule:: openstack.shared_file_system.v2.share + +The Share Class +--------------- + +The ``Share`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.share.Share + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_access_rule.rst b/doc/source/user/resources/shared_file_system/v2/share_access_rule.rst new file mode 100644 index 0000000000..eec6b43c0b --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_access_rule.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_access_rule +================================================= + +.. automodule:: openstack.shared_file_system.v2.share_access_rule + +The ShareAccessRule Class +------------------------- + +The ``ShareAccessRule`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.share_access_rule.ShareAccessRule + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_group.rst b/doc/source/user/resources/shared_file_system/v2/share_group.rst new file mode 100644 index 0000000000..232202b56b --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_group.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_group +=========================================== + +.. automodule:: openstack.shared_file_system.v2.share_group + +The ShareGroup Class +-------------------- + +The ``ShareGroup`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.shared_file_system.v2.share_group.ShareGroup + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_group_snapshot.rst b/doc/source/user/resources/shared_file_system/v2/share_group_snapshot.rst new file mode 100644 index 0000000000..40972ad938 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_group_snapshot.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_group_snapshot +==================================================== + +.. automodule:: openstack.shared_file_system.v2.share_group_snapshot + +The ShareGroupSnapshot Class +---------------------------- + +The ``ShareGroupSnapshot`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.share_group_snapshot.ShareGroupSnapshot + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_instance.rst b/doc/source/user/resources/shared_file_system/v2/share_instance.rst new file mode 100644 index 0000000000..0b058335bd --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_instance.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_instance +============================================== + +.. automodule:: openstack.shared_file_system.v2.share_instance + +The ShareInstance Class +----------------------- + +The ``ShareInstance`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.share_instance.ShareInstance + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_network.rst b/doc/source/user/resources/shared_file_system/v2/share_network.rst new file mode 100644 index 0000000000..793265d873 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_network.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_network +============================================= + +.. 
automodule:: openstack.shared_file_system.v2.share_network + +The ShareNetwork Class +---------------------- + +The ``ShareNetwork`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.share_network.ShareNetwork + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_network_subnet.rst b/doc/source/user/resources/shared_file_system/v2/share_network_subnet.rst new file mode 100644 index 0000000000..95638a60b4 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_network_subnet.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_network_subnet +==================================================== + +.. automodule:: openstack.shared_file_system.v2.share_network_subnet + +The ShareNetworkSubnet Class +---------------------------- + +The ``ShareNetworkSubnet`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_snapshot.rst b/doc/source/user/resources/shared_file_system/v2/share_snapshot.rst new file mode 100644 index 0000000000..4063c48739 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_snapshot.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_snapshot +============================================== + +.. automodule:: openstack.shared_file_system.v2.share_snapshot + +The ShareSnapshot Class +----------------------- + +The ``ShareSnapshot`` class inherits from +:class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.shared_file_system.v2.share_snapshot.ShareSnapshot + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/share_snapshot_instance.rst b/doc/source/user/resources/shared_file_system/v2/share_snapshot_instance.rst new file mode 100644 index 0000000000..9184599901 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/share_snapshot_instance.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.share_snapshot_instance +======================================================= + +.. automodule:: openstack.shared_file_system.v2.share_snapshot_instance + +The ShareSnapshotInstance Class +------------------------------- + +The ``ShareSnapshotInstance`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.share_snapshot_instance.ShareSnapshotInstance + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/storage_pool.rst b/doc/source/user/resources/shared_file_system/v2/storage_pool.rst new file mode 100644 index 0000000000..86649b83a7 --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/storage_pool.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.storage_pool +============================================ + +.. automodule:: openstack.shared_file_system.v2.storage_pool + +The StoragePool Class +--------------------- + +The ``StoragePool`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.storage_pool.StoragePool + :members: diff --git a/doc/source/user/resources/shared_file_system/v2/user_message.rst b/doc/source/user/resources/shared_file_system/v2/user_message.rst new file mode 100644 index 0000000000..de3b21f66c --- /dev/null +++ b/doc/source/user/resources/shared_file_system/v2/user_message.rst @@ -0,0 +1,13 @@ +openstack.shared_file_system.v2.user_message +============================================ + +.. 
automodule:: openstack.shared_file_system.v2.user_message + +The UserMessage Class +--------------------- + +The ``UserMessage`` class inherits from +:class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.shared_file_system.v2.user_message.UserMessage + :members: diff --git a/doc/source/user/resources/workflow/index.rst b/doc/source/user/resources/workflow/index.rst new file mode 100644 index 0000000000..8d66f71fe7 --- /dev/null +++ b/doc/source/user/resources/workflow/index.rst @@ -0,0 +1,9 @@ +Workflow Resources +================== + +.. toctree:: + :maxdepth: 1 + + v2/execution + v2/workflow + v2/crontrigger diff --git a/doc/source/user/resources/workflow/v2/crontrigger.rst b/doc/source/user/resources/workflow/v2/crontrigger.rst new file mode 100644 index 0000000000..828af768e3 --- /dev/null +++ b/doc/source/user/resources/workflow/v2/crontrigger.rst @@ -0,0 +1,12 @@ +openstack.workflow.v2.cron_trigger +================================== + +.. automodule:: openstack.workflow.v2.cron_trigger + +The CronTrigger Class +--------------------- + +The ``CronTrigger`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.workflow.v2.cron_trigger.CronTrigger + :members: diff --git a/doc/source/user/resources/workflow/v2/execution.rst b/doc/source/user/resources/workflow/v2/execution.rst new file mode 100644 index 0000000000..62ec283a12 --- /dev/null +++ b/doc/source/user/resources/workflow/v2/execution.rst @@ -0,0 +1,12 @@ +openstack.workflow.v2.execution +=============================== + +.. automodule:: openstack.workflow.v2.execution + +The Execution Class +------------------- + +The ``Execution`` class inherits from :class:`~openstack.resource.Resource`. + +.. 
autoclass:: openstack.workflow.v2.execution.Execution + :members: diff --git a/doc/source/user/resources/workflow/v2/workflow.rst b/doc/source/user/resources/workflow/v2/workflow.rst new file mode 100644 index 0000000000..8f8950e52b --- /dev/null +++ b/doc/source/user/resources/workflow/v2/workflow.rst @@ -0,0 +1,12 @@ +openstack.workflow.v2.workflow +============================== + +.. automodule:: openstack.workflow.v2.workflow + +The Workflow Class +------------------ + +The ``Workflow`` class inherits from :class:`~openstack.resource.Resource`. + +.. autoclass:: openstack.workflow.v2.workflow.Workflow + :members: diff --git a/doc/source/user/service_description.rst b/doc/source/user/service_description.rst new file mode 100644 index 0000000000..3eeeb4a7ae --- /dev/null +++ b/doc/source/user/service_description.rst @@ -0,0 +1,10 @@ +ServiceDescription +================== +.. automodule:: openstack.service_description + + +ServiceDescription object +------------------------- + +.. autoclass:: openstack.service_description.ServiceDescription + :members: diff --git a/doc/source/user/testing/fakes.rst b/doc/source/user/testing/fakes.rst new file mode 100644 index 0000000000..ec238ef227 --- /dev/null +++ b/doc/source/user/testing/fakes.rst @@ -0,0 +1,5 @@ +Fakes +===== + +.. automodule:: openstack.test.fakes + :members: diff --git a/doc/source/user/testing/index.rst b/doc/source/user/testing/index.rst new file mode 100644 index 0000000000..ef602a9a60 --- /dev/null +++ b/doc/source/user/testing/index.rst @@ -0,0 +1,8 @@ +======================================== +Testing applications using OpenStack SDK +======================================== + +.. 
toctree:: + :maxdepth: 1 + + fakes diff --git a/doc/source/user/transition_from_profile.rst b/doc/source/user/transition_from_profile.rst new file mode 100644 index 0000000000..a6edc6a5e6 --- /dev/null +++ b/doc/source/user/transition_from_profile.rst @@ -0,0 +1,186 @@ +Transition from Profile +======================= + +.. note:: This section describes migrating code from a previous interface of + openstacksdk and can be ignored by people writing new code. + +If you have code that currently uses the :class:`~openstack.profile.Profile` +object and/or an ``authenticator`` instance from an object based on +``openstack.auth.base.BaseAuthPlugin``, that code should be updated to use the +:class:`~openstack.config.cloud_region.CloudRegion` object instead. + +.. important:: + + :class:`~openstack.profile.Profile` is going away. Existing code using it + should be migrated as soon as possible. + +Writing Code that Works with Both +--------------------------------- + +These examples should all work with both the old and new interface, with one +caveat. With the old interface, the ``CloudConfig`` object comes from the +``os-client-config`` library, and in the new interface that has been moved +into the SDK. In order to write code that works with both the old and new +interfaces, use the following code to import the config namespace: + +.. code-block:: python + + try: + from openstack import config as occ + except ImportError: + from os_client_config import config as occ + +The examples will assume that the config module has been imported in that +manner. + +.. note:: Yes, there is an easier and less verbose way to do all of these. + These are verbose to handle both the old and new interfaces in the + same codebase. + +Replacing authenticator +----------------------- + +There is no direct replacement for ``openstack.auth.base.BaseAuthPlugin``. +``openstacksdk`` uses the `keystoneauth`_ library for authentication +and HTTP interactions. 
`keystoneauth`_ has `auth plugins`_ that can be used +to control how authentication is done. The ``auth_type`` config parameter +can be set to choose the correct authentication method to be used. + +Replacing Profile +----------------- + +The right way to replace the use of ``openstack.profile.Profile`` depends +a bit on what you're trying to accomplish. Common patterns are listed below, +but in general the approach is either to pass a cloud name to the +`openstack.connection.Connection` constructor, or to construct a +`openstack.config.cloud_region.CloudRegion` object and pass it to the +constructor. + +All of the examples on this page assume that you want to support old and +new interfaces simultaneously. There are easier and less verbose versions +of each that are available if you can just make a clean transition. + +Getting a Connection to a named cloud from clouds.yaml +------------------------------------------------------ + +If you want is to construct a `openstack.connection.Connection` based on +parameters configured in a ``clouds.yaml`` file, or from environment variables: + +.. code-block:: python + + import openstack.connection + + conn = connection.from_config(cloud_name='name-of-cloud-you-want') + +Getting a Connection from python arguments avoiding clouds.yaml +--------------------------------------------------------------- + +If, on the other hand, you want to construct a +`openstack.connection.Connection`, but are in a context where reading config +from a clouds.yaml file is undesirable, such as inside of a Service: + +* create a `openstack.config.loader.OpenStackConfig` object, telling + it to not load yaml files. Optionally pass an ``app_name`` and + ``app_version`` which will be added to user-agent strings. +* get a `openstack.config.cloud_region.CloudRegion` object from it +* get a `openstack.connection.Connection` + +.. 
code-block:: python + + try: + from openstack import config as occ + except ImportError: + from os_client_config import config as occ + from openstack import connection + + loader = occ.OpenStackConfig( + load_yaml_files=False, + app_name='spectacular-app', + app_version='1.0') + cloud_region = loader.get_one_cloud( + region_name='my-awesome-region', + auth_type='password', + auth=dict( + auth_url='https://auth.example.com', + username='amazing-user', + user_domain_name='example-domain', + project_name='astounding-project', + user_project_name='example-domain', + password='super-secret-password', + )) + conn = connection.from_config(cloud_config=cloud_region) + +.. note:: app_name and app_version are completely optional, and auth_type + defaults to 'password'. They are shown here for clarity as to + where they should go if they want to be set. + +Getting a Connection from python arguments and optionally clouds.yaml +--------------------------------------------------------------------- + +If you want to make a connection from python arguments and want to allow +one of them to optionally be ``cloud`` to allow selection of a named cloud, +it's essentially the same as the previous example, except without +``load_yaml_files=False``. + +.. 
code-block:: python + + try: + from openstack import config as occ + except ImportError: + from os_client_config import config as occ + from openstack import connection + + loader = occ.OpenStackConfig( + app_name='spectacular-app', + app_version='1.0') + cloud_region = loader.get_one_cloud( + region_name='my-awesome-region', + auth_type='password', + auth=dict( + auth_url='https://auth.example.com', + username='amazing-user', + user_domain_name='example-domain', + project_name='astounding-project', + user_project_name='example-domain', + password='super-secret-password', + )) + conn = connection.from_config(cloud_config=cloud_region) + +Parameters to get_one_cloud +--------------------------- + +The most important things to note are: + +* ``auth_type`` specifies which kind of authentication plugin to use. It + controls how authentication is done, as well as what parameters are required. +* ``auth`` is a dictionary containing the parameters needed by the auth plugin. + The most common information it needs are user, project, domain, auth_url + and password. +* The rest of the keyword arguments to + ``openstack.config.loader.OpenStackConfig.get_one_cloud`` are either + parameters needed by the `keystoneauth Session`_ object, which control how + HTTP connections are made, or parameters needed by the + `keystoneauth Adapter`_ object, which control how services are found in the + Keystone Catalog. + +For `keystoneauth Adapter`_ parameters, since there is one +`openstack.connection.Connection` object but many services, per-service +parameters are formed by using the official ``service_type`` of the service +in question. For instance, to override the endpoint for the ``compute`` +service, the parameter ``compute_endpoint_override`` would be used. + +``region_name`` in ``openstack.profile.Profile`` was a per-service parameter. +This is no longer a valid concept. An `openstack.connection.Connection` is a +connection to a region of a cloud. 
If you are in an extreme situation where +you have one service in one region and a different service in a different +region, you must use two different `openstack.connection.Connection` objects. + +.. note:: service_type, although a parameter for keystoneauth1.adapter.Adapter, + is not a valid parameter for get_one_cloud. service_type is the key + by which services are referred, so saying + 'compute_service_type="henry"' doesn't have any meaning. + +.. _keystoneauth: https://docs.openstack.org/keystoneauth/latest/ +.. _auth plugins: https://docs.openstack.org/keystoneauth/latest/authentication-plugins.html +.. _keystoneauth Adapter: https://docs.openstack.org/keystoneauth/latest/api/keystoneauth1.html#keystoneauth1.adapter.Adapter +.. _keystoneauth Session: https://docs.openstack.org/keystoneauth/latest/api/keystoneauth1.html#keystoneauth1.session.Session diff --git a/doc/source/user/utils.rst b/doc/source/user/utils.rst new file mode 100644 index 0000000000..5c1f39de99 --- /dev/null +++ b/doc/source/user/utils.rst @@ -0,0 +1,3 @@ +Utilities +========= +.. automodule:: openstack.utils diff --git a/doc/source/user/warnings.rst b/doc/source/user/warnings.rst new file mode 100644 index 0000000000..34d1893fe1 --- /dev/null +++ b/doc/source/user/warnings.rst @@ -0,0 +1,20 @@ +Warnings +======== + +openstacksdk uses the `warnings`__ infrastructure to warn users about +deprecated resources and resource fields, as well as deprecated behavior in +openstacksdk itself. These warnings are derived from ``Warning`` or +``DeprecationWarning``. In Python, warnings are emitted by default while +deprecation warnings are silenced by default and must be turned on using the +``-Wa`` Python command line option or the ``PYTHONWARNINGS`` environment +variable. If you are writing an application that uses openstacksdk, you may +wish to enable some of these warnings during test runs to ensure you migrate +away from deprecated behavior. + +Available warnings +------------------ + +.. 
automodule:: openstack.warnings + :members: + +.. __: https://docs.python.org/3/library/warnings.html diff --git a/doc/source/users/connection.rst b/doc/source/users/connection.rst deleted file mode 100644 index 70301a29b1..0000000000 --- a/doc/source/users/connection.rst +++ /dev/null @@ -1,13 +0,0 @@ -Connection -========== -.. automodule:: openstack.connection - - from_config - ----------- - .. autofunction:: openstack.connection.from_config - -Connection Object ------------------ - -.. autoclass:: openstack.connection.Connection - :members: diff --git a/doc/source/users/examples b/doc/source/users/examples deleted file mode 120000 index d4cb9b9c80..0000000000 --- a/doc/source/users/examples +++ /dev/null @@ -1 +0,0 @@ -../../../examples/ \ No newline at end of file diff --git a/doc/source/users/guides/block_store.rst b/doc/source/users/guides/block_store.rst deleted file mode 100644 index bb67a4eed1..0000000000 --- a/doc/source/users/guides/block_store.rst +++ /dev/null @@ -1,9 +0,0 @@ -Using OpenStack Block Store -=========================== - -Before working with the Block Store service, you'll need to create a -connection to your OpenStack cloud by following the :doc:`connect` user -guide. This will provide you with the ``conn`` variable used in the examples -below. - -.. TODO(thowe): Implement this guide diff --git a/doc/source/users/guides/cluster.rst b/doc/source/users/guides/cluster.rst deleted file mode 100644 index d0a0474b80..0000000000 --- a/doc/source/users/guides/cluster.rst +++ /dev/null @@ -1,36 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the - License for the specific language governing permissions and limitations - under the License. - - -======================= -Using OpenStack Cluster -======================= - -Before working with the Cluster service, you'll need to create a connection -to your OpenStack cloud by following the :doc:`connect` user guide. This will -provide you with the ``conn`` variable used by all examples in this guide. - -The primary abstractions/resources of the Cluster service are: - -.. toctree:: - :maxdepth: 1 - - Profile Type - Profile - Cluster - Node - Policy Type - Policy - Receiver - Action - Event diff --git a/doc/source/users/guides/cluster/action.rst b/doc/source/users/guides/cluster/action.rst deleted file mode 100644 index 1a07479eb8..0000000000 --- a/doc/source/users/guides/cluster/action.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -==================== -Working with Actions -==================== - -.. TODO(Qiming): Implement this guide diff --git a/doc/source/users/guides/cluster/cluster.rst b/doc/source/users/guides/cluster/cluster.rst deleted file mode 100644 index b4772ef33a..0000000000 --- a/doc/source/users/guides/cluster/cluster.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================= -Managing Clusters -================= - -.. TODO(Qiming): Implement this guide diff --git a/doc/source/users/guides/cluster/event.rst b/doc/source/users/guides/cluster/event.rst deleted file mode 100644 index 185f454c50..0000000000 --- a/doc/source/users/guides/cluster/event.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -=================== -Working with Events -=================== - -.. TODO(Qiming): Implement this guide diff --git a/doc/source/users/guides/cluster/node.rst b/doc/source/users/guides/cluster/node.rst deleted file mode 100644 index d4e2f54f16..0000000000 --- a/doc/source/users/guides/cluster/node.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -============== -Managing Nodes -============== - -.. TODO(Qiming): Implement this guide diff --git a/doc/source/users/guides/cluster/policy.rst b/doc/source/users/guides/cluster/policy.rst deleted file mode 100644 index 7c708cac1c..0000000000 --- a/doc/source/users/guides/cluster/policy.rst +++ /dev/null @@ -1,102 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================= -Managing Policies -================= - -A **policy type** can be treated as the meta-type of a `Policy` object. A -registry of policy types is built when the Cluster service starts. When -creating a `Policy` object, you will indicate the policy type used in its -`spec` property. - - -List Policies -~~~~~~~~~~~~~ - -To examine the list of policies: - -.. literalinclude:: ../../examples/cluster/policy.py - :pyobject: list_policys - -When listing policies, you can specify the sorting option using the ``sort`` -parameter and you can do pagination using the ``limit`` and ``marker`` -parameters. 
- -Full example: `manage policy`_ - - -Create Policy -~~~~~~~~~~~~~ - -When creating a policy, you will provide a dictionary with keys and values -according to the policy type referenced. - -.. literalinclude:: ../../examples/cluster/policy.py - :pyobject: create_policy - -Optionally, you can specify a ``metadata`` keyword argument that contains some -key-value pairs to be associated with the policy. - -Full example: `manage policy`_ - - -Find Policy -~~~~~~~~~~~ - -To find a policy based on its name or ID: - -.. literalinclude:: ../../examples/cluster/policy.py - :pyobject: find_policy - -Full example: `manage policy`_ - - -Get Policy -~~~~~~~~~~ - -To get a policy based on its name or ID: - -.. literalinclude:: ../../examples/cluster/policy.py - :pyobject: get_policy - -Full example: `manage policy`_ - - -Update Policy -~~~~~~~~~~~~~ - -After a policy is created, most of its properties are immutable. Still, you -can update a policy's ``name`` and/or ``metadata``. - -.. literalinclude:: ../../examples/cluster/policy.py - :pyobject: update_policy - -The Cluster service doesn't allow updating the ``spec`` of a policy. The only -way to achieve that is to create a new policy. - -Full example: `manage policy`_ - - -Delete Policy -~~~~~~~~~~~~~ - -A policy can be deleted after creation, provided that it is not referenced -by any active clusters or nodes. If you attempt to delete a policy that is -still in use, you will get an error message. - -.. literalinclude:: ../../examples/cluster/policy.py - :pyobject: delete_policy - - -.. _manage policy: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/cluster/policy.py diff --git a/doc/source/users/guides/cluster/policy_type.rst b/doc/source/users/guides/cluster/policy_type.rst deleted file mode 100644 index 3211564d47..0000000000 --- a/doc/source/users/guides/cluster/policy_type.rst +++ /dev/null @@ -1,45 +0,0 @@ -.. 
- Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -========================= -Working with Policy Types -========================= - -A **policy** is a template that encodes the information needed for specifying -the rules that are checked/enforced before/after certain actions are performed -on a cluster. The rules are encoded in a property named ``spec``. - - -List Policy Types -~~~~~~~~~~~~~~~~~ - -To examine the known policy types: - -.. literalinclude:: ../../examples/cluster/policy_type.py - :pyobject: list_policy_types - -Full example: `manage policy type`_ - - -Get Policy Type -~~~~~~~~~~~~~~~ - -To retrieve the details about a policy type, you need to provide the name of -it. - -.. literalinclude:: ../../examples/cluster/policy_type.py - :pyobject: get_policy_type - -Full example: `manage policy type`_ - -.. _manage profile type: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/cluster/policy_type.py diff --git a/doc/source/users/guides/cluster/profile.rst b/doc/source/users/guides/cluster/profile.rst deleted file mode 100644 index ad3f5e5bea..0000000000 --- a/doc/source/users/guides/cluster/profile.rst +++ /dev/null @@ -1,105 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. 
You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================= -Managing Profiles -================= - -A **profile type** can be treated as the meta-type of a `Profile` object. A -registry of profile types is built when the Cluster service starts. When -creating a `Profile` object, you will indicate the profile type used in its -`spec` property. - - -List Profiles -~~~~~~~~~~~~~ - -To examine the list of profiles: - -.. literalinclude:: ../../examples/cluster/profile.py - :pyobject: list_profiles - -When listing profiles, you can specify the sorting option using the ``sort`` -parameter and you can do pagination using the ``limit`` and ``marker`` -parameters. - -Full example: `manage profile`_ - - -Create Profile -~~~~~~~~~~~~~~ - -When creating a profile, you will provide a dictionary with keys and values -specified according to the profile type referenced. - -.. literalinclude:: ../../examples/cluster/profile.py - :pyobject: create_profile - -Optionally, you can specify a ``metadata`` keyword argument that contains some -key-value pairs to be associated with the profile. - -Full example: `manage profile`_ - - -Find Profile -~~~~~~~~~~~~ - -To find a profile based on its name or ID: - -.. literalinclude:: ../../examples/cluster/profile.py - :pyobject: find_profile - -The Cluster service doesn't allow updating the ``spec`` of a profile. The only -way to achieve that is to create a new profile. - -Full example: `manage profile`_ - - -Get Profile -~~~~~~~~~~~~ - -To get a profile based on its name or ID: - -.. 
literalinclude:: ../../examples/cluster/profile.py - :pyobject: get_profile - -Full example: `manage profile`_ - - -Update Profile -~~~~~~~~~~~~~~ - -After a profile is created, most of its properties are immutable. Still, you -can update a profile's ``name`` and/or ``metadata``. - -.. literalinclude:: ../../examples/cluster/profile.py - :pyobject: update_profile - -The Cluster service doesn't allow updating the ``spec`` of a profile. The only -way to achieve that is to create a new profile. - -Full example: `manage profile`_ - - -Delete Profile -~~~~~~~~~~~~~~ - -A profile can be deleted after creation, provided that it is not referenced -by any active clusters or nodes. If you attempt to delete a profile that is -still in use, you will get an error message. - -.. literalinclude:: ../../examples/cluster/profile.py - :pyobject: delete_profile - - -.. _manage profile: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/cluster/profile.py diff --git a/doc/source/users/guides/cluster/profile_type.rst b/doc/source/users/guides/cluster/profile_type.rst deleted file mode 100644 index 45183bf0aa..0000000000 --- a/doc/source/users/guides/cluster/profile_type.rst +++ /dev/null @@ -1,44 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -========================== -Working with Profile Types -========================== - -A **profile** is a template used to create and manage nodes, i.e. objects -exposed by other OpenStack services. 
A profile encodes the information needed -for node creation in a property named ``spec``. - - -List Profile Types -~~~~~~~~~~~~~~~~~~ - -To examine the known profile types: - -.. literalinclude:: ../../examples/cluster/profile_type.py - :pyobject: list_profile_types - -Full example: `manage profile type`_ - - -Get Profile Type -~~~~~~~~~~~~~~~~ - -To get the details about a profile type, you need to provide the name of it. - -.. literalinclude:: ../../examples/cluster/profile_type.py - :pyobject: get_profile_type - -Full example: `manage profile type`_ - -.. _manage profile type: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/cluster/profile_type.py diff --git a/doc/source/users/guides/cluster/receiver.rst b/doc/source/users/guides/cluster/receiver.rst deleted file mode 100644 index a34f67b7ea..0000000000 --- a/doc/source/users/guides/cluster/receiver.rst +++ /dev/null @@ -1,18 +0,0 @@ -.. - Licensed under the Apache License, Version 2.0 (the "License"); you may - not use this file except in compliance with the License. You may obtain - a copy of the License at - - http://www.apache.org/licenses/LICENSE-2.0 - - Unless required by applicable law or agreed to in writing, software - distributed under the License is distributed on an "AS IS" BASIS, WITHOUT - WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the - License for the specific language governing permissions and limitations - under the License. - -================== -Managing Receivers -================== - -.. TODO(Qiming): Implement this guide diff --git a/doc/source/users/guides/compute.rst b/doc/source/users/guides/compute.rst deleted file mode 100644 index cc06fa28c4..0000000000 --- a/doc/source/users/guides/compute.rst +++ /dev/null @@ -1,89 +0,0 @@ -Using OpenStack Compute -======================= - -Before working with the Compute service, you'll need to create a connection -to your OpenStack cloud by following the :doc:`connect` user guide. 
This will -provide you with the ``conn`` variable used in the examples below. - -.. contents:: Table of Contents - :local: - -The primary resource of the Compute service is the server. - -List Servers ------------- - -A **server** is a virtual machine that provides access to a compute instance -being run by your cloud provider. - -.. literalinclude:: ../examples/compute/list.py - :pyobject: list_servers - -Full example: `compute resource list`_ - -List Images ------------ - -An **image** is the operating system you want to use for your server. - -.. literalinclude:: ../examples/compute/list.py - :pyobject: list_images - -Full example: `compute resource list`_ - -List Flavors ------------- - -A **flavor** is the resource configuration for a server. Each flavor is a -unique combination of disk, memory, vCPUs, and network bandwidth. - -.. literalinclude:: ../examples/compute/list.py - :pyobject: list_flavors - -Full example: `compute resource list`_ - -List Networks -------------- - -A **network** provides connectivity to servers. - -.. literalinclude:: ../examples/network/list.py - :pyobject: list_networks - -Full example: `network resource list`_ - -Create Key Pair ---------------- - -A **key pair** is the public key and private key of -`public–key cryptography`_. They are used to encrypt and decrypt login -information when connecting to your server. - -.. literalinclude:: ../examples/compute/create.py - :pyobject: create_keypair - -Full example: `compute resource create`_ - -Create Server -------------- - -At minimum, a server requires a name, an image, a flavor, and a network on -creation. You can discover the names and IDs of these attributes by listing -them as above and then using the find methods to get the appropriate -resources. - -Ideally you'll also create a server using a keypair so you can login to that -server with the private key. - -Servers take time to boot so we call ``wait_for_server`` to wait -for it to become active. - -.. 
literalinclude:: ../examples/compute/create.py - :pyobject: create_server - -Full example: `compute resource create`_ - -.. _compute resource list: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/compute/list.py -.. _network resource list: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/network/list.py -.. _compute resource create: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/compute/create.py -.. _public–key cryptography: https://en.wikipedia.org/wiki/Public-key_cryptography \ No newline at end of file diff --git a/doc/source/users/guides/connect.rst b/doc/source/users/guides/connect.rst deleted file mode 100644 index 80267b80e0..0000000000 --- a/doc/source/users/guides/connect.rst +++ /dev/null @@ -1,44 +0,0 @@ -Connect -======= - -In order to work with an OpenStack cloud you first need to create a -:class:`~openstack.connection.Connection` to it using your credentials. A -:class:`~openstack.connection.Connection` can be -created in 3 ways, using the class itself, a file, or environment variables. -If this is your first time using the SDK, we recommend simply using the -class itself as illustrated below. - -.. note:: To get your credentials - `Download the OpenStack RC file `_. - -Create Connection ------------------ - -To create a connection you need a :class:`~openstack.profile.Profile` and a -:class:`~openstack.connection.Connection`. - -.. literalinclude:: ../examples/connect.py - :pyobject: create_connection - -The :class:`~openstack.profile.Profile` sets your preferences for each -service. You will pass it the region of the OpenStack cloud that this -connection will use. - -The :class:`~openstack.connection.Connection` is a context for a connection -to an OpenStack cloud. You will primarily use it to set the -:class:`~openstack.profile.Profile` and authentication information. You can -also set the ``user_agent`` to something that describes your application -(e.g. 
``my-web-app/1.3.4``). - -Full example at `connect.py `_ - -.. note:: To enable logging, see the :doc:`logging` user guide. - -Next ----- -Now that you can create a connection, continue with the :ref:`user_guides` -to work with an OpenStack service. - -As an alternative to creating a :class:`~openstack.connection.Connection` -using the class itself, you can connect using a file or environment -variables. See the :doc:`connect_from_config` user guide. diff --git a/doc/source/users/guides/identity.rst b/doc/source/users/guides/identity.rst deleted file mode 100644 index 1cd0c4422f..0000000000 --- a/doc/source/users/guides/identity.rst +++ /dev/null @@ -1,111 +0,0 @@ -Using OpenStack Identity -======================== - -Before working with the Identity service, you'll need to create a connection -to your OpenStack cloud by following the :doc:`connect` user guide. This will -provide you with the ``conn`` variable used in the examples below. - -The OpenStack Identity service is the default identity management system for -OpenStack. The Identity service authentication process confirms the identity -of a user and an incoming request by validating a set of credentials that the -user supplies. Initially, these credentials are a user name and password or a -user name and API key. When the Identity service validates user credentials, -it issues an authentication token that the user provides in subsequent -requests. An authentication token is an alpha-numeric text string that enables -access to OpenStack APIs and resources. A token may be revoked at any time and -is valid for a finite duration. - -List Users ----------- -A **user** is a digital representation of a person, system, or service that -uses OpenStack cloud services. The Identity service validates that incoming -requests are made by the user who claims to be making the call. Users have -a login and can access resources by using assigned tokens. 
Users can be -directly assigned to a particular project and behave as if they are contained -in that project. - -.. literalinclude:: ../examples/identity/list.py - :pyobject: list_users - -Full example: `identity resource list`_ - -List Credentials ----------------- -**Credentials** are data that confirms the identity of the user. For example, -user name and password, user name and API key, or an authentication token that -the Identity service provides. - -.. literalinclude:: ../examples/identity/list.py - :pyobject: list_credentials - -Full example: `identity resource list`_ - -List Projects -------------- -A **project** is a container that groups or isolates resources or identity -objects. - -.. literalinclude:: ../examples/identity/list.py - :pyobject: list_projects - -Full example: `identity resource list`_ - -List Domains ------------- -A **domain** is an Identity service API v3 entity and represents a collection -of projects and users that defines administrative boundaries for the management -of Identity entities. Users can be granted the administrator role for a domain. -A domain administrator can create projects, users, and groups in a domain and -assign roles to users and groups in a domain. - -.. literalinclude:: ../examples/identity/list.py - :pyobject: list_domains - -Full example: `identity resource list`_ - -List Groups ------------ -A **group** is an Identity service API v3 entity and represents a collection of -users that are owned by a domain. A group role granted to a domain or project -applies to all users in the group. Adding users to, or removing users from, a -group respectively grants, or revokes, their role and authentication to the -associated domain or project. - -.. 
literalinclude:: ../examples/identity/list.py - :pyobject: list_groups - -Full example: `identity resource list`_ - -List Services -------------- -A **service** is an OpenStack service, such as Compute, Object Storage, or -Image service, that provides one or more endpoints through which users can -access resources and perform operations. - -.. literalinclude:: ../examples/identity/list.py - :pyobject: list_services - -Full example: `identity resource list`_ - -List Endpoints --------------- -An **endpoint** is a network-accessible address, usually a URL, through which -you can access a service. - -.. literalinclude:: ../examples/identity/list.py - :pyobject: list_endpoints - -Full example: `identity resource list`_ - -List Regions ------------- -A **region** is an Identity service API v3 entity and represents a general -division in an OpenStack deployment. You can associate zero or more -sub-regions with a region to make a tree-like structured hierarchy. - -.. literalinclude:: ../examples/identity/list.py - :pyobject: list_regions - -Full example: `identity resource list`_ - -.. _identity resource list: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/identity/list.py diff --git a/doc/source/users/guides/image.rst b/doc/source/users/guides/image.rst deleted file mode 100644 index 322628933a..0000000000 --- a/doc/source/users/guides/image.rst +++ /dev/null @@ -1,47 +0,0 @@ -Using OpenStack Image -===================== - -Before working with the Image service, you'll need to create a connection -to your OpenStack cloud by following the :doc:`connect` user guide. This will -provide you with the ``conn`` variable used in the examples below. - -The primary resource of the Image service is the image. - -List Images ------------ - -An **image** is a collection of files for a specific operating system -that you use to create or rebuild a server. OpenStack provides -`pre-built images `_. 
-You can also create custom images, or snapshots, from servers that you have -launched. Images come in different formats and are sometimes called virtual -machine images. - -.. literalinclude:: ../examples/image/list.py - :pyobject: list_images - -Full example: `image resource list`_ - -Create Image ------------- - -Create an image by uploading its data and setting its attributes. - -.. literalinclude:: ../examples/image/create.py - :pyobject: upload_image - -Full example: `image resource create`_ - -Delete Image ------------- - -Delete an image. - -.. literalinclude:: ../examples/image/delete.py - :pyobject: delete_image - -Full example: `image resource delete`_ - -.. _image resource create: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/image/create.py -.. _image resource delete: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/image/delete.py -.. _image resource list: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/image/list.py diff --git a/doc/source/users/guides/key_manager.rst b/doc/source/users/guides/key_manager.rst deleted file mode 100644 index 1efd12b7a7..0000000000 --- a/doc/source/users/guides/key_manager.rst +++ /dev/null @@ -1,56 +0,0 @@ -Using OpenStack Key Manager -=========================== - -Before working with the Key Manager service, you'll need to create a -connection to your OpenStack cloud by following the :doc:`connect` user -guide. This will provide you with the ``conn`` variable used in the examples -below. - -.. contents:: Table of Contents - :local: - -.. note:: Some interactions with the Key Manager service differ from that - of other services in that resources do not have a proper ``id`` parameter, - which is necessary to make some calls. Instead, resources have a separately - named id attribute, e.g., the Secret resource has ``secret_id``. - - The examples below outline when to pass in those id values. 
- -Create a Secret ---------------- - -The Key Manager service allows you to create new secrets by passing the -attributes of the :class:`~openstack.key_manager.v1.secret.Secret` to the -:meth:`~openstack.key_manager.v1._proxy.Proxy.create_secret` method. - -.. literalinclude:: ../examples/key_manager/create.py - :pyobject: create_secret - -List Secrets ------------- - -Once you have stored some secrets, they are available for you to list -via the :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method. -This method returns a generator, which yields each -:class:`~openstack.key_manager.v1.secret.Secret`. - -.. literalinclude:: ../examples/key_manager/list.py - :pyobject: list_secrets - -The :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method can -also make more advanced queries to limit the secrets that are returned. - -.. literalinclude:: ../examples/key_manager/list.py - :pyobject: list_secrets_query - -Get Secret Payload ------------------- - -Once you have received a :class:`~openstack.key_manager.v1.secret.Secret`, -you can obtain the payload for it by passing the secret's id value to -the :meth:`~openstack.key_manager.v1._proxy.Proxy.secrets` method. -Use the :data:`~openstack.key_manager.v1.secret.Secret.secret_id` attribute -when making this request. - -.. literalinclude:: ../examples/key_manager/get.py - :pyobject: get_secret_payload diff --git a/doc/source/users/guides/logging.rst b/doc/source/users/guides/logging.rst deleted file mode 100644 index 623916fad1..0000000000 --- a/doc/source/users/guides/logging.rst +++ /dev/null @@ -1,79 +0,0 @@ -Logging -======= - -Logging can save you time and effort when developing your code or looking -for help. If your code is not behaving how you expect it to, enabling and -configuring logging can quickly give you valuable insight into the root -cause of the issue. If you need help from the OpenStack community, the -logs can help the people there assist you. - -.. note:: By default, no logging is done. 
- -Enable SDK Logging ------------------ - -To enable logging you use :func:`~openstack.utils.enable_logging`. - -The ``debug`` parameter controls the logging level. Set ``debug=True`` to -log debug and higher messages. Set ``debug=False`` to log warning and higher -messages. - -To log debug and higher messages:: - - import sys - from openstack import utils - - utils.enable_logging(debug=True, stream=sys.stdout) - -The ``path`` parameter controls the location of a log file. If set, this -parameter will send log messages to a file using a :py:class:`~logging.FileHandler`. - -To log messages to a file called ``openstack.log``:: - - from openstack import utils - - utils.enable_logging(debug=True, path='openstack.log') - -The ``stream`` parameter controls the stream where log messages are written to. -If set to ``sys.stdout`` or ``sys.stderr``, this parameter will send log -messages to that stream using a :py:class:`~logging.StreamHandler`. - -To log messages to the console on ``stdout``:: - - import sys - from openstack import utils - - utils.enable_logging(debug=True, stream=sys.stdout) - -You can combine the ``path`` and ``stream`` parameters to log to both places -simultaneously. - -To log messages to a file called ``openstack.log`` and the console on -``stdout``:: - - import sys - from openstack import utils - - utils.enable_logging(debug=True, path='openstack.log', stream=sys.stdout) - - -Enable requests Logging ----------------------- - -The SDK depends on a small number of other libraries. Notably, it uses -`requests `_ for its transport layer. -To get even more information about the request/response cycle, you enable -logging of requests the same as you would any other library. 
- -To log messages to the console on ``stdout``:: - - import logging - import sys - - logger = logging.getLogger('requests') - formatter = logging.Formatter( - '%(asctime)s %(levelname)s: %(name)s %(message)s') - console = logging.StreamHandler(sys.stdout) - console.setFormatter(formatter) - logger.setLevel(logging.DEBUG) - logger.addHandler(console) diff --git a/doc/source/users/guides/network.rst b/doc/source/users/guides/network.rst deleted file mode 100644 index e0701bc591..0000000000 --- a/doc/source/users/guides/network.rst +++ /dev/null @@ -1,116 +0,0 @@ -Using OpenStack Network -======================= - -Before working with the Network service, you'll need to create a connection -to your OpenStack cloud by following the :doc:`connect` user guide. This will -provide you with the ``conn`` variable used in the examples below. - -.. contents:: Table of Contents - :local: - -The primary resource of the Network service is the network. - -List Networks -------------- - -A **network** is an isolated `Layer 2 `_ -networking segment. There are two types of networks, project and provider networks. -Project networks are fully isolated and are not shared with other projects. Provider -networks map to existing physical networks in the data center and provide external -network access for servers. Only an OpenStack administrator can create provider -networks. Networks can be connected via routers. - -.. literalinclude:: ../examples/network/list.py - :pyobject: list_networks - -Full example: `network resource list`_ - -List Subnets ------------- - -A **subnet** is a block of IP addresses and associated configuration state. -Subnets are used to allocate IP addresses when new ports are created on a -network. - -.. literalinclude:: ../examples/network/list.py - :pyobject: list_subnets - -Full example: `network resource list`_ - -List Ports ----------- - -A **port** is a connection point for attaching a single device, such as the -`NIC `_ -of a server, to a network. 
The port also describes the associated network -configuration, such as the `MAC `_ -and IP addresses to be used on that port. - -.. literalinclude:: ../examples/network/list.py - :pyobject: list_ports - -Full example: `network resource list`_ - -List Security Groups --------------------- - -A **security group** acts as a virtual firewall for servers. It is a container -for security group rules which specify the type of network traffic and direction -that is allowed to pass through a port. - -.. literalinclude:: ../examples/network/list.py - :pyobject: list_security_groups - -Full example: `network resource list`_ - -List Routers ------------- - -A **router** is a logical component that forwards data packets between networks. -It also provides `Layer 3 `_ and -`NAT `_ forwarding to -provide external network access for servers on project networks. - -.. literalinclude:: ../examples/network/list.py - :pyobject: list_routers - -Full example: `network resource list`_ - -List Network Agents -------------------- - -A **network agent** is a plugin that handles various tasks used to -implement virtual networks. These agents include neutron-dhcp-agent, -neutron-l3-agent, neutron-metering-agent, and neutron-lbaas-agent, -among others. - -.. literalinclude:: ../examples/network/list.py - :pyobject: list_network_agents - -Full example: `network resource list`_ - -Create Network --------------- - -Create a project network and subnet. This network can be used when creating -a server and allows the server to communicate with other servers on the -same project network. - -.. literalinclude:: ../examples/network/create.py - :pyobject: create_network - -Full example: `network resource create`_ - -Delete Network --------------- - -Delete a project network and its subnets. - -.. literalinclude:: ../examples/network/delete.py - :pyobject: delete_network - -Full example: `network resource delete`_ - -.. 
_network resource create: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/network/create.py -.. _network resource delete: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/network/delete.py -.. _network resource list: http://git.openstack.org/cgit/openstack/python-openstacksdk/tree/examples/network/list.py diff --git a/doc/source/users/guides/object_store.rst b/doc/source/users/guides/object_store.rst deleted file mode 100644 index da8c57e2cd..0000000000 --- a/doc/source/users/guides/object_store.rst +++ /dev/null @@ -1,211 +0,0 @@ -Using OpenStack Object Store -============================ - -Before working with the Object Store service, you'll need to create a -connection to your OpenStack cloud by following the :doc:`connect` user -guide. This will provide you with the ``conn`` variable used in the examples -below. - -.. contents:: Table of Contents - :local: - -The primary resources of the Object Store service are containers and objects. - -Working with Containers ------------------------ - -Listing Containers -****************** - -To list existing containers, use the -:meth:`~openstack.object_store.v1._proxy.Proxy.containers` method. :: - - >>> for cont in conn.object_store.containers(): - ... print cont - ... - openstack.object_store.v1.container.Container: {u'count': 5, - u'bytes': 500, u'name': u'my container'} - openstack.object_store.v1.container.Container: {u'count': 0, - u'bytes': 0, u'name': u'empty container'} - openstack.object_store.v1.container.Container: {u'count': 100, - u'bytes': 1000000, u'name': u'another container'} - -The ``containers`` method returns a generator which yields -:class:`~openstack.object_store.v1.container.Container` objects. It handles -pagination for you, which can be adjusted via the ``limit`` argument. -By default, the ``containers`` method will yield as many containers as the -service will return, and it will continue requesting until it receives -no more. 
:: - - >>> for cont in conn.object_store.containers(limit=500): - ... print(cont) - ... - <500 Containers> - ... another request transparently made to the Object Store service - <500 more Containers> - ... - -Creating Containers -******************* - -To create a container, use the -:meth:`~openstack.object_store.v1._proxy.Proxy.create_container` method. :: - - >>> cont = conn.object_store.create_container(name="new container") - >>> cont - openstack.object_store.v1.container.Container: {'name': u'new container'} - -Working with Container Metadata -******************************* - -To get the metadata for a container, use the -:meth:`~openstack.object_store.v1._proxy.Proxy.get_container_metadata` method. -This method either takes the name of a container, or a -:class:`~openstack.object_store.v1.container.Container` object, and it returns -a `Container` object with all of its metadata attributes set. :: - - >>> cont = conn.object_store.get_container_metadata("new container") - openstack.object_store.v1.container.Container: {'content-length': '0', - 'x-container-object-count': '0', 'name': u'new container', - 'accept-ranges': 'bytes', - 'x-trans-id': 'tx22c5de63466e4c05bb104-0054740c39', - 'date': 'Tue, 25 Nov 2014 04:57:29 GMT', - 'x-timestamp': '1416889793.23520', 'x-container-read': '.r:mysite.com', - 'x-container-bytes-used': '0', 'content-type': 'text/plain; charset=utf-8'} - -To set the metadata for a container, use the -:meth:`~openstack.object_store.v1._proxy.Proxy.set_container_metadata` method. -This method takes a :class:`~openstack.object_store.v1.container.Container` -object. For example, to grant another user write access to this container, -you can set the -:attr:`~openstack.object_store.v1.container.Container.write_ACL` on a -resource and pass it to `set_container_metadata`. 
:: - - >>> cont.write_ACL = "big_project:another_user" - >>> conn.object_store.set_container_metadata(cont) - openstack.object_store.v1.container.Container: {'content-length': '0', - 'x-container-object-count': '0', - 'name': u'my new container', 'accept-ranges': 'bytes', - 'x-trans-id': 'txc3ee751f971d41de9e9f4-0054740ec1', - 'date': 'Tue, 25 Nov 2014 05:08:17 GMT', - 'x-timestamp': '1416889793.23520', 'x-container-read': '.r:mysite.com', - 'x-container-bytes-used': '0', 'content-type': 'text/plain; charset=utf-8', - 'x-container-write': 'big_project:another_user'} - -Working with Objects --------------------- - -Objects are held in containers. From an API standpoint, you work with -them using similarly named methods, typically with an additional argument -to specify their container. - -Listing Objects -*************** - -To list the objects that exist in a container, use the -:meth:`~openstack.object_store.v1._proxy.Proxy.objects` method. - -If you have a :class:`~openstack.object_store.v1.container.Container` -object, you can pass it to ``objects``. :: - - >>> print cont.name - pictures - >>> for obj in conn.object_store.objects(cont): - ... print obj - ... - openstack.object_store.v1.container.Object: - {u'hash': u'0522d4ccdf9956badcb15c4087a0c4cb', - u'name': u'pictures/selfie.jpg', u'bytes': 15744, - 'last-modified': u'2014-10-31T06:33:36.618640', - u'last_modified': u'2014-10-31T06:33:36.618640', - u'content_type': u'image/jpeg', 'container': u'pictures', - 'content-type': u'image/jpeg'} - ... - -Similar to the :meth:`~openstack.object_store.v1._proxy.Proxy.containers` -method, ``objects`` returns a generator which yields -:class:`~openstack.object_store.v1.obj.Object` objects stored in the -container. It also handles pagination for you, which you can adjust -with the ``limit`` parameter, otherwise making each request for the maximum -that your Object Store will return. 
- -If you have the name of a container instead of an object, you can also -pass that to the ``objects`` method. :: - - >>> for obj in conn.object_store.objects("pictures".decode("utf8"), - limit=100): - ... print obj - ... - <100 Objects> - ... another request transparently made to the Object Store service - <100 more Objects> - -Getting Object Data -******************* - -Once you have an :class:`~openstack.object_store.v1.obj.Object`, you get -the data stored inside of it with the -:meth:`~openstack.object_store.v1._proxy.Proxy.get_object_data` method. :: - - >>> print ob.name - message.txt - >>> data = conn.object_store.get_object_data(ob) - >>> print data - Hello, world! - -Additionally, if you want to save the object to disk, the -:meth:`~openstack.object_store.v1._proxy.Proxy.download_object` convenience -method takes an :class:`~openstack.object_store.v1.obj.Object` and a -``path`` to write the contents to. :: - - >>> conn.object_store.download_object(ob, "the_message.txt") - -Uploading Objects -***************** - -Once you have data you'd like to store in the Object Store service, you use -the :meth:`~openstack.object_store.v1._proxy.Proxy.upload_object` method. -This method takes the ``data`` to be stored, along with at least an object -``name`` and the ``container`` it is to be stored in. :: - - >>> hello = conn.object_store.upload_object(container="messages", - name="helloworld.txt", - data="Hello, world!") - >>> print hello - openstack.object_store.v1.container.Object: {'content-length': '0', - 'container': u'messages', 'name': u'helloworld.txt', - 'last-modified': 'Tue, 25 Nov 2014 17:39:29 GMT', - 'etag': '5eb63bbbe01eeed093cb22bb8f5acdc3', - 'x-trans-id': 'tx3035d41b03334aeaaf3dd-005474bed0', - 'date': 'Tue, 25 Nov 2014 17:39:28 GMT', - 'content-type': 'text/html; charset=UTF-8'} - -Working with Object Metadata -**************************** - -Working with metadata on objects is identical to how it's done with -containers. 
You use the -:meth:`~openstack.object_store.v1._proxy.Proxy.get_object_metadata` and -:meth:`~openstack.object_store.v1._proxy.Proxy.set_object_metadata` methods. - -The metadata attributes to be set can be found on the -:class:`~openstack.object_store.v1.obj.Object` object. :: - - >>> secret.delete_after = 300 - >>> secret = conn.object_store.set_object_metadata(secret) - -We set the :attr:`~openstack.object_store.obj.Object.delete_after` -value to 300 seconds, causing the object to be deleted in 300 seconds, -or five minutes. That attribute corresponds to the ``X-Delete-After`` -header value, which you can see is returned when we retrieve the updated -metadata. :: - - >>> conn.object_store.get_object_metadata(ob) - openstack.object_store.v1.container.Object: {'content-length': '11', - 'container': u'Secret Container', - 'name': u'selfdestruct.txt', 'x-delete-after': 300, - 'accept-ranges': 'bytes', 'last-modified': 'Tue, 25 Nov 2014 17:50:45 GMT', - 'etag': '5eb63bbbe01eeed093cb22bb8f5acdc3', - 'x-timestamp': '1416937844.36805', - 'x-trans-id': 'tx5c3fd94adf7c4e1b8f334-005474c17b', - 'date': 'Tue, 25 Nov 2014 17:50:51 GMT', 'content-type': 'text/plain'} diff --git a/doc/source/users/guides/telemetry.rst b/doc/source/users/guides/telemetry.rst deleted file mode 100644 index cf7acae040..0000000000 --- a/doc/source/users/guides/telemetry.rst +++ /dev/null @@ -1,11 +0,0 @@ -Using OpenStack Telemetry -========================= - -.. caution:: - BETA: This API is a work in progress and is subject to change. - -Before working with the Telemetry service, you'll need to create a connection -to your OpenStack cloud by following the :doc:`connect` user guide. This will -provide you with the ``conn`` variable used in the examples below. - -.. 
TODO(thowe): Implement this guide diff --git a/doc/source/users/index.rst b/doc/source/users/index.rst deleted file mode 100644 index b03cb4c921..0000000000 --- a/doc/source/users/index.rst +++ /dev/null @@ -1,127 +0,0 @@ -Getting started with the OpenStack SDK -====================================== - -For a listing of terms used throughout the SDK, including the names of -projects and services supported by it, see the :doc:`glossary <../glossary>`. - -Installation ------------- - -The OpenStack SDK is available on -`PyPI `_ under the name -**openstacksdk**. To install it, use ``pip``:: - - $ pip install openstacksdk - -.. _user_guides: - -User Guides ------------ - -These guides walk you through how to make use of the libraries we provide -to work with each OpenStack service. If you're looking for a cookbook -approach, this is where you'll want to begin. - -.. toctree:: - :maxdepth: 1 - - Connect to an OpenStack Cloud - Connect to an OpenStack Cloud Using a Config File - Logging - Block Store - Cluster - Compute - Database - Identity - Image - Key Manager - Network - Object Store - Orchestration - Telemetry - -API Documentation ------------------ - -Service APIs are exposed through a two-layered approach. The classes -exposed through our *Connection* interface are the place to start if you're -an application developer consuming an OpenStack cloud. The *Resource* -interface is the layer upon which the *Connection* is built, with -*Connection* methods accepting and returning *Resource* objects. - -Connection Interface -******************** - -A *Connection* instance maintains your session, authentication, transport, -and profile, providing you with a set of higher-level interfaces to work -with OpenStack services. - -.. toctree:: - :maxdepth: 1 - - connection - profile - -Once you have a *Connection* instance, the following services may be exposed -to you. 
Your user profile determines the full set of exposed services, -but listed below are the ones provided by this SDK by default. - -.. toctree:: - :maxdepth: 1 - - Block Store - Cluster - Compute - Database - Identity - Image - Key Manager - Network - Object Store - Orchestration - Telemetry - -Resource Interface ****************** - -The *Resource* layer is a lower-level interface to communicate with OpenStack -services. While the classes exposed by the *Connection* build a convenience -layer on top of this, *Resources* can be used directly. However, the most -common usage of this layer is in receiving an object from a class in the -*Connection* layer, modifying it, and sending it back into the *Connection* -layer, such as to update a resource on the server. - -The following services have exposed *Resource* classes. - -.. toctree:: - :maxdepth: 1 - - Block Store - Cluster - Compute - Database - Identity - Image - Key Management - Metric - Network - Orchestration - Object Store - Telemetry - -Low-Level Classes ***************** - -The following classes are not commonly used by application developers, -but are used to construct applications to talk to OpenStack APIs. Typically -these parts are managed through the `Connection Interface`_, but their use -can be customized. - -.. toctree:: - :maxdepth: 1 - - session - resource - resource2 - service_filter - utils diff --git a/doc/source/users/profile.rst b/doc/source/users/profile.rst deleted file mode 100644 index 195a1848c1..0000000000 --- a/doc/source/users/profile.rst +++ /dev/null @@ -1,9 +0,0 @@ -Profile ======= -.. automodule:: openstack.profile - -Profile Object -------------- - -.. 
autoclass:: openstack.profile.Profile - :members: diff --git a/doc/source/users/proxies/block_store.rst b/doc/source/users/proxies/block_store.rst deleted file mode 100644 index fcc09c4c06..0000000000 --- a/doc/source/users/proxies/block_store.rst +++ /dev/null @@ -1,16 +0,0 @@ -Block Store API -=============== - -For details on how to use block_store, see :doc:`/users/guides/block_store` - -.. automodule:: openstack.block_store.v2._proxy - -The BlockStore Class --------------------- - -The block_store high-level interface is available through the ``block_store`` -member of a :class:`~openstack.connection.Connection` object. -The ``block_store`` member will only be added if the service is detected. - -.. autoclass:: openstack.block_store.v2._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/cluster.rst b/doc/source/users/proxies/cluster.rst deleted file mode 100644 index 6715fd3837..0000000000 --- a/doc/source/users/proxies/cluster.rst +++ /dev/null @@ -1,14 +0,0 @@ -Cluster API -=========== - -.. automodule:: openstack.cluster.v1._proxy - -The Cluster Class ------------------ - -The cluster high-level interface is available through the ``cluster`` -member of a :class:`~openstack.connection.Connection` object. The -``cluster`` member will only be added if the service is detected. - -.. autoclass:: openstack.cluster.v1._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/compute.rst b/doc/source/users/proxies/compute.rst deleted file mode 100644 index c57a315998..0000000000 --- a/doc/source/users/proxies/compute.rst +++ /dev/null @@ -1,16 +0,0 @@ -Compute API -=========== - -For details on how to use compute, see :doc:`/users/guides/compute` - -.. automodule:: openstack.compute.v2._proxy - -The Compute Class ------------------ - -The compute high-level interface is available through the ``compute`` -member of a :class:`~openstack.connection.Connection` object. The -``compute`` member will only be added if the service is detected. - -.. 
autoclass:: openstack.compute.v2._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/database.rst b/doc/source/users/proxies/database.rst deleted file mode 100644 index 784eb0999f..0000000000 --- a/doc/source/users/proxies/database.rst +++ /dev/null @@ -1,16 +0,0 @@ -Database API -============ - -For details on how to use database, see :doc:`/users/guides/database` - -.. automodule:: openstack.database.v1._proxy - -The Database Class ------------------- - -The database high-level interface is available through the ``database`` -member of a :class:`~openstack.connection.Connection` object. The -``database`` member will only be added if the service is detected. - -.. autoclass:: openstack.database.v1._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/identity.rst b/doc/source/users/proxies/identity.rst deleted file mode 100644 index 037398caf5..0000000000 --- a/doc/source/users/proxies/identity.rst +++ /dev/null @@ -1,33 +0,0 @@ -Identity API v2 -=============== - -For details on how to use identity, see :doc:`/users/guides/identity` - -.. automodule:: openstack.identity.v2._proxy - -The Identity v2 Class ---------------------- - -The identity high-level interface is available through the ``identity`` -member of a :class:`~openstack.connection.Connection` object. The -``identity`` member will only be added if the service is detected. - -.. autoclass:: openstack.identity.v2._proxy.Proxy - :members: - -Identity API v3 -=============== - -For details on how to use identity, see :doc:`/users/guides/identity` - -.. automodule:: openstack.identity.v3._proxy - -The Identity v3 Class ---------------------- - -The identity high-level interface is available through the ``identity`` -member of a :class:`~openstack.connection.Connection` object. The -``identity`` member will only be added if the service is detected. - -.. 
autoclass:: openstack.identity.v3._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/image.rst b/doc/source/users/proxies/image.rst deleted file mode 100644 index 0542671e54..0000000000 --- a/doc/source/users/proxies/image.rst +++ /dev/null @@ -1,33 +0,0 @@ -Image API v1 -============ - -For details on how to use image, see :doc:`/users/guides/image` - -.. automodule:: openstack.image.v1._proxy - -The Image v1 Class ------------------- - -The image high-level interface is available through the ``image`` member of a -:class:`~openstack.connection.Connection` object. The ``image`` member will -only be added if the service is detected. - -.. autoclass:: openstack.image.v1._proxy.Proxy - :members: - -Image API v2 -============ - -For details on how to use image, see :doc:`/users/guides/image` - -.. automodule:: openstack.image.v2._proxy - -The Image v2 Class ------------------- - -The image high-level interface is available through the ``image`` member of a -:class:`~openstack.connection.Connection` object. The ``image`` member will -only be added if the service is detected. - -.. autoclass:: openstack.image.v2._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/key_manager.rst b/doc/source/users/proxies/key_manager.rst deleted file mode 100644 index 455e1b5d27..0000000000 --- a/doc/source/users/proxies/key_manager.rst +++ /dev/null @@ -1,18 +0,0 @@ -KeyManager API -============== - -For details on how to use key_management, see -:doc:`/users/guides/key_manager` - -.. automodule:: openstack.key_manager.v1._proxy - -The KeyManager Class --------------------- - -The key_management high-level interface is available through the -``key_manager`` member of a :class:`~openstack.connection.Connection` -object. The ``key_manager`` member will only be added if the service is -detected. - -.. 
autoclass:: openstack.key_manager.v1._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/network.rst b/doc/source/users/proxies/network.rst deleted file mode 100644 index 5d9a17130e..0000000000 --- a/doc/source/users/proxies/network.rst +++ /dev/null @@ -1,16 +0,0 @@ -Network API -=========== - -For details on how to use network, see :doc:`/users/guides/network` - -.. automodule:: openstack.network.v2._proxy - -The Network Class ------------------ - -The network high-level interface is available through the ``network`` -member of a :class:`~openstack.connection.Connection` object. The -``network`` member will only be added if the service is detected. - -.. autoclass:: openstack.network.v2._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/object_store.rst b/doc/source/users/proxies/object_store.rst deleted file mode 100644 index 3294d8b6aa..0000000000 --- a/doc/source/users/proxies/object_store.rst +++ /dev/null @@ -1,15 +0,0 @@ -Object Store API -================ - -For details on how to use this API, see :doc:`/users/guides/object_store` - -.. automodule:: openstack.object_store.v1._proxy - -The Object Store Class ----------------------- - -The Object Store high-level interface is exposed as the ``object_store`` -object on :class:`~openstack.connection.Connection` objects. - -.. autoclass:: openstack.object_store.v1._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/orchestration.rst b/doc/source/users/proxies/orchestration.rst deleted file mode 100644 index 9621a6479b..0000000000 --- a/doc/source/users/proxies/orchestration.rst +++ /dev/null @@ -1,17 +0,0 @@ -Orchestration API -================= - -For details on how to use orchestration, see :doc:`/users/guides/orchestration` - -.. automodule:: openstack.orchestration.v1._proxy - -The Orchestration Class ------------------------ - -The orchestration high-level interface is available through the -``orchestration`` member of a :class:`~openstack.connection.Connection` -object. 
The ``orchestration`` member will only be added if the service -is detected. - -.. autoclass:: openstack.orchestration.v1._proxy.Proxy - :members: diff --git a/doc/source/users/proxies/telemetry.rst b/doc/source/users/proxies/telemetry.rst deleted file mode 100644 index ebd8d88dee..0000000000 --- a/doc/source/users/proxies/telemetry.rst +++ /dev/null @@ -1,19 +0,0 @@ -Telemetry API -============= - -.. caution:: - BETA: This API is a work in progress and is subject to change. - -For details on how to use telemetry, see :doc:`/users/guides/telemetry` - -.. automodule:: openstack.telemetry.v2._proxy - -The Telemetry Class -------------------- - -The telemetry high-level interface is available through the ``telemetry`` -member of a :class:`~openstack.connection.Connection` object. The -``telemetry`` member will only be added if the service is detected. - -.. autoclass:: openstack.telemetry.v2._proxy.Proxy - :members: diff --git a/doc/source/users/resource.rst b/doc/source/users/resource.rst deleted file mode 100644 index f40188e182..0000000000 --- a/doc/source/users/resource.rst +++ /dev/null @@ -1,39 +0,0 @@ -**NOTE: This module is being phased out in favor of** -:mod:`openstack.resource2`. **Once all services have been moved over to use -resource2, that module will take this `resource` name.** - -Resource -======== -.. automodule:: openstack.resource - -The prop class --------------- - -.. autoclass:: openstack.resource.prop - :members: - -The Resource class ------------------- - -.. autoclass:: openstack.resource.Resource - :members: - :member-order: bysource - -How path_args are used -********************** - -As :class:`Resource`\s often contain compound :data:`Resource.base_path`\s, -meaning the path is constructed from more than just that string, the -various request methods need a way to fill in the missing parts. -That's where ``path_args`` come in. 
- -For example:: - - class ServerIP(resource.Resource): - base_path = "/servers/%(server_id)s/ips" - -Making a GET request to obtain server IPs requires the ID of the server -to check. This is handled by passing ``{"server_id": "12345"}`` as the -``path_args`` argument when calling :meth:`Resource.get_by_id`. From there, -the method uses Python's string interpolation to fill in the ``server_id`` -piece of the URL, and then makes the request. diff --git a/doc/source/users/resource2.rst b/doc/source/users/resource2.rst deleted file mode 100644 index bc664213d7..0000000000 --- a/doc/source/users/resource2.rst +++ /dev/null @@ -1,26 +0,0 @@ -**Note: This class is in the process of being applied as the new base class -for resources around the OpenStack SDK. Once that has been completed, -this module will be drop the 2 suffix and be the only resource module.** - -Resource -======== -.. automodule:: openstack.resource2 - -Components ----------- - -.. autoclass:: openstack.resource2.Body - :members: - -.. autoclass:: openstack.resource2.Header - :members: - -.. autoclass:: openstack.resource2.URI - :members: - -The Resource class ------------------- - -.. autoclass:: openstack.resource2.Resource - :members: - :member-order: bysource diff --git a/doc/source/users/resources/block_store/index.rst b/doc/source/users/resources/block_store/index.rst deleted file mode 100644 index ba0be3c1f3..0000000000 --- a/doc/source/users/resources/block_store/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -Block Store Resources -===================== - -.. toctree:: - :maxdepth: 1 - - v2/snapshot - v2/type - v2/volume diff --git a/doc/source/users/resources/block_store/v2/snapshot.rst b/doc/source/users/resources/block_store/v2/snapshot.rst deleted file mode 100644 index e742ffcf82..0000000000 --- a/doc/source/users/resources/block_store/v2/snapshot.rst +++ /dev/null @@ -1,21 +0,0 @@ -openstack.block_store.v2.snapshot -================================= - -.. 
automodule:: openstack.block_store.v2.snapshot - -The Snapshot Class ------------------- - -The ``Snapshot`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.block_store.v2.snapshot.Snapshot - :members: - -The SnapshotDetail Class ------------------------- - -The ``SnapshotDetail`` class inherits from -:class:`~openstack.block_store.v2.snapshot.Snapshot`. - -.. autoclass:: openstack.block_store.v2.snapshot.SnapshotDetail - :members: diff --git a/doc/source/users/resources/block_store/v2/type.rst b/doc/source/users/resources/block_store/v2/type.rst deleted file mode 100644 index fcab77fe28..0000000000 --- a/doc/source/users/resources/block_store/v2/type.rst +++ /dev/null @@ -1,13 +0,0 @@ -openstack.block_store.v2.type -============================= - -.. automodule:: openstack.block_store.v2.type - -The Type Class --------------- - -The ``Type`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.block_store.v2.type.Type - :members: - diff --git a/doc/source/users/resources/block_store/v2/volume.rst b/doc/source/users/resources/block_store/v2/volume.rst deleted file mode 100644 index 1154525358..0000000000 --- a/doc/source/users/resources/block_store/v2/volume.rst +++ /dev/null @@ -1,21 +0,0 @@ -openstack.block_store.v2.volume -=============================== - -.. automodule:: openstack.block_store.v2.volume - -The Volume Class ----------------- - -The ``Volume`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.block_store.v2.volume.Volume - :members: - -The VolumeDetail Class ----------------------- - -The ``VolumeDetail`` class inherits from -:class:`~openstack.block_store.v2.volume.Volume`. - -.. 
autoclass:: openstack.block_store.v2.volume.VolumeDetail - :members: diff --git a/doc/source/users/resources/cluster/v1/action.rst b/doc/source/users/resources/cluster/v1/action.rst deleted file mode 100644 index f75deb4243..0000000000 --- a/doc/source/users/resources/cluster/v1/action.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.action -=========================== - -.. automodule:: openstack.cluster.v1.action - -The Action Class ----------------- - -The ``Action`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.action.Action - :members: diff --git a/doc/source/users/resources/cluster/v1/build_info.rst b/doc/source/users/resources/cluster/v1/build_info.rst deleted file mode 100644 index 8534e1f7a0..0000000000 --- a/doc/source/users/resources/cluster/v1/build_info.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.build_info -=============================== - -.. automodule:: openstack.cluster.v1.build_info - -The BuildInfo Class -------------------- - -The ``BuildInfo`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.build_info.BuildInfo - :members: diff --git a/doc/source/users/resources/cluster/v1/cluster.rst b/doc/source/users/resources/cluster/v1/cluster.rst deleted file mode 100644 index a54ce6cf57..0000000000 --- a/doc/source/users/resources/cluster/v1/cluster.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.Cluster -============================ - -.. automodule:: openstack.cluster.v1.cluster - -The Cluster Class ------------------ - -The ``Cluster`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.cluster.v1.cluster.Cluster - :members: diff --git a/doc/source/users/resources/cluster/v1/cluster_policy.rst b/doc/source/users/resources/cluster/v1/cluster_policy.rst deleted file mode 100644 index d3a55d541d..0000000000 --- a/doc/source/users/resources/cluster/v1/cluster_policy.rst +++ /dev/null @@ -1,13 +0,0 @@ -openstack.cluster.v1.cluster_policy -=================================== - -.. automodule:: openstack.cluster.v1.cluster_policy - -The ClusterPolicy Class ------------------------ - -The ``ClusterPolicy`` class inherits from -:class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.cluster_policy.ClusterPolicy - :members: diff --git a/doc/source/users/resources/cluster/v1/event.rst b/doc/source/users/resources/cluster/v1/event.rst deleted file mode 100644 index 29678062af..0000000000 --- a/doc/source/users/resources/cluster/v1/event.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.event -========================== - -.. automodule:: openstack.cluster.v1.event - -The Event Class ---------------- - -The ``Event`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.event.Event - :members: diff --git a/doc/source/users/resources/cluster/v1/node.rst b/doc/source/users/resources/cluster/v1/node.rst deleted file mode 100644 index 74f11f3500..0000000000 --- a/doc/source/users/resources/cluster/v1/node.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.Node -========================= - -.. automodule:: openstack.cluster.v1.node - -The Node Class --------------- - -The ``Node`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.cluster.v1.node.Node - :members: diff --git a/doc/source/users/resources/cluster/v1/policy.rst b/doc/source/users/resources/cluster/v1/policy.rst deleted file mode 100644 index 0fe59378c7..0000000000 --- a/doc/source/users/resources/cluster/v1/policy.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.policy -=========================== - -.. automodule:: openstack.cluster.v1.policy - -The Policy Class ----------------- - -The ``Policy`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.policy.Policy - :members: diff --git a/doc/source/users/resources/cluster/v1/policy_type.rst b/doc/source/users/resources/cluster/v1/policy_type.rst deleted file mode 100644 index ee74b31014..0000000000 --- a/doc/source/users/resources/cluster/v1/policy_type.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.policy_type -================================ - -.. automodule:: openstack.cluster.v1.policy_type - -The PolicyType Class --------------------- - -The ``PolicyType`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.policy_type.PolicyType - :members: diff --git a/doc/source/users/resources/cluster/v1/profile.rst b/doc/source/users/resources/cluster/v1/profile.rst deleted file mode 100644 index bdf782dcb1..0000000000 --- a/doc/source/users/resources/cluster/v1/profile.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.profile -============================ - -.. automodule:: openstack.cluster.v1.profile - -The Profile Class ------------------ - -The ``Profile`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.cluster.v1.profile.Profile - :members: diff --git a/doc/source/users/resources/cluster/v1/profile_type.rst b/doc/source/users/resources/cluster/v1/profile_type.rst deleted file mode 100644 index 48c007f0f4..0000000000 --- a/doc/source/users/resources/cluster/v1/profile_type.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.profile_type -================================= - -.. automodule:: openstack.cluster.v1.profile_type - -The ProfileType Class ---------------------- - -The ``ProfileType`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.profile_type.ProfileType - :members: diff --git a/doc/source/users/resources/cluster/v1/receiver.rst b/doc/source/users/resources/cluster/v1/receiver.rst deleted file mode 100644 index 8d757d55df..0000000000 --- a/doc/source/users/resources/cluster/v1/receiver.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.cluster.v1.receiver -============================= - -.. automodule:: openstack.cluster.v1.receiver - -The Reciever Class ------------------- - -The ``Reciever`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.cluster.v1.receiver.Receiver - :members: diff --git a/doc/source/users/resources/compute/index.rst b/doc/source/users/resources/compute/index.rst deleted file mode 100644 index 1731a574c9..0000000000 --- a/doc/source/users/resources/compute/index.rst +++ /dev/null @@ -1,16 +0,0 @@ -Compute Resources -================= - -.. 
toctree:: - :maxdepth: 1 - - v2/extension - v2/flavor - v2/image - v2/keypair - v2/limits - v2/server - v2/server_interface - v2/server_ip - v2/server_meta - v2/server_metadata diff --git a/doc/source/users/resources/compute/v2/flavor.rst b/doc/source/users/resources/compute/v2/flavor.rst deleted file mode 100644 index 9f62f96f22..0000000000 --- a/doc/source/users/resources/compute/v2/flavor.rst +++ /dev/null @@ -1,21 +0,0 @@ -openstack.compute.v2.flavor -============================ - -.. automodule:: openstack.compute.v2.flavor - -The Flavor Class ----------------- - -The ``Flavor`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.compute.v2.flavor.Flavor - :members: - -The FlavorDetail Class ----------------------- - -The ``FlavorDetail`` class inherits from -:class:`~openstack.compute.v2.flavor.Flavor`. - -.. autoclass:: openstack.compute.v2.flavor.FlavorDetail - :members: diff --git a/doc/source/users/resources/compute/v2/limits.rst b/doc/source/users/resources/compute/v2/limits.rst deleted file mode 100644 index ecea1f9462..0000000000 --- a/doc/source/users/resources/compute/v2/limits.rst +++ /dev/null @@ -1,28 +0,0 @@ -openstack.compute.v2.limits -=========================== - -.. automodule:: openstack.compute.v2.limits - -The Limits Class ----------------- - -The ``Limits`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.compute.v2.limits.Limits - :members: - -The AbsoluteLimits Class ------------------------- - -The ``AbsoluteLimits`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.compute.v2.limits.AbsoluteLimits - :members: - -The RateLimits Class --------------------- - -The ``RateLimits`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.compute.v2.limits.RateLimits - :members: diff --git a/doc/source/users/resources/compute/v2/server_meta.rst b/doc/source/users/resources/compute/v2/server_meta.rst deleted file mode 100644 index cd06b56920..0000000000 --- a/doc/source/users/resources/compute/v2/server_meta.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.compute.v2.server_meta -================================ - -.. automodule:: openstack.compute.v2.server_meta - -The ServerMeta Class --------------------- - -The ``ServerMeta`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.compute.v2.server_meta.ServerMeta - :members: diff --git a/doc/source/users/resources/compute/v2/server_metadata.rst b/doc/source/users/resources/compute/v2/server_metadata.rst deleted file mode 100644 index 0248a1eb35..0000000000 --- a/doc/source/users/resources/compute/v2/server_metadata.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.compute.v2.server_metadata -==================================== - -.. automodule:: openstack.compute.v2.server_metadata - -The ServerMetadata Class ------------------------- - -The ``ServerMetadata`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.compute.v2.server_metadata.ServerMetadata - :members: diff --git a/doc/source/users/resources/database/index.rst b/doc/source/users/resources/database/index.rst deleted file mode 100644 index 962aa081d2..0000000000 --- a/doc/source/users/resources/database/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Database Resources -====================== - -.. toctree:: - :maxdepth: 1 - - v1/database - v1/flavor - v1/instance - v1/user diff --git a/doc/source/users/resources/identity/index.rst b/doc/source/users/resources/identity/index.rst deleted file mode 100644 index b536e8b79c..0000000000 --- a/doc/source/users/resources/identity/index.rst +++ /dev/null @@ -1,26 +0,0 @@ -Identity v2 Resources -===================== - -.. 
toctree:: - :maxdepth: 1 - - v2/extension - v2/role - v2/tenant - v2/user - -Identity v3 Resources -===================== - -.. toctree:: - :maxdepth: 1 - - v3/credential - v3/domain - v3/endpoint - v3/group - v3/policy - v3/project - v3/service - v3/trust - v3/user diff --git a/doc/source/users/resources/identity/v3/group.rst b/doc/source/users/resources/identity/v3/group.rst deleted file mode 100644 index fe6c4462ae..0000000000 --- a/doc/source/users/resources/identity/v3/group.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.identity.v3.group -=========================== - -.. automodule:: openstack.identity.v3.group - -The Group Class ---------------- - -The ``Group`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.identity.v3.group.Group - :members: diff --git a/doc/source/users/resources/image/index.rst b/doc/source/users/resources/image/index.rst deleted file mode 100644 index 3cd17ed8d8..0000000000 --- a/doc/source/users/resources/image/index.rst +++ /dev/null @@ -1,17 +0,0 @@ -Image v1 Resources -================== - -.. toctree:: - :maxdepth: 1 - - v1/image - -Image v2 Resources -================== - -.. toctree:: - :maxdepth: 1 - - v2/image - v2/member - v2/tag diff --git a/doc/source/users/resources/image/v2/tag.rst b/doc/source/users/resources/image/v2/tag.rst deleted file mode 100644 index 1947fc8272..0000000000 --- a/doc/source/users/resources/image/v2/tag.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.image.v2.tag -====================== - -.. automodule:: openstack.image.v2.tag - -The Tag Class -------------- - -The ``Tag`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.image.v2.tag.Tag - :members: diff --git a/doc/source/users/resources/key_manager/index.rst b/doc/source/users/resources/key_manager/index.rst deleted file mode 100644 index 76b6659c6e..0000000000 --- a/doc/source/users/resources/key_manager/index.rst +++ /dev/null @@ -1,9 +0,0 @@ -KeyManager Resources -==================== - -.. toctree:: - :maxdepth: 1 - - v1/container - v1/order - v1/secret diff --git a/doc/source/users/resources/metric/index.rst b/doc/source/users/resources/metric/index.rst deleted file mode 100644 index 1bfa667db8..0000000000 --- a/doc/source/users/resources/metric/index.rst +++ /dev/null @@ -1,10 +0,0 @@ -Metric Resources -================ - -.. toctree:: - :maxdepth: 1 - - v1/archive_policy - v1/capabilities - v1/metric - v1/resource diff --git a/doc/source/users/resources/metric/v1/archive_policy.rst b/doc/source/users/resources/metric/v1/archive_policy.rst deleted file mode 100644 index 903aaa4f0b..0000000000 --- a/doc/source/users/resources/metric/v1/archive_policy.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.metric.v1.archive_policy -================================== - -.. automodule:: openstack.metric.v1.archive_policy - -The ArchivePolicy Class ------------------------ - -The ``ArchivePolicy`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.metric.v1.archive_policy.ArchivePolicy - :members: diff --git a/doc/source/users/resources/metric/v1/capabilities.rst b/doc/source/users/resources/metric/v1/capabilities.rst deleted file mode 100644 index 571d460fe5..0000000000 --- a/doc/source/users/resources/metric/v1/capabilities.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.metric.v1.capabilities -================================ - -.. automodule:: openstack.metric.v1.capabilities - -The Capabilities Class ----------------------- - -The ``Capabilities`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.metric.v1.capabilities.Capabilities - :members: diff --git a/doc/source/users/resources/metric/v1/metric.rst b/doc/source/users/resources/metric/v1/metric.rst deleted file mode 100644 index 4c4feb5bbf..0000000000 --- a/doc/source/users/resources/metric/v1/metric.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.metric.v1.metric -========================== - -.. automodule:: openstack.metric.v1.metric - -The Metric Class ----------------- - -The ``Metric`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.metric.v1.metric.Metric - :members: diff --git a/doc/source/users/resources/metric/v1/resource.rst b/doc/source/users/resources/metric/v1/resource.rst deleted file mode 100644 index 748fb47cf1..0000000000 --- a/doc/source/users/resources/metric/v1/resource.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.metric.v1.resource -============================ - -.. automodule:: openstack.metric.v1.resource - -The Generic Class ------------------ - -The ``Generic`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.metric.v1.resource.Generic - :members: diff --git a/doc/source/users/resources/network/index.rst b/doc/source/users/resources/network/index.rst deleted file mode 100644 index 21da8cf9f0..0000000000 --- a/doc/source/users/resources/network/index.rst +++ /dev/null @@ -1,38 +0,0 @@ -Network Resources -================= - -.. 
toctree:: - :maxdepth: 1 - - v2/address_scope - v2/agent - v2/auto_allocated_topology - v2/availability_zone - v2/extension - v2/flavor - v2/floating_ip - v2/health_monitor - v2/listener - v2/load_balancer - v2/metering_label - v2/metering_label_rule - v2/network - v2/network_ip_availability - v2/pool - v2/pool_member - v2/port - v2/qos_bandwidth_limit_rule - v2/qos_dscp_marking_rule - v2/qos_minimum_bandwidth_rule - v2/qos_policy - v2/qos_rule_type - v2/quota - v2/rbac_policy - v2/router - v2/security_group - v2/security_group_rule - v2/segment - v2/service_profile - v2/service_provider - v2/subnet - v2/subnet_pool diff --git a/doc/source/users/resources/network/v2/agent.rst b/doc/source/users/resources/network/v2/agent.rst deleted file mode 100644 index f83856a62b..0000000000 --- a/doc/source/users/resources/network/v2/agent.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.network.v2.network -============================ - -.. automodule:: openstack.network.v2.agent - -The Agent Class ------------------ - -The ``Agent`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.network.v2.agent.Agent - :members: diff --git a/doc/source/users/resources/network/v2/auto_allocated_topology.rst b/doc/source/users/resources/network/v2/auto_allocated_topology.rst deleted file mode 100644 index 828f1a5618..0000000000 --- a/doc/source/users/resources/network/v2/auto_allocated_topology.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.network.v2.auto_allocated_topology -============================================ - -.. automodule:: openstack.network.v2.auto_allocated_topology - -The Auto Allocated Topology Class ---------------------------------- - -The ``Auto Allocated Toplogy`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.network.v2.auto_allocated_topology:AutoAllocatedTopology - :members: diff --git a/doc/source/users/resources/network/v2/availability_zone.rst b/doc/source/users/resources/network/v2/availability_zone.rst deleted file mode 100644 index a33f2b65eb..0000000000 --- a/doc/source/users/resources/network/v2/availability_zone.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.network.v2.availability_zone -====================================== - -.. automodule:: openstack.network.v2.availability_zone - -The AvailabilityZone Class --------------------------- - -The ``AvailabilityZone`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.network.v2.availability_zone.AvailabilityZone - :members: diff --git a/doc/source/users/resources/network/v2/health_monitor.rst b/doc/source/users/resources/network/v2/health_monitor.rst deleted file mode 100644 index 63a029cd12..0000000000 --- a/doc/source/users/resources/network/v2/health_monitor.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.network.v2.health_monitor -=================================== - -.. automodule:: openstack.network.v2.health_monitor - -The HealthMonitor Class ------------------------ - -The ``HealthMonitor`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.network.v2.health_monitor.HealthMonitor - :members: diff --git a/doc/source/users/resources/network/v2/network_ip_availability.rst b/doc/source/users/resources/network/v2/network_ip_availability.rst deleted file mode 100644 index 80bc576a8c..0000000000 --- a/doc/source/users/resources/network/v2/network_ip_availability.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.network.v2.network_ip_availability -=========================================== - -.. automodule:: openstack.network.v2.network_ip_availability - -The NetworkIPAvailability Class -------------------------------- - -The ``NetworkIPAvailability`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.network.v2.network_ip_availability.NetworkIPAvailability - :members: diff --git a/doc/source/users/resources/orchestration/index.rst b/doc/source/users/resources/orchestration/index.rst deleted file mode 100644 index 0af426b4ad..0000000000 --- a/doc/source/users/resources/orchestration/index.rst +++ /dev/null @@ -1,8 +0,0 @@ -Orchestration Resources -======================= - -.. toctree:: - :maxdepth: 1 - - v1/stack - v1/resource diff --git a/doc/source/users/resources/telemetry/index.rst b/doc/source/users/resources/telemetry/index.rst deleted file mode 100644 index f377bc749d..0000000000 --- a/doc/source/users/resources/telemetry/index.rst +++ /dev/null @@ -1,13 +0,0 @@ -Telemetry Resources -=================== - -.. toctree:: - :maxdepth: 1 - - v2/alarm - v2/alarm_change - v2/capability - v2/meter - v2/resource - v2/sample - v2/statistics diff --git a/doc/source/users/resources/telemetry/v2/alarm.rst b/doc/source/users/resources/telemetry/v2/alarm.rst deleted file mode 100644 index 3109d90e5f..0000000000 --- a/doc/source/users/resources/telemetry/v2/alarm.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.telemetry.v2.alarm -============================ - -.. automodule:: openstack.telemetry.v2.alarm - -The Alarm Class ---------------- - -The ``Alarm`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.telemetry.v2.alarm.Alarm - :members: diff --git a/doc/source/users/resources/telemetry/v2/alarm_change.rst b/doc/source/users/resources/telemetry/v2/alarm_change.rst deleted file mode 100644 index 55c8d1e0ca..0000000000 --- a/doc/source/users/resources/telemetry/v2/alarm_change.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.telemetry.v2.alarm_change -=================================== - -.. automodule:: openstack.telemetry.v2.alarm_change - -The AlarmChange Class ---------------------- - -The ``AlarmChange`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.telemetry.v2.alarm_change.AlarmChange - :members: diff --git a/doc/source/users/resources/telemetry/v2/capability.rst b/doc/source/users/resources/telemetry/v2/capability.rst deleted file mode 100644 index c17edd5017..0000000000 --- a/doc/source/users/resources/telemetry/v2/capability.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.telemetry.v2.capability -================================= - -.. automodule:: openstack.telemetry.v2.capability - -The Capability Class --------------------- - -The ``Capability`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.telemetry.v2.capability.Capability - :members: diff --git a/doc/source/users/resources/telemetry/v2/meter.rst b/doc/source/users/resources/telemetry/v2/meter.rst deleted file mode 100644 index b38bc5b6b1..0000000000 --- a/doc/source/users/resources/telemetry/v2/meter.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.telemetry.v2.meter -============================ - -.. automodule:: openstack.telemetry.v2.meter - -The Meter Class ----------------- - -The ``Meter`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.telemetry.v2.meter.Meter - :members: diff --git a/doc/source/users/resources/telemetry/v2/resource.rst b/doc/source/users/resources/telemetry/v2/resource.rst deleted file mode 100644 index f3b33887d5..0000000000 --- a/doc/source/users/resources/telemetry/v2/resource.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.telemetry.v2.resource -=============================== - -.. automodule:: openstack.telemetry.v2.resource - -The Resource Class ------------------- - -The ``Resource`` class inherits from :class:`~openstack.resource.Resource`. - -.. 
autoclass:: openstack.telemetry.v2.resource.Resource - :members: diff --git a/doc/source/users/resources/telemetry/v2/sample.rst b/doc/source/users/resources/telemetry/v2/sample.rst deleted file mode 100644 index f2430df27a..0000000000 --- a/doc/source/users/resources/telemetry/v2/sample.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.telemetry.v2.sample -============================= - -.. automodule:: openstack.telemetry.v2.sample - -The Sample Class ----------------- - -The ``Sample`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.telemetry.v2.sample.Sample - :members: diff --git a/doc/source/users/resources/telemetry/v2/statistics.rst b/doc/source/users/resources/telemetry/v2/statistics.rst deleted file mode 100644 index 7e1e6b8a54..0000000000 --- a/doc/source/users/resources/telemetry/v2/statistics.rst +++ /dev/null @@ -1,12 +0,0 @@ -openstack.telemetry.v2.statistics -================================= - -.. automodule:: openstack.telemetry.v2.statistics - -The Statistics Class --------------------- - -The ``Statistics`` class inherits from :class:`~openstack.resource.Resource`. - -.. autoclass:: openstack.telemetry.v2.statistics.Statistics - :members: diff --git a/doc/source/users/service_filter.rst b/doc/source/users/service_filter.rst deleted file mode 100644 index 60910ce6ed..0000000000 --- a/doc/source/users/service_filter.rst +++ /dev/null @@ -1,10 +0,0 @@ -ServiceFilter -============== -.. automodule:: openstack.service_filter - - -ServiceFilter object --------------------- - -.. autoclass:: openstack.service_filter.ServiceFilter - :members: diff --git a/doc/source/users/session.rst b/doc/source/users/session.rst deleted file mode 100644 index 44ac576b20..0000000000 --- a/doc/source/users/session.rst +++ /dev/null @@ -1,10 +0,0 @@ -Session -======= - -.. automodule:: openstack.session - -Session Object --------------- - -.. 
autoclass:: openstack.session.Session - :members: diff --git a/doc/source/users/utils.rst b/doc/source/users/utils.rst deleted file mode 100644 index f69638e30f..0000000000 --- a/doc/source/users/utils.rst +++ /dev/null @@ -1,4 +0,0 @@ -Utilities -========= -.. automodule:: openstack.utils - :members: enable_logging diff --git a/examples/baremetal/list.py b/examples/baremetal/list.py new file mode 100644 index 0000000000..a5595abd1e --- /dev/null +++ b/examples/baremetal/list.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +List resources from the Bare Metal service. +""" + + +def list_nodes(conn): + print("List Nodes:") + + for node in conn.baremetal.nodes(): + print(node) + + +# TODO(dtantsur): other resources diff --git a/examples/baremetal/provisioning.py b/examples/baremetal/provisioning.py new file mode 100644 index 0000000000..7eb758051c --- /dev/null +++ b/examples/baremetal/provisioning.py @@ -0,0 +1,33 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Operations with the provision state in the Bare Metal service. +""" + + +def manage_and_inspect_node(conn, uuid): + node = conn.baremetal.find_node(uuid) + print('Before:', node.provision_state) + conn.baremetal.set_node_provision_state(node, 'manage') + conn.baremetal.wait_for_nodes_provision_state([node], 'manageable') + conn.baremetal.set_node_provision_state(node, 'inspect') + res = conn.baremetal.wait_for_nodes_provision_state([node], 'manageable') + print('After:', res[0].provision_state) + + +def provide_node(conn, uuid): + node = conn.baremetal.find_node(uuid) + print('Before:', node.provision_state) + conn.baremetal.set_node_provision_state(node, 'provide') + res = conn.baremetal.wait_for_nodes_provision_state([node], 'available') + print('After:', res[0].provision_state) diff --git a/examples/cloud/cleanup-servers.py b/examples/cloud/cleanup-servers.py new file mode 100644 index 0000000000..076415e5e0 --- /dev/null +++ b/examples/cloud/cleanup-servers.py @@ -0,0 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import cloud as openstack + +# Initialize and turn on debug logging +openstack.enable_logging(debug=True) + +for cloud_name, region_name in [ + ('my-vexxhost', 'ca-ymq-1'), + ('my-citycloud', 'Buf1'), + ('my-internap', 'ams01'), +]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + for server in cloud.search_servers('my-server'): + cloud.delete_server(server, wait=True, delete_ips=True) diff --git a/examples/cloud/create-server-dict.py b/examples/cloud/create-server-dict.py new file mode 100644 index 0000000000..479923817f --- /dev/null +++ b/examples/cloud/create-server-dict.py @@ -0,0 +1,46 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import cloud as openstack + +# Initialize and turn on debug logging +openstack.enable_logging(debug=True) + +for cloud_name, region_name, image, flavor_id in [ + ( + 'my-vexxhost', + 'ca-ymq-1', + 'Ubuntu 16.04.1 LTS [2017-03-03]', + '5cf64088-893b-46b5-9bb1-ee020277635d', + ), + ( + 'my-citycloud', + 'Buf1', + 'Ubuntu 16.04 Xenial Xerus', + '0dab10b5-42a2-438e-be7b-505741a7ffcc', + ), + ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4'), +]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. 
+ server = cloud.create_server( + 'my-server', + image=image, + flavor=dict(id=flavor_id), + wait=True, + auto_ip=True, + ) + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) diff --git a/examples/cloud/create-server-name-or-id.py b/examples/cloud/create-server-name-or-id.py new file mode 100644 index 0000000000..2f3521d63b --- /dev/null +++ b/examples/cloud/create-server-name-or-id.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import cloud as openstack + +# Initialize and turn on debug logging +openstack.enable_logging(debug=True) + +for cloud_name, region_name, image, flavor in [ + ( + 'my-vexxhost', + 'ca-ymq-1', + 'Ubuntu 16.04.1 LTS [2017-03-03]', + 'v1-standard-4', + ), + ('my-citycloud', 'Buf1', 'Ubuntu 16.04 Xenial Xerus', '4C-4GB-100GB'), + ('my-internap', 'ams01', 'Ubuntu 16.04 LTS (Xenial Xerus)', 'A1.4'), +]: + # Initialize cloud + cloud = openstack.connect(cloud=cloud_name, region_name=region_name) + cloud.delete_server('my-server', wait=True, delete_ips=True) + + # Boot a server, wait for it to boot, and then do whatever is needed + # to get a public ip for it. 
+ server = cloud.create_server( + 'my-server', image=image, flavor=flavor, wait=True, auto_ip=True + ) + print(server.name) + print(server['name']) + cloud.pprint(server) + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) diff --git a/examples/cloud/debug-logging.py b/examples/cloud/debug-logging.py new file mode 100644 index 0000000000..a49061919b --- /dev/null +++ b/examples/cloud/debug-logging.py @@ -0,0 +1,18 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import cloud as openstack + +openstack.enable_logging(debug=True) + +cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1') +cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') diff --git a/examples/cloud/find-an-image.py b/examples/cloud/find-an-image.py new file mode 100644 index 0000000000..11da214d01 --- /dev/null +++ b/examples/cloud/find-an-image.py @@ -0,0 +1,20 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import cloud as openstack + +openstack.enable_logging() + +cloud = openstack.connect(cloud='fuga', region_name='cystack') +cloud.pprint( + [image for image in cloud.list_images() if 'ubuntu' in image.name.lower()] +) diff --git a/examples/cloud/http-debug-logging.py b/examples/cloud/http-debug-logging.py new file mode 100644 index 0000000000..bbe9413cfb --- /dev/null +++ b/examples/cloud/http-debug-logging.py @@ -0,0 +1,18 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import cloud as openstack + +openstack.enable_logging(http_debug=True) + +cloud = openstack.connect(cloud='my-vexxhost', region_name='ca-ymq-1') +cloud.get_image('Ubuntu 16.04.1 LTS [2017-03-03]') diff --git a/examples/cloud/munch-dict-object.py b/examples/cloud/munch-dict-object.py new file mode 100644 index 0000000000..e7730e7fb5 --- /dev/null +++ b/examples/cloud/munch-dict-object.py @@ -0,0 +1,20 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import cloud as openstack + +openstack.enable_logging(debug=True) + +cloud = openstack.connect(cloud='ovh', region_name='SBG1') +image = cloud.get_image('Ubuntu 16.10') +print(image.name) +print(image['name']) diff --git a/examples/cloud/normalization.py b/examples/cloud/normalization.py new file mode 100644 index 0000000000..9c719e1799 --- /dev/null +++ b/examples/cloud/normalization.py @@ -0,0 +1,21 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import cloud as openstack + +openstack.enable_logging() + +cloud = openstack.connect(cloud='fuga', region_name='cystack') +image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image' +) +cloud.pprint(image) diff --git a/examples/cloud/server-information.py b/examples/cloud/server-information.py new file mode 100644 index 0000000000..616ed041b5 --- /dev/null +++ b/examples/cloud/server-information.py @@ -0,0 +1,38 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import openstack + +openstack.enable_logging(debug=True) + +cloud = openstack.connect(cloud='my-citycloud', region_name='Buf1') +try: + server = cloud.create_server( + 'my-server', + image='Ubuntu 16.04 Xenial Xerus', + flavor=dict(id='0dab10b5-42a2-438e-be7b-505741a7ffcc'), + wait=True, + auto_ip=True, + ) + + print("\n\nFull Server\n\n") + cloud.pprint(server) + + print("\n\nTurn Detailed Off\n\n") + cloud.pprint(cloud.get_server('my-server', detailed=False)) + + print("\n\nBare Server\n\n") + cloud.pprint(cloud.get_server('my-server', bare=True)) + +finally: + # Delete it - this is a demo + cloud.delete_server(server, wait=True, delete_ips=True) diff --git a/examples/cloud/service-conditional-overrides.py b/examples/cloud/service-conditional-overrides.py new file mode 100644 index 0000000000..77d540e8a3 --- /dev/null +++ b/examples/cloud/service-conditional-overrides.py @@ -0,0 +1,18 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import openstack + +openstack.enable_logging(debug=True) + +cloud = openstack.connect(cloud='rax', region_name='DFW') +print(cloud.has_service('network')) diff --git a/examples/cloud/service-conditionals.py b/examples/cloud/service-conditionals.py new file mode 100644 index 0000000000..f8ca94a22e --- /dev/null +++ b/examples/cloud/service-conditionals.py @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import openstack + +openstack.enable_logging(debug=True) + +cloud = openstack.connect(cloud='kiss', region_name='region1') +print(cloud.has_service('network')) +print(cloud.has_service('container-orchestration')) diff --git a/examples/cloud/strict-mode.py b/examples/cloud/strict-mode.py new file mode 100644 index 0000000000..393af8d1b1 --- /dev/null +++ b/examples/cloud/strict-mode.py @@ -0,0 +1,21 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import openstack + +openstack.enable_logging() + +cloud = openstack.connect(cloud='fuga', region_name='cystack', strict=True) +image = cloud.get_image( + 'Ubuntu 16.04 LTS - Xenial Xerus - 64-bit - Fuga Cloud Based Image' +) +cloud.pprint(image) diff --git a/examples/cloud/upload-large-object.py b/examples/cloud/upload-large-object.py new file mode 100644 index 0000000000..2ac3b84383 --- /dev/null +++ b/examples/cloud/upload-large-object.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import openstack + +openstack.enable_logging(debug=True) + +cloud = openstack.connect(cloud='ovh', region_name='SBG1') +cloud.create_object( + container='my-container', + name='my-object', + filename='/home/mordred/briarcliff.sh3d', + segment_size=1000000, +) +cloud.delete_object('my-container', 'my-object') +cloud.delete_container('my-container') diff --git a/examples/cloud/upload-object.py b/examples/cloud/upload-object.py new file mode 100644 index 0000000000..2ac3b84383 --- /dev/null +++ b/examples/cloud/upload-object.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import openstack + +openstack.enable_logging(debug=True) + +cloud = openstack.connect(cloud='ovh', region_name='SBG1') +cloud.create_object( + container='my-container', + name='my-object', + filename='/home/mordred/briarcliff.sh3d', + segment_size=1000000, +) +cloud.delete_object('my-container', 'my-object') +cloud.delete_container('my-container') diff --git a/examples/cloud/user-agent.py b/examples/cloud/user-agent.py new file mode 100644 index 0000000000..b616dcb4cc --- /dev/null +++ b/examples/cloud/user-agent.py @@ -0,0 +1,20 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import openstack + +openstack.enable_logging(http_debug=True) + +cloud = openstack.connect( + cloud='datacentred', app_name='AmazingApp', app_version='1.0' +) +cloud.list_networks() diff --git a/examples/cluster/policy.py b/examples/cluster/policy.py deleted file mode 100644 index 051b838dbc..0000000000 --- a/examples/cluster/policy.py +++ /dev/null @@ -1,73 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Managing policies in the Cluster service. - -For a full guide see -http://developer.openstack.org/sdks/python/openstacksdk/users/guides/cluster.html -""" - - -def list_policies(conn): - print("List Policies:") - - for policy in conn.cluster.policies(): - print(policy.to_dict()) - - for policy in conn.cluster.policies(sort='name:asc'): - print(policy.to_dict()) - - -def create_policy(conn): - print("Create Policy:") - - spec = { - 'policy': 'senlin.policy.deletion', - 'version': 1.0, - 'properties': { - 'criteria': 'oldest_first', - 'destroy_after_deletion': True, - } - } - - policy = conn.cluster.create_policy('dp01', spec) - print(policy.to_dict()) - - -def get_policy(conn): - print("Get Policy:") - - policy = conn.cluster.get_policy('dp01') - print(policy.to_dict()) - - -def find_policy(conn): - print("Find Policy:") - - policy = conn.cluster.find_policy('dp01') - print(policy.to_dict()) - - -def update_policy(conn): - print("Update Policy:") - - policy = conn.cluster.update_policy('dp01', name='dp02') - print(policy.to_dict()) - - -def 
delete_policy(conn): - print("Delete Policy:") - - conn.cluster.delete_policy('dp01') - - print("Policy deleted.") diff --git a/examples/cluster/policy_type.py b/examples/cluster/policy_type.py deleted file mode 100644 index f3bb03692a..0000000000 --- a/examples/cluster/policy_type.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Managing policy types in the Cluster service. - -For a full guide see -http://developer.openstack.org/sdks/python/openstacksdk/users/guides/cluster.html -""" - - -def list_policy_types(conn): - print("List Policy Types:") - - for pt in conn.cluster.policy_types(): - print(pt.to_dict()) - - -def get_policy_type(conn): - print("Get Policy Type:") - - pt = conn.cluster.get_policy_type('senlin.policy.deletion-1.0') - - print(pt.to_dict()) diff --git a/examples/cluster/profile.py b/examples/cluster/profile.py deleted file mode 100644 index 6f1709fbb0..0000000000 --- a/examples/cluster/profile.py +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from examples.connect import FLAVOR_NAME -from examples.connect import IMAGE_NAME -from examples.connect import NETWORK_NAME -from examples.connect import SERVER_NAME - -""" -Managing profiles in the Cluster service. - -For a full guide see -http://developer.openstack.org/sdks/python/openstacksdk/users/guides/cluster.html -""" - - -def list_profiles(conn): - print("List Profiles:") - - for profile in conn.cluster.profiles(): - print(profile.to_dict()) - - for profile in conn.cluster.profiles(sort='name:asc'): - print(profile.to_dict()) - - -def create_profile(conn): - print("Create Profile:") - - spec = { - 'profile': 'os.nova.server', - 'version': 1.0, - 'properties': { - 'name': SERVER_NAME, - 'flavor': FLAVOR_NAME, - 'image': IMAGE_NAME, - 'networks': { - 'network': NETWORK_NAME - } - } - } - - profile = conn.cluster.create_profile('os_server', spec) - print(profile.to_dict()) - - -def get_profile(conn): - print("Get Profile:") - - profile = conn.cluster.get_profile('os_server') - print(profile.to_dict()) - - -def find_profile(conn): - print("Find Profile:") - - profile = conn.cluster.find_profile('os_server') - print(profile.to_dict()) - - -def update_profile(conn): - print("Update Profile:") - - profile = conn.cluster.update_profile('os_server', name='old_server') - print(profile.to_dict()) - - -def delete_profile(conn): - print("Delete Profile:") - - conn.cluster.delete_profile('os_server') - - print("Profile deleted.") diff --git a/examples/cluster/profile_type.py b/examples/cluster/profile_type.py deleted file mode 100644 index e1d8ff7f75..0000000000 --- a/examples/cluster/profile_type.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Managing profile types in the Cluster service. - -For a full guide see -http://developer.openstack.org/sdks/python/openstacksdk/users/guides/cluster.html -""" - - -def list_profile_types(conn): - print("List Profile Types:") - - for pt in conn.cluster.profile_types(): - print(pt.to_dict()) - - -def get_profile_type(conn): - print("Get Profile Type:") - - pt = conn.cluster.get_profile_type('os.nova.server-1.0') - - print(pt.to_dict()) diff --git a/examples/cluster/__init__.py b/examples/clustering/__init__.py similarity index 100% rename from examples/cluster/__init__.py rename to examples/clustering/__init__.py diff --git a/examples/clustering/action.py b/examples/clustering/action.py new file mode 100644 index 0000000000..ff0f0b4ed3 --- /dev/null +++ b/examples/clustering/action.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing policies in the Cluster service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + +ACTION_ID = "06ad259b-d6ab-4eb2-a0fa-fb144437eab1" + + +def list_actions(conn): + print("List Actions:") + + for actions in conn.clustering.actions(): + print(actions.to_dict()) + + for actions in conn.clustering.actions(sort='name:asc'): + print(actions.to_dict()) + + +def get_action(conn): + print("Get Action:") + + action = conn.clustering.get_action(ACTION_ID) + print(action.to_dict()) diff --git a/examples/clustering/cluster.py b/examples/clustering/cluster.py new file mode 100644 index 0000000000..ddeb5e99ff --- /dev/null +++ b/examples/clustering/cluster.py @@ -0,0 +1,171 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing policies in the Cluster service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + +CLUSTER_NAME = "Test_Cluster" +CLUSTER_ID = "47d808e5-ce75-4a1e-bfd2-4ed4639e8640" +PROFILE_ID = "b0e3a680-e270-4eb8-9361-e5c9503fba0a" +NODE_ID = "dd803d4a-015d-4223-b15f-db29bad3146c" +POLICY_ID = "c0e3a680-e270-4eb8-9361-e5c9503fba00" + + +def list_cluster(conn): + print("List clusters:") + + for cluster in conn.clustering.clusters(): + print(cluster.to_dict()) + + for cluster in conn.clustering.clusters(sort='name:asc'): + print(cluster.to_dict()) + + +def create_cluster(conn): + print("Create cluster:") + + spec = { + "name": CLUSTER_NAME, + "profile_id": PROFILE_ID, + "min_size": 0, + "max_size": -1, + "desired_capacity": 1, + } + + cluster = conn.clustering.create_cluster(**spec) + print(cluster.to_dict()) + + +def get_cluster(conn): + print("Get cluster:") + + cluster = conn.clustering.get_cluster(CLUSTER_ID) + print(cluster.to_dict()) + + +def find_cluster(conn): + print("Find cluster:") + + cluster = conn.clustering.find_cluster(CLUSTER_ID) + print(cluster.to_dict()) + + +def update_cluster(conn): + print("Update cluster:") + + spec = { + "name": "Test_Cluster001", + "profile_id": "c0e3a680-e270-4eb8-9361-e5c9503fba0a", + "profile_only": True, + } + cluster = conn.clustering.update_cluster(CLUSTER_ID, **spec) + print(cluster.to_dict()) + + +def delete_cluster(conn): + print("Delete cluster:") + + conn.clustering.delete_cluster(CLUSTER_ID) + print("Cluster deleted.") + + # cluster support force delete + conn.clustering.delete_cluster(CLUSTER_ID, False, True) + print("Cluster deleted") + + +def add_nodes_to_cluster(conn): + print("Add nodes to cluster:") + + node_ids = [NODE_ID] + res = conn.clustering.add_nodes_to_cluster(CLUSTER_ID, node_ids) + print(res) + + +def remove_nodes_from_cluster(conn): + print("Remove nodes from a cluster:") + + node_ids = [NODE_ID] + res = conn.clustering.remove_nodes_from_cluster(CLUSTER_ID, node_ids) + print(res) + + 
+def replace_nodes_in_cluster(conn): + print("Replace the nodes in a cluster with specified nodes:") + + old_node = NODE_ID + new_node = "cd803d4a-015d-4223-b15f-db29bad3146c" + spec = {old_node: new_node} + res = conn.clustering.replace_nodes_in_cluster(CLUSTER_ID, **spec) + print(res) + + +def scale_out_cluster(conn): + print("Inflate the size of a cluster:") + + res = conn.clustering.scale_out_cluster(CLUSTER_ID, 1) + print(res) + + +def scale_in_cluster(conn): + print("Shrink the size of a cluster:") + + res = conn.clustering.scale_in_cluster(CLUSTER_ID, 1) + print(res) + + +def resize_cluster(conn): + print("Resize of cluster:") + + spec = { + 'min_size': 1, + 'max_size': 6, + 'adjustment_type': 'EXACT_CAPACITY', + 'number': 2, + } + res = conn.clustering.resize_cluster(CLUSTER_ID, **spec) + print(res) + + +def attach_policy_to_cluster(conn): + print("Attach policy to a cluster:") + + spec = {'enabled': True} + res = conn.clustering.attach_policy_to_cluster( + CLUSTER_ID, POLICY_ID, **spec + ) + print(res) + + +def detach_policy_from_cluster(conn): + print("Detach a policy from a cluster:") + + res = conn.clustering.detach_policy_from_cluster(CLUSTER_ID, POLICY_ID) + print(res) + + +def check_cluster(conn): + print("Check cluster:") + + res = conn.clustering.check_cluster(CLUSTER_ID) + print(res) + + +def recover_cluster(conn): + print("Recover cluster:") + + spec = {'check': True} + res = conn.clustering.recover_cluster(CLUSTER_ID, **spec) + print(res) diff --git a/examples/clustering/event.py b/examples/clustering/event.py new file mode 100644 index 0000000000..e6f18807ad --- /dev/null +++ b/examples/clustering/event.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing policies in the Cluster service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + +EVENT_ID = "5d982071-76c5-4733-bf35-b9e38a563c99" + + +def list_events(conn): + print("List Events:") + + for events in conn.clustering.events(): + print(events.to_dict()) + + for events in conn.clustering.events(sort='name:asc'): + print(events.to_dict()) + + +def get_event(conn): + print("Get Event:") + + event = conn.clustering.get_event(EVENT_ID) + print(event.to_dict()) diff --git a/examples/clustering/node.py b/examples/clustering/node.py new file mode 100644 index 0000000000..4217a206e8 --- /dev/null +++ b/examples/clustering/node.py @@ -0,0 +1,93 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing policies in the Cluster service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + +NODE_NAME = 'Test_Node' +NODE_ID = 'dd803d4a-015d-4223-b15f-db29bad3146c' +PROFILE_ID = "b0e3a680-e270-4eb8-9361-e5c9503fba0a" + + +def list_nodes(conn): + print("List Nodes:") + + for node in conn.clustering.nodes(): + print(node.to_dict()) + for node in conn.clustering.nodes(sort='asc:name'): + print(node.to_dict()) + + +def create_node(conn): + print("Create Node:") + + spec = { + 'name': NODE_NAME, + 'profile_id': PROFILE_ID, + } + node = conn.clustering.create_node(**spec) + print(node.to_dict()) + + +def get_node(conn): + print("Get Node:") + + node = conn.clustering.get_node(NODE_ID) + print(node.to_dict()) + + +def find_node(conn): + print("Find Node:") + + node = conn.clustering.find_node(NODE_ID) + print(node.to_dict()) + + +def update_node(conn): + print("Update Node:") + + spec = { + 'name': 'Test_Node01', + 'profile_id': 'c0e3a680-e270-4eb8-9361-e5c9503fba0b', + } + + node = conn.clustering.update_node(NODE_ID, **spec) + print(node.to_dict()) + + +def delete_node(conn): + print("Delete Node:") + + conn.clustering.delete_node(NODE_ID) + print("Node deleted.") + # node support force delete + conn.clustering.delete_node(NODE_ID, False, True) + print("Node deleted") + + +def check_node(conn): + print("Check Node:") + + node = conn.clustering.check_node(NODE_ID) + print(node) + + +def recover_node(conn): + print("Recover Node:") + + spec = {'check': True} + node = conn.clustering.recover_node(NODE_ID, **spec) + print(node) diff --git a/examples/clustering/policy.py b/examples/clustering/policy.py new file mode 100644 index 0000000000..da56e63431 --- /dev/null +++ b/examples/clustering/policy.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing policies in the Cluster service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + + +def list_policies(conn): + print("List Policies:") + + for policy in conn.clustering.policies(): + print(policy.to_dict()) + + for policy in conn.clustering.policies(sort='name:asc'): + print(policy.to_dict()) + + +def create_policy(conn): + print("Create Policy:") + attrs = { + 'name': 'dp01', + 'spec': { + 'policy': 'senlin.policy.deletion', + 'version': 1.0, + 'properties': { + 'criteria': 'oldest_first', + 'destroy_after_deletion': True, + }, + }, + } + + policy = conn.clustering.create_policy(attrs) + print(policy.to_dict()) + + +def get_policy(conn): + print("Get Policy:") + + policy = conn.clustering.get_policy('dp01') + print(policy.to_dict()) + + +def find_policy(conn): + print("Find Policy:") + + policy = conn.clustering.find_policy('dp01') + print(policy.to_dict()) + + +def update_policy(conn): + print("Update Policy:") + + policy = conn.clustering.update_policy('dp01', name='dp02') + print(policy.to_dict()) + + +def delete_policy(conn): + print("Delete Policy:") + + conn.clustering.delete_policy('dp01') + + print("Policy deleted.") diff --git a/examples/clustering/policy_type.py b/examples/clustering/policy_type.py new file mode 100644 index 0000000000..a5618e418a --- /dev/null +++ b/examples/clustering/policy_type.py @@ -0,0 +1,33 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing policy types in the Cluster service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + + +def list_policy_types(conn): + print("List Policy Types:") + + for pt in conn.clustering.policy_types(): + print(pt.to_dict()) + + +def get_policy_type(conn): + print("Get Policy Type:") + + pt = conn.clustering.get_policy_type('senlin.policy.deletion-1.0') + + print(pt.to_dict()) diff --git a/examples/clustering/profile.py b/examples/clustering/profile.py new file mode 100644 index 0000000000..9fc2c9fcb2 --- /dev/null +++ b/examples/clustering/profile.py @@ -0,0 +1,81 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from examples.connect import FLAVOR_NAME +from examples.connect import IMAGE_NAME +from examples.connect import NETWORK_NAME +from examples.connect import SERVER_NAME + +""" +Managing profiles in the Cluster service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + + +def list_profiles(conn): + print("List Profiles:") + + for profile in conn.clustering.profiles(): + print(profile.to_dict()) + + for profile in conn.clustering.profiles(sort='name:asc'): + print(profile.to_dict()) + + +def create_profile(conn): + print("Create Profile:") + + spec = { + 'profile': 'os.nova.server', + 'version': 1.0, + 'name': 'os_server', + 'properties': { + 'name': SERVER_NAME, + 'flavor': FLAVOR_NAME, + 'image': IMAGE_NAME, + 'networks': {'network': NETWORK_NAME}, + }, + } + + profile = conn.clustering.create_profile(spec) + print(profile.to_dict()) + + +def get_profile(conn): + print("Get Profile:") + + profile = conn.clustering.get_profile('os_server') + print(profile.to_dict()) + + +def find_profile(conn): + print("Find Profile:") + + profile = conn.clustering.find_profile('os_server') + print(profile.to_dict()) + + +def update_profile(conn): + print("Update Profile:") + + profile = conn.clustering.update_profile('os_server', name='old_server') + print(profile.to_dict()) + + +def delete_profile(conn): + print("Delete Profile:") + + conn.clustering.delete_profile('os_server') + + print("Profile deleted.") diff --git a/examples/clustering/profile_type.py b/examples/clustering/profile_type.py new file mode 100644 index 0000000000..ab84811d56 --- /dev/null +++ b/examples/clustering/profile_type.py @@ -0,0 +1,33 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing profile types in the Cluster service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + + +def list_profile_types(conn): + print("List Profile Types:") + + for pt in conn.clustering.profile_types(): + print(pt.to_dict()) + + +def get_profile_type(conn): + print("Get Profile Type:") + + pt = conn.clustering.get_profile_type('os.nova.server-1.0') + + print(pt.to_dict()) diff --git a/examples/clustering/receiver.py b/examples/clustering/receiver.py new file mode 100644 index 0000000000..2c9a9c4b3e --- /dev/null +++ b/examples/clustering/receiver.py @@ -0,0 +1,76 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Managing policies in the Cluster service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/clustering.html +""" + +FAKE_NAME = 'test_receiver' +CLUSTER_ID = "ae63a10b-4a90-452c-aef1-113a0b255ee3" + + +def list_receivers(conn): + print("List Receivers:") + + for receiver in conn.clustering.receivers(): + print(receiver.to_dict()) + + for receiver in conn.clustering.receivers(sort='name:asc'): + print(receiver.to_dict()) + + +def create_receiver(conn): + print("Create Receiver:") + + # Build the receiver attributes and create the recever. 
+ spec = { + "action": "CLUSTER_SCALE_OUT", + "cluster_id": CLUSTER_ID, + "name": FAKE_NAME, + "params": {"count": "1"}, + "type": "webhook", + } + + receiver = conn.clustering.create_receiver(**spec) + print(receiver.to_dict()) + + +def get_receiver(conn): + print("Get Receiver:") + + receiver = conn.clustering.get_receiver(FAKE_NAME) + print(receiver.to_dict()) + + +def find_receiver(conn): + print("Find Receiver:") + + receiver = conn.clustering.find_receiver(FAKE_NAME) + print(receiver.to_dict()) + + +def update_receiver(conn): + print("Update Receiver:") + + spec = {"name": "test_receiver2", "params": {"count": "2"}} + receiver = conn.clustering.update_receiver(FAKE_NAME, **spec) + print(receiver.to_dict()) + + +def delete_receiver(conn): + print("Delete Receiver:") + + conn.clustering.delete_receiver(FAKE_NAME) + print("Receiver deleted.") diff --git a/examples/compute/create.py b/examples/compute/create.py index c04d898147..dc68af1aef 100644 --- a/examples/compute/create.py +++ b/examples/compute/create.py @@ -24,7 +24,8 @@ """ Create resources with the Compute service. 
-For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ @@ -45,7 +46,7 @@ def create_keypair(conn): raise e with open(PRIVATE_KEYPAIR_FILE, 'w') as f: - f.write("%s" % keypair.private_key) + f.write(str(keypair.private_key)) os.chmod(PRIVATE_KEYPAIR_FILE, 0o400) @@ -55,17 +56,19 @@ def create_keypair(conn): def create_server(conn): print("Create Server:") - image = conn.compute.find_image(IMAGE_NAME) + image = conn.image.find_image(IMAGE_NAME) flavor = conn.compute.find_flavor(FLAVOR_NAME) network = conn.network.find_network(NETWORK_NAME) keypair = create_keypair(conn) server = conn.compute.create_server( - name=SERVER_NAME, image_id=image.id, flavor_id=flavor.id, - networks=[{"uuid": network.id}], key_name=keypair.name) + name=SERVER_NAME, + image_id=image.id, + flavor_id=flavor.id, + networks=[{"uuid": network.id}], + key_name=keypair.name, + ) server = conn.compute.wait_for_server(server) - print("ssh -i {key} root@{ip}".format( - key=PRIVATE_KEYPAIR_FILE, - ip=server.access_ipv4)) + print(f"ssh -i {PRIVATE_KEYPAIR_FILE} root@{server.access_ipv4}") diff --git a/examples/compute/delete.py b/examples/compute/delete.py index ba34431479..6fce1a3a2e 100644 --- a/examples/compute/delete.py +++ b/examples/compute/delete.py @@ -20,7 +20,8 @@ """ Delete resources with the Compute service. -For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ diff --git a/examples/compute/find.py b/examples/compute/find.py index b009df922c..988b4d970e 100644 --- a/examples/compute/find.py +++ b/examples/compute/find.py @@ -15,14 +15,15 @@ """ Find a resource from the Compute service. 
-For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ def find_image(conn): print("Find Image:") - image = conn.compute.find_image(examples.connect.IMAGE_NAME) + image = conn.image.find_image(examples.connect.IMAGE_NAME) print(image) diff --git a/examples/compute/list.py b/examples/compute/list.py index 0886f8818a..db53e81f38 100644 --- a/examples/compute/list.py +++ b/examples/compute/list.py @@ -13,7 +13,8 @@ """ List resources from the Compute service. -For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/compute.html """ diff --git a/examples/connect.py b/examples/connect.py index e1ea259dde..4446fe7295 100644 --- a/examples/connect.py +++ b/examples/connect.py @@ -13,30 +13,31 @@ """ Connect to an OpenStack cloud. -For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/connect_from_config.html """ import argparse import os - -import os_client_config - -from openstack import connection -from openstack import profile -from openstack import utils import sys -utils.enable_logging(True, stream=sys.stdout) +import openstack +from openstack.config import loader -#: Defines the OpenStack Client Config (OCC) cloud key in your OCC config -#: file, typically in $HOME/.config/openstack/clouds.yaml. That configuration +openstack.enable_logging(True, stream=sys.stdout) + +#: Defines the OpenStack Config cloud key in your config file, +#: typically in $HOME/.config/openstack/clouds.yaml. That configuration #: will determine where the examples will be run and what resource defaults #: will be used to run the examples. 
-TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'test_cloud') +TEST_CLOUD = os.getenv('OS_TEST_CLOUD', 'devstack-admin') +EXAMPLE_CONFIG_KEY = os.getenv('OPENSTACKSDK_EXAMPLE_CONFIG_KEY', 'example') +config = loader.OpenStackConfig() +cloud = openstack.connect(cloud=TEST_CLOUD) -class Opts(object): - def __init__(self, cloud_name='test_cloud', debug=False): +class Opts: + def __init__(self, cloud_name='devstack-admin', debug=False): self.cloud = cloud_name self.debug = debug # Use identity v3 API for examples. @@ -44,52 +45,53 @@ def __init__(self, cloud_name='test_cloud', debug=False): def _get_resource_value(resource_key, default): - try: - return cloud.config['example'][resource_key] - except KeyError: - return default + return config.get_extra_config(EXAMPLE_CONFIG_KEY).get( + resource_key, default + ) -occ = os_client_config.OpenStackConfig() -cloud = occ.get_one_cloud(TEST_CLOUD) SERVER_NAME = 'openstacksdk-example' -IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.3.4-x86_64-uec') +IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.4.0-x86_64-disk') FLAVOR_NAME = _get_resource_value('flavor_name', 'm1.small') NETWORK_NAME = _get_resource_value('network_name', 'private') KEYPAIR_NAME = _get_resource_value('keypair_name', 'openstacksdk-example') SSH_DIR = _get_resource_value( - 'ssh_dir', '{home}/.ssh'.format(home=os.path.expanduser("~"))) + 'ssh_dir', '{home}/.ssh'.format(home=os.path.expanduser("~")) +) PRIVATE_KEYPAIR_FILE = _get_resource_value( - 'private_keypair_file', '{ssh_dir}/id_rsa.{key}'.format( - ssh_dir=SSH_DIR, key=KEYPAIR_NAME)) + 'private_keypair_file', + f'{SSH_DIR}/id_rsa.{KEYPAIR_NAME}', +) EXAMPLE_IMAGE_NAME = 'openstacksdk-example-public-image' def create_connection_from_config(): - opts = Opts(cloud_name=TEST_CLOUD) - occ = os_client_config.OpenStackConfig() - cloud = occ.get_one_cloud(opts.cloud) - return connection.from_config(cloud_config=cloud, options=opts) + return openstack.connect(cloud=TEST_CLOUD) def 
create_connection_from_args(): parser = argparse.ArgumentParser() - config = os_client_config.OpenStackConfig() - config.register_argparse_arguments(parser, sys.argv[1:]) - args = parser.parse_args() - return connection.from_config(options=args) - - -def create_connection(auth_url, region, project_name, username, password): - prof = profile.Profile() - prof.set_region(profile.Profile.ALL, region) - - return connection.Connection( - profile=prof, - user_agent='examples', + return openstack.connect(options=parser) + + +def create_connection( + auth_url, + region, + project_name, + username, + password, + user_domain, + project_domain, +): + return openstack.connect( auth_url=auth_url, project_name=project_name, username=username, - password=password + password=password, + region_name=region, + user_domain_name=user_domain, + project_domain_name=project_domain, + app_name='examples', + app_version='1.0', ) diff --git a/openstack/bare_metal/__init__.py b/examples/dns/__init__.py similarity index 100% rename from openstack/bare_metal/__init__.py rename to examples/dns/__init__.py diff --git a/examples/dns/create.py b/examples/dns/create.py new file mode 100644 index 0000000000..94cd81f08e --- /dev/null +++ b/examples/dns/create.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Create resources from the DNS service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/dns.html +""" + + +def create_zone( + conn, + name, + email, + ttl=3600, + description="Default description", + zone_type="PRIMARY", +): + print("Create Zone: ") + + zone = { + "name": name, + "email": email, + "ttl": ttl, + "description": description, + "type": zone_type, + } + + print(conn.dns.create_zone(**zone)) + + +def create_recordset( + conn, + name_or_id, + recordset_name, + recordset_type="A", + records=["192.168.1.1"], + ttl=3600, + description="Default description", +): + print("Create Recordset: ") + + zone = conn.dns.find_zone(name_or_id) + + if not zone: + print("Zone not found.") + return None + + zone_id = zone.id + + recordset_data = { + "name": recordset_name, + "type": recordset_type, + "records": records, + "ttl": ttl, + "description": description, + } + + print(conn.dns.create_recordset(zone_id, **recordset_data)) diff --git a/examples/dns/delete.py b/examples/dns/delete.py new file mode 100644 index 0000000000..9ad8f80f17 --- /dev/null +++ b/examples/dns/delete.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Delete resources from the DNS service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/dns.html +""" + + +def delete_zone(conn, name_or_id): + print(f"Delete Zone: {name_or_id}") + + zone = conn.dns.find_zone(name_or_id) + + if zone: + conn.dns.delete_zone(zone.id) + else: + return None + + +def delete_recordset(conn, name_or_id, recordset_name): + print(f"Deleting Recordset: {recordset_name} in Zone: {name_or_id}") + + zone = conn.dns.find_zone(name_or_id) + + if zone: + try: + recordset = conn.dns.find_recordset(zone.id, recordset_name) + if recordset: + conn.dns.delete_recordset(recordset, zone.id) + else: + print("Recordset not found") + except Exception as e: + print(f"{e}") + else: + return None diff --git a/examples/dns/find.py b/examples/dns/find.py new file mode 100644 index 0000000000..b5f0c26d86 --- /dev/null +++ b/examples/dns/find.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Find resources from the DNS service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/dns.html +""" + + +def find_zone(conn, name_or_id): + print(f"Find Zone: {name_or_id}") + + zone = conn.dns.find_zone(name_or_id) + + if zone: + print(zone) + return zone + else: + print("Zone not found.") + return None + + +def find_recordset(conn, name_or_id, recordset_name, recordset_type=None): + print(f"Find Recordset: {recordset_name} in Zone: {name_or_id}") + + zone = conn.dns.find_zone(name_or_id) + + if not zone: + print("Zone not found.") + return None + + zone_id = zone.id + + try: + if recordset_type: + recordset = conn.dns.find_recordset( + zone_id, recordset_name, type=recordset_type + ) + else: + recordset = conn.dns.find_recordset(zone_id, recordset_name) + + if recordset: + print(recordset) + return recordset + else: + print("Recordset not found in Zone.") + return None + + except Exception as e: + print(f"{e}") + return None diff --git a/examples/dns/list.py b/examples/dns/list.py new file mode 100644 index 0000000000..782334c4b9 --- /dev/null +++ b/examples/dns/list.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +List resources from the DNS service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/dns.html +""" + + +def list_zones(conn): + print("List Zones:") + + for zone in conn.dns.zones(): + print(zone) + + +def list_recordsets(conn, name_or_id): + print("List Recordsets for Zone") + + zone = conn.dns.find_zone(name_or_id) + + if zone: + zone_id = zone.id + recordsets = conn.dns.recordsets(zone_id) + + for recordset in recordsets: + print(recordset) + else: + print("Zone not found.") diff --git a/examples/identity/list.py b/examples/identity/list.py index e7662b0438..069480b452 100644 --- a/examples/identity/list.py +++ b/examples/identity/list.py @@ -13,7 +13,8 @@ """ List resources from the Identity service. -For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/identity.html """ @@ -78,3 +79,31 @@ def list_roles(conn): for role in conn.identity.roles(): print(role) + + +def list_role_domain_group_assignments(conn): + print("List Roles assignments for a group on domain:") + + for role in conn.identity.role_domain_group_assignments(): + print(role) + + +def list_role_domain_user_assignments(conn): + print("List Roles assignments for a user on domain:") + + for role in conn.identity.role_project_user_assignments(): + print(role) + + +def list_role_project_group_assignments(conn): + print("List Roles assignments for a group on project:") + + for role in conn.identity.role_project_group_assignments(): + print(role) + + +def list_role_project_user_assignments(conn): + print("List Roles assignments for a user on project:") + + for role in conn.identity.role_project_user_assignments(): + print(role) diff --git a/examples/image/create.py b/examples/image/create.py index 45b4bc7d06..a80fc33f1e 100644 --- a/examples/image/create.py +++ b/examples/image/create.py @@ -16,7 +16,7 @@ Create resources with the Image service. 
For a full guide see -http://developer.openstack.org/sdks/python/openstacksdk/users/guides/image.html +https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ diff --git a/examples/image/delete.py b/examples/image/delete.py index dc6560366b..b3eb290f08 100644 --- a/examples/image/delete.py +++ b/examples/image/delete.py @@ -16,7 +16,7 @@ Delete resources with the Image service. For a full guide see -http://developer.openstack.org/sdks/python/openstacksdk/users/guides/image.html +https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ diff --git a/examples/image/download.py b/examples/image/download.py new file mode 100644 index 0000000000..e62804431d --- /dev/null +++ b/examples/image/download.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import hashlib + +""" +Download an image with the Image service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/image.html +""" + + +def download_image_stream(conn): + print("Download Image via streaming:") + + # Find the image you would like to download. + image = conn.image.find_image("myimage") + + # As the actual download now takes place outside of the library + # and in your own code, you are now responsible for checking + # the integrity of the data. Create an MD5 has to be computed + # after all of the data has been consumed. 
+ md5 = hashlib.md5(usedforsecurity=False) + + with open("myimage.qcow2", "wb") as local_image: + response = conn.image.download_image(image, stream=True) + + # Read only 1 MiB of memory at a time until + # all of the image data has been consumed. + for chunk in response.iter_content(chunk_size=1024 * 1024): + # With each chunk, add it to the hash to be computed. + md5.update(chunk) + + local_image.write(chunk) + + # Now that you've consumed all of the data the response gave you, + # ensure that the checksums of what the server offered and + # what you downloaded are the same. + if response.headers["Content-MD5"] != md5.hexdigest(): + raise Exception("Checksum mismatch in downloaded content") + + +def download_image(conn): + print("Download Image:") + + # Find the image you would like to download. + image = conn.image.find_image("myimage") + + with open("myimage.qcow2", "w") as local_image: + response = conn.image.download_image(image) + + # Response will contain the entire contents of the Image. + local_image.write(response) diff --git a/examples/image/import.py b/examples/image/import.py new file mode 100644 index 0000000000..be03176ecc --- /dev/null +++ b/examples/image/import.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from examples.connect import EXAMPLE_IMAGE_NAME + +""" +Create resources with the Image service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/image.html +""" + + +def import_image(conn): + print("Import Image:") + + # Url where glance can download the image + uri = ( + 'https://download.cirros-cloud.net/0.4.0/cirros-0.4.0-x86_64-disk.img' + ) + + # Build the image attributes and import the image. + image_attrs = { + 'name': EXAMPLE_IMAGE_NAME, + 'disk_format': 'qcow2', + 'container_format': 'bare', + 'visibility': 'public', + } + image = conn.image.create_image(**image_attrs) + conn.image.import_image(image, method="web-download", uri=uri) diff --git a/examples/image/list.py b/examples/image/list.py index decb6e5d91..12eecbaa87 100644 --- a/examples/image/list.py +++ b/examples/image/list.py @@ -14,7 +14,7 @@ List resources from the Image service. For a full guide see -http://developer.openstack.org/sdks/python/openstacksdk/users/guides/image.html +https://docs.openstack.org/openstacksdk/latest/user/guides/image.html """ diff --git a/examples/key_manager/create.py b/examples/key_manager/create.py index c45e7dc2e0..b01387ba96 100644 --- a/examples/key_manager/create.py +++ b/examples/key_manager/create.py @@ -18,8 +18,10 @@ def create_secret(conn): print("Create a secret:") - conn.key_manager.create_secret(name="My public key", - secret_type="public", - expiration="2020-02-28T23:59:59", - payload="ssh rsa...", - payload_content_type="text/plain") + conn.key_manager.create_secret( + name="My public key", + secret_type="public", + expiration="2020-02-28T23:59:59", + payload="ssh rsa...", + payload_content_type="text/plain", + ) diff --git a/examples/key_manager/delete.py b/examples/key_manager/delete.py new file mode 100644 index 0000000000..fe01fa9832 --- /dev/null +++ b/examples/key_manager/delete.py @@ -0,0 +1,29 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Delete resources from the Key Manager service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/key_manager.html +""" + + +def delete_secret(conn, name_or_id): + print(f"Delete Secret: {name_or_id}") + + secret = conn.key_manager.find_secret(name_or_id) + + if secret: + conn.key_manager.delete_secret(secret) + else: + print("Secret not found") diff --git a/examples/key_manager/find.py b/examples/key_manager/find.py new file mode 100644 index 0000000000..c8682b1cc3 --- /dev/null +++ b/examples/key_manager/find.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Find resources from the Key Manager service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/key_manager.html +""" + + +def find_secret(conn, name_or_id): + print(f"Find Secret: {name_or_id}") + + secret = conn.key_manager.find_secret(name_or_id) + + if secret: + print(secret) + return secret + else: + print("Secret not found") + return None diff --git a/examples/key_manager/list.py b/examples/key_manager/list.py index b74e4df7d1..17f989e414 100644 --- a/examples/key_manager/list.py +++ b/examples/key_manager/list.py @@ -26,6 +26,6 @@ def list_secrets_query(conn): print("List Secrets:") for secret in conn.key_manager.secrets( - secret_type="symmetric", - expiration="gte:2020-01-01T00:00:00"): + secret_type="symmetric", expiration="gte:2020-01-01T00:00:00" + ): print(secret) diff --git a/examples/network/create.py b/examples/network/create.py index f89387b8d2..bb25a81a16 100644 --- a/examples/network/create.py +++ b/examples/network/create.py @@ -13,7 +13,8 @@ """ Create resources with the Network service. -For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ @@ -21,7 +22,8 @@ def create_network(conn): print("Create Network:") example_network = conn.network.create_network( - name='openstacksdk-example-project-network') + name='openstacksdk-example-project-network' + ) print(example_network) @@ -30,6 +32,7 @@ def create_network(conn): network_id=example_network.id, ip_version='4', cidr='10.0.2.0/24', - gateway_ip='10.0.2.1') + gateway_ip='10.0.2.1', + ) print(example_subnet) diff --git a/examples/network/delete.py b/examples/network/delete.py index 41e7a3bbdc..b43413ff5c 100644 --- a/examples/network/delete.py +++ b/examples/network/delete.py @@ -13,7 +13,8 @@ """ Delete resources with the Network service. 
-For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ @@ -21,7 +22,8 @@ def delete_network(conn): print("Delete Network:") example_network = conn.network.find_network( - 'openstacksdk-example-project-network') + 'openstacksdk-example-project-network' + ) for example_subnet in example_network.subnet_ids: conn.network.delete_subnet(example_subnet, ignore_missing=False) diff --git a/examples/network/find.py b/examples/network/find.py index 1d9005d138..95d3a118bf 100644 --- a/examples/network/find.py +++ b/examples/network/find.py @@ -15,7 +15,8 @@ """ Find a resource from the Network service. -For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ diff --git a/examples/network/list.py b/examples/network/list.py index ff85e1c275..2cedba5bee 100644 --- a/examples/network/list.py +++ b/examples/network/list.py @@ -13,7 +13,8 @@ """ List resources from the Network service. -For a full guide see TODO(etoews):link to docs on developer.openstack.org +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/network.html """ diff --git a/examples/network/security_group_rules.py b/examples/network/security_group_rules.py new file mode 100644 index 0000000000..b9eb8b8a08 --- /dev/null +++ b/examples/network/security_group_rules.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +Create resources with the Network service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/network.html +""" + + +def open_port(conn): + print("Open a port:") + + example_sec_group = conn.network.create_security_group( + name='openstacksdk-example-security-group' + ) + + print(example_sec_group) + + example_rule = conn.network.create_security_group_rule( + security_group_id=example_sec_group.id, + direction='ingress', + remote_ip_prefix='0.0.0.0/0', + protocol='tcp', + port_range_max='443', + port_range_min='443', + ethertype='IPv4', + ) + + print(example_rule) + + +def allow_ping(conn): + print("Allow pings:") + + example_sec_group = conn.network.create_security_group( + name='openstacksdk-example-security-group2' + ) + + print(example_sec_group) + + example_rule = conn.network.create_security_group_rule( + security_group_id=example_sec_group.id, + direction='ingress', + remote_ip_prefix='0.0.0.0/0', + protocol='icmp', + port_range_max=None, + port_range_min=None, + ethertype='IPv4', + ) + + print(example_rule) diff --git a/openstack/bare_metal/v1/__init__.py b/examples/shared_file_system/__init__.py similarity index 100% rename from openstack/bare_metal/v1/__init__.py rename to examples/shared_file_system/__init__.py diff --git a/examples/shared_file_system/availability_zones.py b/examples/shared_file_system/availability_zones.py new file mode 100644 index 0000000000..73c0e3fb59 --- /dev/null +++ b/examples/shared_file_system/availability_zones.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +List resources from the Shared File System service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/shared_file_system.html +""" + + +def list_availability_zones(conn): + print("List Shared File System Availability Zones:") + for az in conn.share.availability_zones(): + print(az) diff --git a/examples/shared_file_system/share_group_snapshots.py b/examples/shared_file_system/share_group_snapshots.py new file mode 100644 index 0000000000..33ea2e7c51 --- /dev/null +++ b/examples/shared_file_system/share_group_snapshots.py @@ -0,0 +1,66 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +List resources from the Shared File System service. 
+ +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/shared_file_system.html +""" + + +def list_share_group_snapshots(conn, **query): + print("List all share group snapshots:") + share_group_snapshots = conn.share.share_group_snapshots(**query) + for share_group_snapshot in share_group_snapshots: + print(share_group_snapshot) + + +def get_share_group_snapshot(conn, group_snapshot_id): + print("Show share group snapshot with given Id:") + share_group_snapshot = conn.share.get_share_group_snapshots( + group_snapshot_id + ) + print(share_group_snapshot) + + +def share_group_snapshot_members(conn, group_snapshot_id): + print("Show share group snapshot members with given Id:") + members = conn.share.share_group_snapshot_members(group_snapshot_id) + for member in members: + print(member) + + +def create_share_group_snapshot(conn, share_group_id, **attrs): + print("Creating a share group snapshot from given attributes:") + share_group_snapshot = conn.share.create_share_group_snapshot( + share_group_id, **attrs + ) + print(share_group_snapshot) + + +def reset_share_group_snapshot_status(conn, group_snapshot_id, status): + print("Reseting the share group snapshot status:") + conn.share.reset_share_group_snapshot_status(group_snapshot_id, status) + + +def update_share_group_snapshot(conn, group_snapshot_id, **attrs): + print("Updating a share group snapshot with given Id:") + share_group_snapshot = conn.share.update_share_group_snapshot( + group_snapshot_id, **attrs + ) + print(share_group_snapshot) + + +def delete_share_group_snapshot(conn, group_snapshot_id): + print("Deleting a share group snapshot with given Id:") + conn.share.delete_share_group_snapshot(group_snapshot_id) diff --git a/examples/shared_file_system/share_instances.py b/examples/shared_file_system/share_instances.py new file mode 100644 index 0000000000..972d2b632c --- /dev/null +++ b/examples/shared_file_system/share_instances.py @@ -0,0 +1,43 @@ +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +List resources from the Shared File System service. + +For a full guide see +https://docs.openstack.org/openstacksdk/latest/user/guides/shared_file_system.html +""" + + +def share_instances(conn, **query): + print('List all share instances:') + for si in conn.share.share_instances(**query): + print(si) + + +def get_share_instance(conn, share_instance_id): + print('Get share instance with given Id:') + share_instance = conn.share.get_share_instance(share_instance_id) + print(share_instance) + + +def reset_share_instance_status(conn, share_instance_id, status): + print( + 'Reset the status of the share instance with the given ' + 'share_instance_id to the given status' + ) + conn.share.reset_share_instance_status(share_instance_id, status) + + +def delete_share_instance(conn, share_instance_id): + print('Force-delete the share instance with the given share_instance_id') + conn.share.delete_share_instance(share_instance_id) diff --git a/examples/shared_file_system/share_metadata.py b/examples/shared_file_system/share_metadata.py new file mode 100644 index 0000000000..b1ca00c625 --- /dev/null +++ b/examples/shared_file_system/share_metadata.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def list_share_metadata(conn, share_id): + # Method returns the entire share with the metadata inside it. + returned_share = conn.get_share_metadata(share_id) + + # Access metadata of share + metadata = returned_share['metadata'] + + print("List All Share Metadata:") + for meta_key in metadata: + print(f"{meta_key}={metadata[meta_key]}") + + +def get_share_metadata_item(conn, share_id, key): + # Method returns the entire share with the metadata inside it. + returned_share = conn.get_share_metadata_item(share_id, key) + + # Access metadata of share + metadata = returned_share['metadata'] + + print("Get share metadata item given item key and share id:") + print(metadata[key]) + + +def create_share_metadata(conn, share_id, metadata): + # Method returns the entire share with the metadata inside it. + created_share = conn.create_share_metadata(share_id, metadata) + + # Access metadata of share + metadata = created_share['metadata'] + + print("Metadata created for given share:") + print(metadata) + + +def update_share_metadata(conn, share_id, metadata): + # Method returns the entire share with the metadata inside it. + updated_share = conn.update_share_metadata(share_id, metadata, True) + + # Access metadata of share + metadata = updated_share['metadata'] + + print("Updated metadata for given share:") + print(metadata) + + +def delete_share_metadata(conn, share_id, keys): + # Method doesn't return anything. 
+ conn.delete_share_metadata(share_id, keys) diff --git a/examples/shared_file_system/shares.py b/examples/shared_file_system/shares.py new file mode 100644 index 0000000000..5f274ed7d4 --- /dev/null +++ b/examples/shared_file_system/shares.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +def resize_share(conn, share_id, share_size): + # Be explicit about not wanting to use force if the share + # will be extended. + use_force = False + print('Resize the share to the given size:') + conn.share.resize_share(share_id, share_size, use_force) + + +def resize_shares_without_shrink(conn, min_size): + # Sometimes, extending shares without shrinking + # them (effectively setting a min size) is desirable. + + # Get list of shares from the connection. + shares = conn.share.shares() + + # Loop over the shares: + for share in shares: + # Extend shares smaller than min_size to min_size, + # but don't shrink shares larger than min_size. 
+ conn.share.resize_share(share.id, min_size, no_shrink=True) + + +def manage_share(conn, protocol, export_path, service_host, **params): + # Manage a share with the given protocol, export path, service host, and + # optional additional parameters + managed_share = conn.share.manage_share( + protocol, export_path, service_host, **params + ) + + # Can get the ID of the share, which is now being managed with Manila + managed_share_id = managed_share.id + print("The ID of the share which was managed: %s", managed_share_id) + + +def unmanage_share(conn, share_id): + # Unmanage the share with the given share ID + conn.share.unmanage_share(share_id) + + try: + # Getting the share will raise an exception as it has been unmanaged + conn.share.get_share(share_id) + except Exception: + pass diff --git a/extras/delete-network.sh b/extras/delete-network.sh new file mode 100644 index 0000000000..1d02959eac --- /dev/null +++ b/extras/delete-network.sh @@ -0,0 +1,14 @@ +neutron router-gateway-clear router1 +neutron router-interface-delete router1 +for subnet in private-subnet ipv6-private-subnet ; do + neutron router-interface-delete router1 $subnet + subnet_id=$(neutron subnet-show $subnet -f value -c id) + neutron port-list | grep $subnet_id | awk '{print $2}' | xargs -n1 neutron port-delete + neutron subnet-delete $subnet +done +neutron router-delete router1 +neutron net-delete private + +# Make the public network directly consumable +neutron subnet-update public-subnet --enable-dhcp=True +neutron net-update public --shared=True diff --git a/extras/run-ansible-tests.sh b/extras/run-ansible-tests.sh new file mode 100755 index 0000000000..14ed166f2f --- /dev/null +++ b/extras/run-ansible-tests.sh @@ -0,0 +1,100 @@ +#!/bin/bash +############################################################################# +# run-ansible-tests.sh +# +# Script used to setup a tox environment for running Ansible. This is meant +# to be called by tox (via tox.ini). 
To run the Ansible tests, use: +# +# tox -e ansible [TAG ...] +# or +# tox -e ansible -- -c cloudX [TAG ...] +# or to use the development version of Ansible: +# tox -e ansible -- -d -c cloudX [TAG ...] +# +# USAGE: +# run-ansible-tests.sh -e ENVDIR [-d] [-c CLOUD] [TAG ...] +# +# PARAMETERS: +# -d Use Ansible source repo development branch. +# -e ENVDIR Directory of the tox environment to use for testing. +# -c CLOUD Name of the cloud to use for testing. +# Defaults to "devstack-admin". +# [TAG ...] Optional list of space-separated tags to control which +# modules are tested. +# +# EXAMPLES: +# # Run all Ansible tests +# run-ansible-tests.sh -e ansible +# +# # Run auth, keypair, and network tests against cloudX +# run-ansible-tests.sh -e ansible -c cloudX auth keypair network +############################################################################# + + +CLOUD="devstack-admin" +ENVDIR= +USE_DEV=0 + +while getopts "c:de:" opt +do + case $opt in + d) USE_DEV=1 ;; + c) CLOUD=${OPTARG} ;; + e) ENVDIR=${OPTARG} ;; + ?) echo "Invalid option: -${OPTARG}" + exit 1;; + esac +done + +if [ -z ${ENVDIR} ] +then + echo "Option -e is required" + exit 1 +fi + +shift $((OPTIND-1)) +TAGS=$( echo "$*" | tr ' ' , ) + +# We need to source the current tox environment so that Ansible will +# be setup for the correct python environment. +source $ENVDIR/bin/activate + +if [ ${USE_DEV} -eq 1 ] +then + if [ -d ${ENVDIR}/ansible ] + then + echo "Using existing Ansible source repo" + else + echo "Installing Ansible source repo at $ENVDIR" + git clone --recursive https://github.com/ansible/ansible.git ${ENVDIR}/ansible + fi + source $ENVDIR/ansible/hacking/env-setup +fi + +# Run the shade Ansible tests +tag_opt="" +if [ ! 
-z ${TAGS} ] +then +    tag_opt="--tags ${TAGS}" +fi + +# Loop through all ANSIBLE_VAR_ environment variables to allow passing them through to Ansible +for var in $(env | grep -e '^ANSIBLE_VAR_'); do +    VAR_NAME=${var%%=*} # split variable name from value +    ANSIBLE_VAR_NAME=${VAR_NAME#ANSIBLE_VAR_} # cut ANSIBLE_VAR_ prefix from variable name +    ANSIBLE_VAR_NAME=${ANSIBLE_VAR_NAME,,} # lowercase ansible variable +    ANSIBLE_VAR_VALUE=${!VAR_NAME} # Get the variable value +    ANSIBLE_VARS+="${ANSIBLE_VAR_NAME}=${ANSIBLE_VAR_VALUE} " # concat variables +done + +# Until we have a module that lets us determine the image we want from +# within a playbook, we have to find the image here and pass it in. +# We use the openstack client instead of nova client since it can use clouds.yaml. +IMAGE=`openstack --os-cloud=${CLOUD} image list -f value -c Name | grep cirros | grep -v -e ramdisk -e kernel` +if [ $? -ne 0 ] +then +    echo "Failed to find Cirros image" +    exit 1 +fi + +ansible-playbook -vvv ./openstack/tests/ansible/run.yml -e "cloud=${CLOUD} image=${IMAGE} ${ANSIBLE_VARS}" ${tag_opt} diff --git a/include-acceptance-regular-user.txt b/include-acceptance-regular-user.txt new file mode 100644 index 0000000000..62773e7673 --- /dev/null +++ b/include-acceptance-regular-user.txt @@ -0,0 +1,12 @@ +# This file contains a list of tests that can work with regular user privileges +# Until all tests are modified to properly identify whether they are able to +# run or must skip, the ones that are known to work are listed here. 
+### Block Storage +openstack.tests.functional.block_storage.v3.test_volume +# Do not enable test_backup for now, since it is not capable to determine +# backup capabilities of the cloud +# openstack.tests.functional.block_storage.v3.test_backup +### Cloud +openstack.tests.functional.cloud +### Network +openstack.tests.functional.network diff --git a/openstack/__init__.py b/openstack/__init__.py index f23de53122..5c4920f531 100644 --- a/openstack/__init__.py +++ b/openstack/__init__.py @@ -1,16 +1,109 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. # -# http://www.apache.org/licenses/LICENSE-2.0 +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import connection # NOQA -from openstack import exceptions # NOQA -from openstack import profile # NOQA -from openstack import utils # NOQA +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +"""The openstack SDK. + +:py:mod:`openstacksdk` is a client library for building applications to work +with OpenStack clouds. 
The project aims to provide a consistent and complete +set of interactions with OpenStack's many services, along with complete +documentation, examples, and tools. + +There are three ways to interact with :py:mod:`openstacksdk`. The *cloud +layer*, the *proxy layer*, and the *resource layer*. Most users will make use +of either the *cloud layer* or *proxy layer*. + +Listing servers using the *cloud layer*:: + +    >>> import openstack +    >>> conn = openstack.connect(cloud='mordred') +    >>> for server in conn.list_servers(): +    ...     print(server.to_dict()) + +Listing servers using the *proxy layer*:: + +    >>> import openstack +    >>> conn = openstack.connect(cloud='mordred') +    >>> for server in conn.compute.servers(): +    ...     print(server.to_dict()) + +Listing servers using the *resource layer*:: + +    >>> import openstack +    >>> import openstack.compute.v2.server +    >>> conn = openstack.connect(cloud='mordred') +    >>> for server in openstack.compute.v2.server.Server.list( +    ...     session=conn.compute, +    ... ): +    ...     print(server.to_dict()) + +For more information, refer to the documentation found in each submodule. +""" + +import argparse +import typing as ty + +from openstack._log import enable_logging +import openstack.config +import openstack.connection + +__all__ = [ +    'connect', +    'enable_logging', +] + + +def connect( +    cloud: str | None = None, +    app_name: str | None = None, +    app_version: str | None = None, +    options: argparse.ArgumentParser | None = None, +    load_yaml_config: bool = True, +    load_envvars: bool = True, +    **kwargs: ty.Any, +) -> openstack.connection.Connection: +    """Create a :class:`~openstack.connection.Connection` + +    :param string cloud: +        The name of the configuration to load from clouds.yaml. Defaults +        to 'envvars' which will load configuration settings from environment +        variables that start with ``OS_``. +    :param argparse.ArgumentParser options: +        An argparse ArgumentParser object. 
SDK-specific options will be +        registered, parsed out and used to configure the connection. +    :param bool load_yaml_config: +        Whether or not to load config settings from clouds.yaml files. +        Defaults to True. +    :param bool load_envvars: +        Whether or not to load config settings from environment variables. +        Defaults to True. +    :param kwargs: +        Additional configuration options. + +    :returns: openstack.connection.Connection +    :raises: keystoneauth1.exceptions.MissingRequiredOptions +        on missing required auth parameters +    """ +    cloud_region = openstack.config.get_cloud_region( +        cloud=cloud, +        app_name=app_name, +        app_version=app_version, +        load_yaml_config=load_yaml_config, +        load_envvars=load_envvars, +        options=options, +        **kwargs, +    ) +    return openstack.connection.Connection( +        config=cloud_region, +        vendor_hook=kwargs.get('vendor_hook'), +    ) diff --git a/openstack/__main__.py b/openstack/__main__.py new file mode 100644 index 0000000000..631429cd4a --- /dev/null +++ b/openstack/__main__.py @@ -0,0 +1,43 @@ +# Copyright (c) 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import sys + +import pbr.version + + +def show_version(args): + print( + "OpenstackSDK Version {}".format( + pbr.version.VersionInfo('openstacksdk').version_string_with_vcs() + ) + ) + + +parser = argparse.ArgumentParser(description="Openstack SDK") +subparsers = parser.add_subparsers(title='commands', dest='command') + +cmd_version = subparsers.add_parser( + 'version', help='show Openstack SDK version' +) +cmd_version.set_defaults(func=show_version) + +args = parser.parse_args() + +if not args.command: + parser.print_help() + sys.exit(1) + +args.func(args) diff --git a/openstack/_hacking/checks.py b/openstack/_hacking/checks.py new file mode 100644 index 0000000000..9bae7e45c9 --- /dev/null +++ b/openstack/_hacking/checks.py @@ -0,0 +1,65 @@ +# Copyright (c) 2019, Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import re + +from hacking import core + +""" +Guidelines for writing new hacking checks + + - Use only for openstacksdk specific tests. OpenStack general tests + should be submitted to the common 'hacking' module. + - Pick numbers in the range O3xx. Find the current test with + the highest allocated number and then pick the next value. + - Keep the test method code in the source file ordered based + on the O3xx value. 
+ - List the new rule in the top level HACKING.rst file + - Add test cases for each new rule to openstack/tests/unit/test_hacking.py + +""" + +SETUPCLASS_RE = re.compile(r"def setUpClass\(") + + +@core.flake8ext +def assert_no_setupclass(logical_line): +    """Check for use of setUpClass + +    O300 +    """ +    if SETUPCLASS_RE.match(logical_line): +        yield (0, "O300: setUpClass not allowed") + + +@core.flake8ext +def assert_no_deprecated_exceptions(logical_line, filename): +    """Check for use of deprecated cloud-layer exceptions + +    O310 +    """ +    if filename.endswith(os.path.join('openstack', 'cloud', 'exc.py')): +        return + +    for exception in ( +        'OpenStackCloudCreateException', +        'OpenStackCloudTimeout', +        'OpenStackCloudHTTPError', +        'OpenStackCloudBadRequest', +        'OpenStackCloudURINotFound', +        'OpenStackCloudResourceNotFound', +    ): +        if re.search(rf'\b{exception}\b', logical_line): +            yield (0, 'O310: Use of deprecated Exception class') diff --git a/openstack/_log.py b/openstack/_log.py new file mode 100644 index 0000000000..d9d2a842df --- /dev/null +++ b/openstack/_log.py @@ -0,0 +1,139 @@ +# Copyright (c) 2015 IBM Corp. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import logging +import sys +import typing as ty + + +def setup_logging( +    name: str, +    handlers: list[logging.Handler] | None = None, +    level: int | None = None, +) -> logging.Logger: +    """Set up logging for a named logger. 
+ +    Gets and initializes a named logger, ensuring it at least has a +    `logging.NullHandler` attached. + +    :param str name: +        Name of the logger. +    :param list handlers: +        A list of `logging.Handler` objects to attach to the logger. +    :param int level: +        Log level to set the logger at. + +    :returns: A `logging.Logger` object that can be used to emit log messages. +    """ +    handlers = handlers or [] +    log = logging.getLogger(name) +    if len(log.handlers) == 0 and not handlers: +        log.addHandler(logging.NullHandler()) +    for h in handlers: +        log.addHandler(h) +    if level: +        log.setLevel(level) +    return log + + +def enable_logging( +    debug: bool = False, +    http_debug: bool = False, +    path: str | None = None, +    stream: ty.TextIO | None = None, +    format_stream: bool = False, +    format_template: str = '%(asctime)s %(levelname)s: %(name)s %(message)s', +    handlers: list[logging.Handler] | None = None, +) -> None: +    """Enable logging output. + +    Helper function to enable logging. This function is available for +    debugging purposes and for folks doing simple applications who want an +    easy 'just make it work for me'. For more complex applications or for +    those who want more flexibility, the standard library ``logging`` package +    will receive these messages in any handlers you create. + +    :param bool debug: +        Set this to ``True`` to receive debug messages. +    :param bool http_debug: +        Set this to ``True`` to receive debug messages including +        HTTP requests and responses. This implies ``debug=True``. +    :param str path: +        If a *path* is specified, logging output will be written to that file +        in addition to sys.stderr. +        The path is passed to logging.FileHandler, which will append messages +        to the file (and create it if needed). +    :param stream: +        One of ``None`` or ``sys.stdout`` or ``sys.stderr``. +        If it is ``None``, nothing is logged to a stream. +        If it isn't ``None``, console output is logged to this stream. 
+ :param bool format_stream: + If format_stream is False, the default, apply ``format_template`` to + ``path`` but not to ``stream`` outputs. If True, apply + ``format_template`` to ``stream`` outputs as well. + :param str format_template: + Template to pass to :class:`logging.Formatter`. + + :rtype: None + """ + if not stream and not path: + stream = sys.stderr + + if http_debug: + debug = True + if debug: + level = logging.DEBUG + else: + level = logging.INFO + + formatter = logging.Formatter(format_template) + + if handlers: + for handler in handlers: + handler.setFormatter(formatter) + + else: + handlers = [] + + if stream is not None: + console = logging.StreamHandler(stream) + if format_stream: + console.setFormatter(formatter) + handlers.append(console) + + if path is not None: + file_handler = logging.FileHandler(path) + file_handler.setFormatter(formatter) + handlers.append(file_handler) + + setup_logging('openstack', handlers=handlers, level=level) + setup_logging('keystoneauth', handlers=handlers, level=level) + + # Turn off logging on these so that if loggers higher in the tree + # are more verbose we only get what we want out of the SDK. This is + # particularly useful when combined with tools like ansible which set + # debug logging level at the logging root. + # If more complex logging is desired including stevedore debug logging, + # enable_logging should not be used and instead python logging should + # be configured directly. 
+ setup_logging( + 'urllib3', handlers=[logging.NullHandler()], level=logging.INFO + ) + setup_logging( + 'stevedore', handlers=[logging.NullHandler()], level=logging.INFO + ) + # Suppress warning about keystoneauth loggers + setup_logging('keystoneauth.discovery') + setup_logging('keystoneauth.identity.base') + setup_logging('keystoneauth.identity.generic.base') diff --git a/openstack/_services_mixin.py b/openstack/_services_mixin.py new file mode 100644 index 0000000000..7df757f7a0 --- /dev/null +++ b/openstack/_services_mixin.py @@ -0,0 +1,211 @@ +# Generated file, to change, run tools/print-services.py +import typing as ty + +from openstack import service_description +from openstack.accelerator import accelerator_service +from openstack.baremetal import baremetal_service +from openstack.baremetal_introspection import baremetal_introspection_service +from openstack.block_storage import block_storage_service +from openstack.clustering import clustering_service +from openstack.compute import compute_service +from openstack.container_infrastructure_management import ( + container_infrastructure_management_service, +) +from openstack.database import database_service +from openstack.dns import dns_service +from openstack.identity import identity_service +from openstack.image import image_service +from openstack.instance_ha import instance_ha_service +from openstack.key_manager import key_manager_service +from openstack.load_balancer import load_balancer_service +from openstack.message import message_service +from openstack.network import network_service +from openstack.object_store import object_store_service +from openstack.orchestration import orchestration_service +from openstack.placement import placement_service +from openstack.shared_file_system import shared_file_system_service +from openstack.workflow import workflow_service + +if ty.TYPE_CHECKING: + # the noqa is necessary as 'proxy' is only referenced in string subscripts + # and ruff doesn't scan for name 
usage since they're not in annotation + # positions + from openstack import proxy # noqa: F401 + + +class ServicesMixin: + identity = identity_service.IdentityService(service_type='identity') + + compute = compute_service.ComputeService(service_type='compute') + + image = image_service.ImageService(service_type='image') + + load_balancer = load_balancer_service.LoadBalancerService( + service_type='load-balancer' + ) + + object_store = object_store_service.ObjectStoreService( + service_type='object-store' + ) + + clustering = clustering_service.ClusteringService( + service_type='clustering' + ) + resource_cluster = clustering + cluster = clustering + + data_processing = service_description.ServiceDescription['proxy.Proxy']( + service_type='data-processing' + ) + + baremetal = baremetal_service.BaremetalService(service_type='baremetal') + bare_metal = baremetal + + baremetal_introspection = ( + baremetal_introspection_service.BaremetalIntrospectionService( + service_type='baremetal-introspection' + ) + ) + + key_manager = key_manager_service.KeyManagerService( + service_type='key-manager' + ) + + resource_optimization = service_description.ServiceDescription[ + 'proxy.Proxy' + ](service_type='resource-optimization') + infra_optim = resource_optimization + + message = message_service.MessageService(service_type='message') + messaging = message + + application_catalog = service_description.ServiceDescription[ + 'proxy.Proxy' + ](service_type='application-catalog') + + container_infrastructure_management = container_infrastructure_management_service.ContainerInfrastructureManagementService( + service_type='container-infrastructure-management' + ) + container_infra = container_infrastructure_management + container_infrastructure = container_infrastructure_management + + search = service_description.ServiceDescription['proxy.Proxy']( + service_type='search' + ) + + dns = dns_service.DnsService(service_type='dns') + + workflow = 
workflow_service.WorkflowService(service_type='workflow') + + rating = service_description.ServiceDescription['proxy.Proxy']( + service_type='rating' + ) + + operator_policy = service_description.ServiceDescription['proxy.Proxy']( + service_type='operator-policy' + ) + policy = operator_policy + + shared_file_system = shared_file_system_service.SharedFilesystemService( + service_type='shared-file-system' + ) + share = shared_file_system + + data_protection_orchestration = service_description.ServiceDescription[ + 'proxy.Proxy' + ](service_type='data-protection-orchestration') + + orchestration = orchestration_service.OrchestrationService( + service_type='orchestration' + ) + + block_storage = block_storage_service.BlockStorageService( + service_type='block-storage' + ) + block_store = block_storage + volume = block_storage + + alarm = service_description.ServiceDescription['proxy.Proxy']( + service_type='alarm' + ) + alarming = alarm + + meter = service_description.ServiceDescription['proxy.Proxy']( + service_type='meter' + ) + metering = meter + telemetry = meter + + event = service_description.ServiceDescription['proxy.Proxy']( + service_type='event' + ) + events = event + + application_deployment = service_description.ServiceDescription[ + 'proxy.Proxy' + ](service_type='application-deployment') + application_deployment = application_deployment + + multi_region_network_automation = service_description.ServiceDescription[ + 'proxy.Proxy' + ](service_type='multi-region-network-automation') + tricircle = multi_region_network_automation + + database = database_service.DatabaseService(service_type='database') + + application_container = service_description.ServiceDescription[ + 'proxy.Proxy' + ](service_type='application-container') + container = application_container + + root_cause_analysis = service_description.ServiceDescription[ + 'proxy.Proxy' + ](service_type='root-cause-analysis') + rca = root_cause_analysis + + nfv_orchestration = 
service_description.ServiceDescription['proxy.Proxy']( + service_type='nfv-orchestration' + ) + + network = network_service.NetworkService(service_type='network') + + backup = service_description.ServiceDescription['proxy.Proxy']( + service_type='backup' + ) + + monitoring_logging = service_description.ServiceDescription['proxy.Proxy']( + service_type='monitoring-logging' + ) + monitoring_log_api = monitoring_logging + + monitoring = service_description.ServiceDescription['proxy.Proxy']( + service_type='monitoring' + ) + + monitoring_events = service_description.ServiceDescription['proxy.Proxy']( + service_type='monitoring-events' + ) + + placement = placement_service.PlacementService(service_type='placement') + + instance_ha = instance_ha_service.InstanceHaService( + service_type='instance-ha' + ) + ha = instance_ha + + reservation = service_description.ServiceDescription['proxy.Proxy']( + service_type='reservation' + ) + + function_engine = service_description.ServiceDescription['proxy.Proxy']( + service_type='function-engine' + ) + + accelerator = accelerator_service.AcceleratorService( + service_type='accelerator' + ) + + admin_logic = service_description.ServiceDescription['proxy.Proxy']( + service_type='admin-logic' + ) + registration = admin_logic diff --git a/openstack/block_store/__init__.py b/openstack/accelerator/__init__.py similarity index 100% rename from openstack/block_store/__init__.py rename to openstack/accelerator/__init__.py diff --git a/openstack/accelerator/accelerator_service.py b/openstack/accelerator/accelerator_service.py new file mode 100644 index 0000000000..00a5b817c5 --- /dev/null +++ b/openstack/accelerator/accelerator_service.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.accelerator.v2 import _proxy +from openstack import service_description + + +class AcceleratorService( + service_description.ServiceDescription[_proxy.Proxy], +): + """The accelerator service.""" + + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/openstack/block_store/v2/__init__.py b/openstack/accelerator/v2/__init__.py similarity index 100% rename from openstack/block_store/v2/__init__.py rename to openstack/accelerator/v2/__init__.py diff --git a/openstack/accelerator/v2/_proxy.py b/openstack/accelerator/v2/_proxy.py new file mode 100644 index 0000000000..6d82d0e8b5 --- /dev/null +++ b/openstack/accelerator/v2/_proxy.py @@ -0,0 +1,320 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack.accelerator.v2 import accelerator_request as _arq +from openstack.accelerator.v2 import attribute as _attribute +from openstack.accelerator.v2 import deployable as _deployable +from openstack.accelerator.v2 import device as _device +from openstack.accelerator.v2 import device_profile as _device_profile +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + # ========== Deployables ========== + + def deployables(self, **query): + """Retrieve a generator of deployables. + + :param kwargs query: Optional query parameters to be sent to + restrict the deployables to be returned. + :returns: A generator of deployable instances. + """ + return self._list(_deployable.Deployable, **query) + + def get_deployable(self, uuid, fields=None): + """Get a single deployable. + + :param uuid: The value can be the UUID of a deployable. + :returns: One :class:`~openstack.accelerator.v2.deployable.Deployable` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + deployable matching the criteria could be found. + """ + return self._get(_deployable.Deployable, uuid) + + def update_deployable(self, uuid, patch): + """Reconfig the FPGA with new bitstream. + + :param uuid: The value can be the UUID of a deployable + :param patch: The information to reconfig. + :returns: The results of FPGA reconfig. + """ + return self._get_resource(_deployable.Deployable, uuid).patch( + self, patch + ) + + # ========== Devices ========== + + def devices(self, **query): + """Retrieve a generator of devices. + + :param kwargs query: Optional query parameters to be sent to + restrict the devices to be returned. Available parameters include: + + * hostname: The hostname of the device. + * type: The type of the device. + * vendor: The vendor ID of the device. + * sort: A list of sorting keys separated by commas. 
Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * limit: Requests a specified size of returned items from the + query. Returns a number of items up to the specified limit + value. + * marker: Specifies the ID of the last-seen item. Use the limit + parameter to make an initial limited request and use the ID of + the last-seen item from the response as the marker parameter + value in a subsequent limited request. + :returns: A generator of device instances. + """ + return self._list(_device.Device, **query) + + def get_device(self, uuid, fields=None): + """Get a single device. + + :param uuid: The value can be the UUID of a device. + :returns: One :class:`~openstack.accelerator.v2.device.Device` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + device matching the criteria could be found. + """ + return self._get(_device.Device, uuid) + + # ========== Device profiles ========== + + def device_profiles(self, **query): + """Retrieve a generator of device profiles. + + :param kwargs query: Optional query parameters to be sent to + restrict the device profiles to be returned. + :returns: A generator of device profile instances. + """ + return self._list(_device_profile.DeviceProfile, **query) + + def create_device_profile(self, **attrs): + """Create a device_profile. + + :param kwargs attrs: a list of device_profiles. + :returns: The list of created device profiles + """ + return self._create(_device_profile.DeviceProfile, **attrs) + + def delete_device_profile(self, device_profile, ignore_missing=True): + """Delete a device profile + + :param device_profile: The value can be either the ID of a device + profile or a + :class:`~openstack.accelerator.v2.device_profile.DeviceProfile` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the device profile does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent device profile. + :returns: ``None`` + """ + return self._delete( + _device_profile.DeviceProfile, + device_profile, + ignore_missing=ignore_missing, + ) + + def get_device_profile(self, uuid, fields=None): + """Get a single device profile. + + :param uuid: The value can be the UUID of a device profile. + :returns: One :class: + `~openstack.accelerator.v2.device_profile.DeviceProfile` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + device profile matching the criteria could be found. + """ + return self._get(_device_profile.DeviceProfile, uuid) + + # ========== Accelerator requests ========== + + def accelerator_requests(self, **query): + """Retrieve a generator of accelerator requests. + + :param kwargs query: Optional query parameters to be sent to + restrict the accelerator requests to be returned. + :returns: A generator of accelerator request instances. + """ + return self._list(_arq.AcceleratorRequest, **query) + + def create_accelerator_request(self, **attrs): + """Create an ARQs for a single device profile. + + :param kwargs attrs: request body. + :returns: The created accelerator request instance. + """ + return self._create(_arq.AcceleratorRequest, **attrs) + + def delete_accelerator_request( + self, + accelerator_request, + ignore_missing=True, + ): + """Delete a device profile + + :param device_profile: The value can be either the ID of a device + profile or a + :class:`~openstack.accelerator.v2.device_profile.DeviceProfile` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the device profile does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + accelerator request. 
+ :returns: ``None`` + """ + return self._delete( + _arq.AcceleratorRequest, + accelerator_request, + ignore_missing=ignore_missing, + ) + + def get_accelerator_request(self, uuid, fields=None): + """Get a single accelerator request. + + :param uuid: The value can be the UUID of a accelerator request. + :returns: One :class: + `~openstack.accelerator.v2.accelerator_request.AcceleratorRequest` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + accelerator request matching the criteria could be found. + """ + return self._get(_arq.AcceleratorRequest, uuid) + + def update_accelerator_request(self, uuid, properties): + """Bind/Unbind an accelerator to VM. + + :param uuid: The uuid of the accelerator_request to be bound/unbound. + :param properties: The info of VM + that will bind/unbind the accelerator. + :returns: True if bind/unbind succeeded, False otherwise. + """ + return self._get_resource(_arq.AcceleratorRequest, uuid).patch( + self, properties + ) + + # ========== Attributes ========== + + def attributes(self, **query): + """Retrieve a generator of attributes. + + :param kwargs query: Optional query parameters to be sent to + restrict the attributes to be returned. + :returns: A generator of attribute instances. + """ + return self._list(_attribute.Attribute, **query) + + def create_attribute(self, **attrs): + """Create a attribute. + + :param kwargs attrs: a list of attributes. + :returns: The list of created attributes + """ + return self._create(_attribute.Attribute, **attrs) + + def delete_attribute(self, attribute, ignore_missing=True): + """Delete a attribute + + :param attribute: The value can be either the ID of a attributes or a + :class:`~openstack.accelerator.v2.attribute.Attributes` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.ResourceNotFound` will be + raised when the device profile does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent device profile. + :returns: ``None`` + """ + return self._delete( + _attribute.Attribute, + attribute, + ignore_missing=ignore_missing, + ) + + def get_attribute(self, uuid, fields=None): + """Get a single device profile. + + :param uuid: The value can be the UUID of a attribute. + :returns: One :class: + `~openstack.accelerator.v2.attribute.Attribute` + :raises: :class:`~openstack.exceptions.ResourceNotFound` when no + device profile matching the criteria could be found. + """ + return self._get(_attribute.Attribute, uuid) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. 
+ :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/accelerator/v2/accelerator_request.py b/openstack/accelerator/v2/accelerator_request.py new file mode 100644 index 0000000000..35b0940fcd --- /dev/null +++ b/openstack/accelerator/v2/accelerator_request.py @@ -0,0 +1,112 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource + + +class AcceleratorRequest(resource.Resource): + resource_key = 'arq' + resources_key = 'arqs' + base_path = '/accelerator_requests' + + # capabilities + allow_create = True + allow_fetch = True + allow_delete = True + allow_list = True + #: Allow patch operation for binding. + allow_patch = True + + #: The device address associated with this ARQ (if any) + attach_handle_info = resource.Body('attach_handle_info') + #: The type of attach handle (e.g. PCI, mdev...) + attach_handle_type = resource.Body('attach_handle_type') + #: The name of the device profile + device_profile_name = resource.Body('device_profile_name') + #: The id of the device profile group + device_profile_group_id = resource.Body('device_profile_group_id') + #: The UUID of the bound device RP (if any) + device_rp_uuid = resource.Body('device_rp_uuid') + #: The host name to which ARQ is bound. (if any) + hostname = resource.Body('hostname') + #: The UUID of the instance associated with this ARQ (if any) + instance_uuid = resource.Body('instance_uuid') + #: The state of the ARQ + state = resource.Body('state') + #: The UUID of the ARQ + uuid = resource.Body('uuid', alternate_id=True) + + def _convert_patch(self, patch): + # This overrides the default behavior of _convert_patch because + # the PATCH method consumes JSON, its key is the ARQ uuid + # and its value is an ordinary JSON patch. spec: + # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api + + converted = super()._convert_patch(patch) + converted = {self.id: converted} + return converted + + def patch( + self, + session, + patch=None, + prepend_key=True, + has_body=True, + retry_on_conflict=None, + base_path=None, + *, + microversion=None, + ): + # This overrides the default behavior of patch because + # the PATCH method consumes a dict rather than a list. 
spec: + # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api + + # The id cannot be dirty for an commit + self._body._dirty.discard("id") + + # Only try to update if we actually have anything to commit. + if not patch and not self.requires_commit: + return self + + if not self.allow_patch: + raise exceptions.MethodNotSupported(self, "patch") + + request = self._prepare_request( + prepend_key=prepend_key, base_path=base_path, patch=True + ) + microversion = self._get_microversion(session) + if patch: + request.body = self._convert_patch(patch) + + return self._commit( + session, + request, + 'PATCH', + microversion, + has_body=has_body, + retry_on_conflict=retry_on_conflict, + ) + + def _consume_attrs(self, mapping, attrs): + # This overrides the default behavior of _consume_attrs because + # cyborg api returns an ARQ as list. spec: + # https://specs.openstack.org/openstack/cyborg-specs/specs/train/implemented/cyborg-api + if isinstance(self, AcceleratorRequest): + if self.resources_key in attrs: + attrs = attrs[self.resources_key][0] + return super()._consume_attrs(mapping, attrs) + + def create(self, session, prepend_key=False, *args, **kwargs): + # This overrides the default behavior of resource creation because + # cyborg doesn't accept resource_key in its request. + return super().create(session, prepend_key, *args, **kwargs) diff --git a/openstack/accelerator/v2/attribute.py b/openstack/accelerator/v2/attribute.py new file mode 100644 index 0000000000..f27273c9cd --- /dev/null +++ b/openstack/accelerator/v2/attribute.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack import resource + + +class Attribute(resource.Resource): + resource_key = 'attribute' + resources_key = 'attributes' + base_path = '/attributes' + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + + #: The timestamp when this attribute was created. + created_at = resource.Body('created_at') + #: The deployable_id of the attribute + deployable_id = resource.Body('deployable_id') + #: The key of the attribute + key = resource.Body('key') + #: The value of the attribute + value = resource.Body('value') + #: The timestamp when this attribute was updated. + updated_at = resource.Body('updated_at') + #: The uuid of the attribute + uuid = resource.Body('uuid', alternate_id=True) diff --git a/openstack/accelerator/v2/deployable.py b/openstack/accelerator/v2/deployable.py new file mode 100644 index 0000000000..910caa3e98 --- /dev/null +++ b/openstack/accelerator/v2/deployable.py @@ -0,0 +1,80 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack import exceptions +from openstack import resource + + +class Deployable(resource.Resource): + resource_key = 'deployable' + resources_key = 'deployables' + base_path = '/deployables' + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + allow_patch = True + #: The timestamp when this deployable was created. + created_at = resource.Body('created_at') + #: The device_id of the deployable. + device_id = resource.Body('device_id') + #: The UUID of the deployable. + id = resource.Body('uuid', alternate_id=True) + #: The name of the deployable. + name = resource.Body('name') + #: The num_accelerator of the deployable. + num_accelerators = resource.Body('num_accelerators') + #: The parent_id of the deployable. + parent_id = resource.Body('parent_id') + #: The root_id of the deployable. + root_id = resource.Body('root_id') + #: The timestamp when this deployable was updated. + updated_at = resource.Body('updated_at') + + def _commit( + self, + session, + request, + method, + microversion, + has_body=True, + retry_on_conflict=None, + ): + session = self._get_session(session) + kwargs = {} + retriable_status_codes = set(session.retriable_status_codes or ()) + if retry_on_conflict: + kwargs['retriable_status_codes'] = retriable_status_codes | {409} + elif retry_on_conflict is not None and retriable_status_codes: + # The baremetal proxy defaults to retrying on conflict, allow + # overriding it via an explicit retry_on_conflict=False. 
+ kwargs['retriable_status_codes'] = retriable_status_codes - {409} + + try: + call = getattr(session, method.lower()) + except AttributeError: + raise exceptions.ResourceFailure( + f"Invalid commit method: {method}" + ) + + request.url = request.url + "/program" + response = call( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + **kwargs, + ) + self.microversion = microversion + self._translate_response(response, has_body=has_body) + return self diff --git a/openstack/accelerator/v2/device.py b/openstack/accelerator/v2/device.py new file mode 100644 index 0000000000..73799669dc --- /dev/null +++ b/openstack/accelerator/v2/device.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack import resource + + +class Device(resource.Resource): + resource_key = 'device' + resources_key = 'devices' + base_path = '/devices' + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + #: The timestamp when this device was created. + created_at = resource.Body('created_at') + #: The hostname of the device. + hostname = resource.Body('hostname') + #: The ID of the device. + id = resource.Body('id') + #: The model of the device. + model = resource.Body('model') + #: The std board information of the device. + std_board_info = resource.Body('std_board_info') + #: The type of the device. 
+ type = resource.Body('type') + #: The timestamp when this device was updated. + updated_at = resource.Body('updated_at') + #: The UUID of the device. + uuid = resource.Body('uuid', alternate_id=True) + #: The vendor ID of the device. + vendor = resource.Body('vendor') + #: The vendor board information of the device. + vendor_board_info = resource.Body('vendor_board_info') diff --git a/openstack/accelerator/v2/device_profile.py b/openstack/accelerator/v2/device_profile.py new file mode 100644 index 0000000000..dfa8a85ec8 --- /dev/null +++ b/openstack/accelerator/v2/device_profile.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack import resource + + +class DeviceProfile(resource.Resource): + resource_key = 'device_profile' + resources_key = 'device_profiles' + base_path = '/device_profiles' + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + + #: The timestamp when this device_profile was created. + created_at = resource.Body('created_at') + #: The description of the device profile + description = resource.Body('description') + #: The groups of the device profile + groups = resource.Body('groups') + #: The name of the device profile + name = resource.Body('name') + #: The timestamp when this device_profile was updated. 
+ updated_at = resource.Body('updated_at') + #: The uuid of the device profile + uuid = resource.Body('uuid', alternate_id=True) + + # TODO(s_shogo): This implementation only treat [ DeviceProfile ], and + # cannot treat multiple DeviceProfiles in list. + def _prepare_request_body( + self, + patch, + prepend_key, + *, + resource_request_key=None, + ): + body = super()._prepare_request_body( + patch, prepend_key, resource_request_key=resource_request_key + ) + return [body] + + def create(self, session, prepend_key=False, *args, **kwargs): + # This overrides the default behavior of resource creation because + # cyborg doesn't accept resource_key in its request. + return super().create(session, prepend_key, *args, **kwargs) diff --git a/openstack/accelerator/version.py b/openstack/accelerator/version.py new file mode 100644 index 0000000000..692230a198 --- /dev/null +++ b/openstack/accelerator/version.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack import resource + + +class Version(resource.Resource): + resource_key = 'version' + resources_key = 'versions' + base_path = '/' + + # capabilities + allow_list = True + + # Properties + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/bare_metal/bare_metal_service.py b/openstack/bare_metal/bare_metal_service.py deleted file mode 100644 index 71c3bca599..0000000000 --- a/openstack/bare_metal/bare_metal_service.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import service_filter - - -class BareMetalService(service_filter.ServiceFilter): - """The bare metal service.""" - - valid_versions = [service_filter.ValidVersion('v1')] - - def __init__(self, version=None): - """Create a bare metal service.""" - super(BareMetalService, self).__init__(service_type='baremetal', - version=version) diff --git a/openstack/bare_metal/v1/_proxy.py b/openstack/bare_metal/v1/_proxy.py deleted file mode 100644 index f9f8e5f337..0000000000 --- a/openstack/bare_metal/v1/_proxy.py +++ /dev/null @@ -1,490 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.bare_metal.v1 import chassis as _chassis -from openstack.bare_metal.v1 import driver as _driver -from openstack.bare_metal.v1 import node as _node -from openstack.bare_metal.v1 import port as _port -from openstack.bare_metal.v1 import port_group as _portgroup -from openstack import proxy2 - - -class Proxy(proxy2.BaseProxy): - - def chassis(self, details=False, **query): - """Retrieve a generator of chassis. - - :param details: A boolean indicating whether the detailed information - for every chassis should be returned. - :param dict query: Optional query parameters to be sent to - restrict the chassis to be returned. Available parameters include: - - * ``fields``: A list containing one or more fields to be returned - in the response. This may lead to some performance gain - because other fields of the resource are not refreshed. - * ``limit``: Requests at most the specified number of items be - returned from the query. - * ``marker``: Specifies the ID of the last-seen chassis. Use the - ``limit`` parameter to make an initial limited request and - use the ID of the last-seen chassis from the response as - the ``marker`` value in a subsequent limited request. - * ``sort_dir``: Sorts the response by the requested sort direction. - A valid value is ``asc`` (ascending) or ``desc`` - (descending). Default is ``asc``. You can specify multiple - pairs of sort key and sort direction query parameters. 
If - you omit the sort direction in a pair, the API uses the - natural sorting direction of the server attribute that is - provided as the ``sort_key``. - * ``sort_key``: Sorts the response by the this attribute value. - Default is ``id``. You can specify multiple pairs of sort - key and sort direction query parameters. If you omit the - sort direction in a pair, the API uses the natural sorting - direction of the server attribute that is provided as the - ``sort_key``. - - :returns: A generator of chassis instances. - """ - cls = _chassis.ChassisDetail if details else _chassis.Chassis - return self._list(cls, paginated=True, **query) - - def create_chassis(self, **attrs): - """Create a new chassis from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.bare_metal.v1.chassis.Chassis`, it comprised - of the properties on the ``Chassis`` class. - - :returns: The results of chassis creation. - :rtype: :class:`~openstack.bare_metal.v1.chassis.Chassis`. - """ - return self._create(_chassis.Chassis, **attrs) - - def find_chassis(self, name_or_id, ignore_missing=True): - """Find a single chassis. - - :param str name_or_id: The name or ID of a chassis. - :param bool ignore_missing: When set to ``False``, an exception of - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the chassis does not exist. When set to `True``, None will - be returned when attempting to find a nonexistent chassis. - :returns: One :class:`~openstack.bare_metal.v1.chassis.Chassis` object - or None. - """ - return self._find(_chassis.Chassis, name_or_id, - ignore_missing=ignore_missing) - - def get_chassis(self, chassis): - """Get a specific chassis. - - :param chassis: The value can be the name or ID of a chassis or a - :class:`~openstack.bare_metal.v1.chassis.Chassis` instance. 
- - :returns: One :class:`~openstack.bare_metal.v1.chassis.Chassis` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - chassis matching the name or ID could be found. - """ - return self._get(_chassis.Chassis, chassis) - - def update_chassis(self, chassis, **attrs): - """Update a chassis. - - :param chassis: Either the name or the ID of a chassis, or an instance - of :class:`~openstack.bare_metal.v1.chassis.Chassis`. - :param dict attrs: The attributes to update on the chassis represented - by the ``chassis`` parameter. - - :returns: The updated chassis. - :rtype: :class:`~openstack.bare_metal.v1.chassis.Chassis` - """ - return self._update(_chassis.Chassis, chassis, **attrs) - - def delete_chassis(self, chassis, ignore_missing=True): - """Delete a chassis. - - :param chassis: The value can be either the name or ID of a chassis or - a :class:`~openstack.bare_metal.v1.chassis.Chassis` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the chassis could not be found. When set to ``True``, no - exception will be raised when attempting to delete a non-existent - chassis. - - :returns: The instance of the chassis which was deleted. - :rtype: :class:`~openstack.bare_metal.v1.chassis.Chassis`. - """ - return self._delete(_chassis.Chassis, chassis, - ignore_missing=ignore_missing) - - def drivers(self): - """Retrieve a generator of drivers. - - :returns: A generator of driver instances. - """ - return self._list(_driver.Driver, paginated=False) - - def get_driver(self, driver): - """Get a specific driver. - - :param driver: The value can be the name of a driver or a - :class:`~openstack.bare_metal.v1.driver.Driver` instance. - - :returns: One :class:`~openstack.bare_metal.v1.driver.Driver` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - driver matching the name could be found. 
- """ - return self._get(_driver.Driver, driver) - - def nodes(self, details=False, **query): - """Retrieve a generator of nodes. - - :param details: A boolean indicating whether the detailed information - for every node should be returned. - :param dict query: Optional query parameters to be sent to restrict - the nodes returned. Available parameters include: - - * ``associated``: Only return those which are, or are not, - associated with an ``instance_id``. - * ``driver``: Only return those with the specified ``driver``. - * ``fields``: A list containing one or more fields to be returned - in the response. This may lead to some performance gain - because other fields of the resource are not refreshed. - * ``instance_id``: Only return the node with this specific instance - UUID or an empty set if not found. - * ``is_maintenance``: Only return those with ``maintenance`` set to - ``True`` or ``False``. - * ``limit``: Requests at most the specified number of nodes be - returned from the query. - * ``marker``: Specifies the ID of the last-seen node. Use the - ``limit`` parameter to make an initial limited request and - use the ID of the last-seen node from the response as - the ``marker`` value in a subsequent limited request. - * ``provision_state``: Only return those nodes with the specified - ``provision_state``. - * ``sort_dir``: Sorts the response by the requested sort direction. - A valid value is ``asc`` (ascending) or ``desc`` - (descending). Default is ``asc``. You can specify multiple - pairs of sort key and sort direction query parameters. If - you omit the sort direction in a pair, the API uses the - natural sorting direction of the server attribute that is - provided as the ``sort_key``. - * ``sort_key``: Sorts the response by the this attribute value. - Default is ``id``. You can specify multiple pairs of sort - key and sort direction query parameters. 
If you omit the - sort direction in a pair, the API uses the natural sorting - direction of the server attribute that is provided as the - ``sort_key``. - - :returns: A generator of node instances. - """ - cls = _node.NodeDetail if details else _node.Node - return self._list(cls, paginated=True, **query) - - def create_node(self, **attrs): - """Create a new node from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.bare_metal.v1.node.Node`, it comprised - of the properties on the ``Node`` class. - - :returns: The results of node creation. - :rtype: :class:`~openstack.bare_metal.v1.node.Node`. - """ - return self._create(_node.Node, **attrs) - - def find_node(self, name_or_id, ignore_missing=True): - """Find a single node. - - :param str name_or_id: The name or ID of a node. - :param bool ignore_missing: When set to ``False``, an exception of - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the node does not exist. When set to `True``, None will - be returned when attempting to find a nonexistent node. - :returns: One :class:`~openstack.bare_metal.v1.node.Node` object - or None. - """ - return self._find(_node.Node, name_or_id, - ignore_missing=ignore_missing) - - def get_node(self, node): - """Get a specific node. - - :param node: The value can be the name or ID of a chassis or a - :class:`~openstack.bare_metal.v1.node.Node` instance. - - :returns: One :class:`~openstack.bare_metal.v1.node.Node` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - node matching the name or ID could be found. - """ - return self._get(_node.Node, node) - - def update_node(self, node, **attrs): - """Update a node. - - :param chassis: Either the name or the ID of a node or an instance - of :class:`~openstack.bare_metal.v1.node.Node`. - :param dict attrs: The attributes to update on the node represented - by the ``node`` parameter. - - :returns: The updated node. 
- :rtype: :class:`~openstack.bare_metal.v1.node.Node` - """ - return self._update(_node.Node, node, **attrs) - - def delete_node(self, node, ignore_missing=True): - """Delete a node. - - :param node: The value can be either the name or ID of a node or - a :class:`~openstack.bare_metal.v1.node.Node` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the node could not be found. When set to ``True``, no - exception will be raised when attempting to delete a non-existent - node. - - :returns: The instance of the node which was deleted. - :rtype: :class:`~openstack.bare_metal.v1.node.Node`. - """ - return self._delete(_node.Node, node, ignore_missing=ignore_missing) - - def ports(self, details=False, **query): - """Retrieve a generator of ports. - - :param details: A boolean indicating whether the detailed information - for every port should be returned. - :param dict query: Optional query parameters to be sent to restrict - the ports returned. Available parameters include: - - * ``address``: Only return ports with the specified physical - hardware address, typically a MAC address. - * ``driver``: Only return those with the specified ``driver``. - * ``fields``: A list containing one or more fields to be returned - in the response. This may lead to some performance gain - because other fields of the resource are not refreshed. - * ``limit``: Requests at most the specified number of ports be - returned from the query. - * ``marker``: Specifies the ID of the last-seen port. Use the - ``limit`` parameter to make an initial limited request and - use the ID of the last-seen port from the response as - the ``marker`` value in a subsequent limited request. - * ``node``:only return the ones associated with this specific node - (name or UUID), or an empty set if not found. - * ``node_id``:only return the ones associated with this specific - node UUID, or an empty set if not found. 
- * ``portgroup``: only return the ports associated with this - specific Portgroup (name or UUID), or an empty set if not - found. Added in API microversion 1.24. - * ``sort_dir``: Sorts the response by the requested sort direction. - A valid value is ``asc`` (ascending) or ``desc`` - (descending). Default is ``asc``. You can specify multiple - pairs of sort key and sort direction query parameters. If - you omit the sort direction in a pair, the API uses the - natural sorting direction of the server attribute that is - provided as the ``sort_key``. - * ``sort_key``: Sorts the response by the this attribute value. - Default is ``id``. You can specify multiple pairs of sort - key and sort direction query parameters. If you omit the - sort direction in a pair, the API uses the natural sorting - direction of the server attribute that is provided as the - ``sort_key``. - - :returns: A generator of port instances. - """ - cls = _port.PortDetail if details else _port.Port - return self._list(cls, paginated=True, **query) - - def create_port(self, **attrs): - """Create a new port from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.bare_metal.v1.port.Port`, it comprises of the - properties on the ``Port`` class. - - :returns: The results of port creation. - :rtype: :class:`~openstack.bare_metal.v1.port.Port`. - """ - return self._create(_port.Port, **attrs) - - def find_port(self, name_or_id, ignore_missing=True): - """Find a single port. - - :param str name_or_id: The name or ID of a port. - :param bool ignore_missing: When set to ``False``, an exception of - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the port does not exist. When set to `True``, None will - be returned when attempting to find a nonexistent port. - :returns: One :class:`~openstack.bare_metal.v1.port.Port` object - or None. 
- """ - return self._find(_port.Port, name_or_id, - ignore_missing=ignore_missing) - - def get_port(self, port, **query): - """Get a specific port. - - :param port: The value can be the name or ID of a chassis or a - :class:`~openstack.bare_metal.v1.port.Port` instance. - :param dict query: Optional query parameters to be sent to restrict - the port properties returned. Available parameters include: - - * ``fields``: A list containing one or more fields to be returned - in the response. This may lead to some performance gain - because other fields of the resource are not refreshed. - - :returns: One :class:`~openstack.bare_metal.v1.port.Port` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - port matching the name or ID could be found. - """ - return self._get(_port.Port, port, **query) - - def update_port(self, port, **attrs): - """Update a port. - - :param chassis: Either the name or the ID of a port or an instance - of :class:`~openstack.bare_metal.v1.port.Port`. - :param dict attrs: The attributes to update on the port represented - by the ``port`` parameter. - - :returns: The updated port. - :rtype: :class:`~openstack.bare_metal.v1.port.Port` - """ - return self._update(_port.Port, port, **attrs) - - def delete_port(self, port, ignore_missing=True): - """Delete a port. - - :param port: The value can be either the name or ID of a port or - a :class:`~openstack.bare_metal.v1.port.Port` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the port could not be found. When set to ``True``, no - exception will be raised when attempting to delete a non-existent - port. - - :returns: The instance of the port which was deleted. - :rtype: :class:`~openstack.bare_metal.v1.port.Port`. - """ - return self._delete(_port.Port, port, ignore_missing=ignore_missing) - - def portgroups(self, details=False, **query): - """Retrieve a generator of portgroups. 
- - :param details: A boolean indicating whether the detailed information - for every portgroup should be returned. - :param dict query: Optional query parameters to be sent to restrict - the portgroups returned. Available parameters include: - - * ``address``: Only return portgroups with the specified physical - hardware address, typically a MAC address. - * ``fields``: A list containing one or more fields to be returned - in the response. This may lead to some performance gain - because other fields of the resource are not refreshed. - * ``limit``: Requests at most the specified number of portgroups - returned from the query. - * ``marker``: Specifies the ID of the last-seen portgroup. Use the - ``limit`` parameter to make an initial limited request and - use the ID of the last-seen portgroup from the response as - the ``marker`` value in a subsequent limited request. - * ``node``:only return the ones associated with this specific node - (name or UUID), or an empty set if not found. - * ``sort_dir``: Sorts the response by the requested sort direction. - A valid value is ``asc`` (ascending) or ``desc`` - (descending). Default is ``asc``. You can specify multiple - pairs of sort key and sort direction query parameters. If - you omit the sort direction in a pair, the API uses the - natural sorting direction of the server attribute that is - provided as the ``sort_key``. - * ``sort_key``: Sorts the response by the this attribute value. - Default is ``id``. You can specify multiple pairs of sort - key and sort direction query parameters. If you omit the - sort direction in a pair, the API uses the natural sorting - direction of the server attribute that is provided as the - ``sort_key``. - - :returns: A generator of portgroup instances. - """ - cls = _portgroup.PortGroupDetail if details else _portgroup.PortGroup - return self._list(cls, paginated=True, **query) - - def create_portgroup(self, **attrs): - """Create a new portgroup from attributes. 
- - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.bare_metal.v1.portgroup.PortGroup`, it - comprises of the properties on the ``PortGroup`` class. - - :returns: The results of portgroup creation. - :rtype: :class:`~openstack.bare_metal.v1.portgroup.PortGroup`. - """ - return self._create(_portgroup.PortGroup, **attrs) - - def find_portgroup(self, name_or_id, ignore_missing=True): - """Find a single portgroup. - - :param str name_or_id: The name or ID of a portgroup. - :param bool ignore_missing: When set to ``False``, an exception of - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the portgroup does not exist. When set to `True``, None will - be returned when attempting to find a nonexistent portgroup. - :returns: One :class:`~openstack.bare_metal.v1.portgroup.PortGroup` - object or None. - """ - return self._find(_portgroup.PortGroup, name_or_id, - ignore_missing=ignore_missing) - - def get_portgroup(self, portgroup, **query): - """Get a specific portgroup. - - :param portgroup: The value can be the name or ID of a chassis or a - :class:`~openstack.bare_metal.v1.portgroup.PortGroup` instance. - :param dict query: Optional query parameters to be sent to restrict - the portgroup properties returned. Available parameters include: - - * ``fields``: A list containing one or more fields to be returned - in the response. This may lead to some performance gain - because other fields of the resource are not refreshed. - - :returns: One :class:`~openstack.bare_metal.v1.portgroup.PortGroup` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - portgroup matching the name or ID could be found. - """ - return self._get(_portgroup.PortGroup, portgroup, **query) - - def update_portgroup(self, portgroup, **attrs): - """Update a portgroup. - - :param chassis: Either the name or the ID of a portgroup or an instance - of :class:`~openstack.bare_metal.v1.portgroup.PortGroup`. 
- :param dict attrs: The attributes to update on the portgroup - represented by the ``portgroup`` parameter. - - :returns: The updated portgroup. - :rtype: :class:`~openstack.bare_metal.v1.portgroup.PortGroup` - """ - return self._update(_portgroup.PortGroup, portgroup, **attrs) - - def delete_portgroup(self, portgroup, ignore_missing=True): - """Delete a portgroup. - - :param portgroup: The value can be either the name or ID of a portgroup - or a :class:`~openstack.bare_metal.v1.portgroup.PortGroup` - instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised - when the portgroup could not be found. When set to ``True``, no - exception will be raised when attempting to delete a non-existent - portgroup. - - :returns: The instance of the portgroup which was deleted. - :rtype: :class:`~openstack.bare_metal.v1.portgroup.PortGroup`. - """ - return self._delete(_portgroup.PortGroup, portgroup, - ignore_missing=ignore_missing) diff --git a/openstack/bare_metal/v1/chassis.py b/openstack/bare_metal/v1/chassis.py deleted file mode 100644 index 7440e2f46a..0000000000 --- a/openstack/bare_metal/v1/chassis.py +++ /dev/null @@ -1,63 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.bare_metal import bare_metal_service -from openstack import resource2 as resource - - -class Chassis(resource.Resource): - - resources_key = 'chassis' - base_path = '/chassis' - service = bare_metal_service.BareMetalService() - - # capabilities - allow_create = True - allow_get = True - allow_update = True - allow_delete = True - allow_list = True - patch_update = True - - _query_mapping = resource.QueryParameters( - 'fields' - ) - - #: Timestamp at which the chassis was created. - created_at = resource.Body('created_at') - #: A descriptive text about the service - description = resource.Body('description') - #: A set of one or more arbitrary metadata key and value pairs. - extra = resource.Body('extra') - #: The UUID for the chassis - id = resource.Body('uuid', alternate_id=True) - #: A list of relative links, including the self and bookmark links. - links = resource.Body('links', type=list) - #: Links to the collection of nodes contained in the chassis - nodes = resource.Body('nodes', type=list) - #: Timestamp at which the chassis was last updated. - updated_at = resource.Body('updated_at') - - -class ChassisDetail(Chassis): - - base_path = '/chassis/detail' - - # capabilities - allow_create = False - allow_get = False - allow_update = False - allow_delete = False - allow_list = True - - #: The UUID for the chassis - id = resource.Body('uuid', alternate_id=True) diff --git a/openstack/bare_metal/v1/driver.py b/openstack/bare_metal/v1/driver.py deleted file mode 100644 index fcf6229b3a..0000000000 --- a/openstack/bare_metal/v1/driver.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.bare_metal import bare_metal_service -from openstack import resource2 as resource - - -class Driver(resource.Resource): - - resources_key = 'drivers' - base_path = '/drivers' - service = bare_metal_service.BareMetalService() - - # capabilities - allow_create = False - allow_get = True - allow_update = False - allow_delete = False - allow_list = True - - # NOTE: Query mapping? - - #: The name of the driver - name = resource.Body('name', alternate_id=True) - #: A list of active hosts that support this driver. - hosts = resource.Body('hosts', type=list) - #: A list of relative links, including the self and bookmark links. - links = resource.Body('links', type=list) - #: A list of links to driver properties. - properties = resource.Body('properties', type=list) diff --git a/openstack/bare_metal/v1/node.py b/openstack/bare_metal/v1/node.py deleted file mode 100644 index 8d920e66fc..0000000000 --- a/openstack/bare_metal/v1/node.py +++ /dev/null @@ -1,135 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.bare_metal import bare_metal_service -from openstack import resource2 as resource - - -class Node(resource.Resource): - - resources_key = 'nodes' - base_path = '/nodes' - service = bare_metal_service.BareMetalService() - - # capabilities - allow_create = True - allow_get = True - allow_update = True - allow_delete = True - allow_list = True - patch_update = True - - _query_mapping = resource.QueryParameters( - 'associated', 'driver', 'fields', 'provision_state', 'resource_class', - instance_id='instance_uuid', - is_maintenance='maintenance', - ) - - # Properties - #: The UUID of the chassis associated wit this node. Can be empty or None. - chassis_id = resource.Body("chassis_uuid") - #: The current clean step. - clean_step = resource.Body("clean_step") - #: Timestamp at which the node was last updated. - created_at = resource.Body("created_at") - #: The name of the driver. - driver = resource.Body("driver") - #: All the metadata required by the driver to manage this node. List of - #: fields varies between drivers, and can be retrieved from the - #: :class:`openstack.bare_metal.v1.driver.Driver` resource. - driver_info = resource.Body("driver_info", type=dict) - #: Internal metadata set and stored by node's driver. This is read-only. - driver_internal_info = resource.Body("driver_internal_info", type=dict) - #: A set of one or more arbitrary metadata key and value pairs. - extra = resource.Body("extra") - #: The UUID of the node resource. - id = resource.Body("uuid", alternate_id=True) - #: Information used to customize the deployed image, e.g. size of root - #: partition, config drive in the form of base64 encoded string and other - #: metadata. - instance_info = resource.Body("instance_info") - #: UUID of the nova instance associated with this node. - instance_id = resource.Body("instance_uuid") - #: Whether console access is enabled on this node. 
- is_console_enabled = resource.Body("console_enabled", type=bool) - #: Whether node is currently in "maintenance mode". Nodes put into - #: maintenance mode are removed from the available resource pool. - is_maintenance = resource.Body("maintenance", type=bool) - #: Any error from the most recent transaction that started but failed to - #: finish. - last_error = resource.Body("last_error") - #: A list of relative links, including self and bookmark links. - links = resource.Body("links", type=list) - #: user settable description of the reason why the node was placed into - #: maintenance mode. - maintenance_reason = resource.Body("maintenance_reason") - #: Human readable identifier for the node. May be undefined. Certain words - #: are reserved. Added in API microversion 1.5 - name = resource.Body("name") - #: Network interface provider to use when plumbing the network connections - #: for this node. Introduced in API microversion 1.20. - network_interface = resource.Body("network_interface") - #: Links to the collection of ports on this node. - ports = resource.Body("ports", type=list) - #: Links to the collection of portgroups on this node. Available since - #: API microversion 1.24. - port_groups = resource.Body("portgroups", type=list) - #: The current power state. Usually "power on" or "power off", but may be - #: "None" if service is unable to determine the power state. - power_state = resource.Body("power_state") - #: Physical characteristics of the node. Content populated by the service - #: during inspection. - properties = resource.Body("properties", type=dict) - #: The current provisioning state of the node. - provision_state = resource.Body("provision_state") - #: The current RAID configuration of the node. - raid_config = resource.Body("raid_config") - #: The name of an service conductor host which is holding a lock on this - #: node, if a lock is held. 
- reservation = resource.Body("reservation") - #: A string to be used by external schedulers to identify this node as a - #: unit of a specific type of resource. Added in API microversion 1.21. - resource_class = resource.Body("resource_class") - #: Links to the collection of states. - states = resource.Body("states", type=list) - #: The requested state if a provisioning action has been requested. For - #: example, ``AVAILABLE``, ``DEPLOYING``, ``DEPLOYWAIT``, ``DEPLOYING``, - #: ``ACTIVE`` etc. - target_provision_state = resource.Body("target_provision_state") - #: The requested state during a state transition. - target_power_state = resource.Body("target_power_state") - #: The requested RAID configration of the node which will be applied when - #: the node next transitions through the CLEANING state. - target_raid_config = resource.Body("target_raid_config") - #: Timestamp at which the node was last updated. - updated_at = resource.Body("updated_at") - - -class NodeDetail(Node): - - base_path = '/nodes/detail' - - # capabilities - allow_create = False - allow_get = False - allow_update = False - allow_delete = False - allow_list = True - - _query_mapping = resource.QueryParameters( - 'associated', 'driver', 'fields', 'provision_state', 'resource_class', - instance_id='instance_uuid', - is_maintenance='maintenance', - ) - - #: The UUID of the node resource. - id = resource.Body("uuid", alternate_id=True) diff --git a/openstack/bare_metal/v1/port.py b/openstack/bare_metal/v1/port.py deleted file mode 100644 index 99aa0ffe12..0000000000 --- a/openstack/bare_metal/v1/port.py +++ /dev/null @@ -1,82 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.bare_metal import bare_metal_service -from openstack import resource2 as resource - - -class Port(resource.Resource): - - resources_key = 'ports' - base_path = '/ports' - service = bare_metal_service.BareMetalService() - - # capabilities - allow_create = True - allow_get = True - allow_update = True - allow_delete = True - allow_list = True - patch_update = True - - _query_mapping = resource.QueryParameters( - 'fields' - ) - - #: The physical hardware address of the network port, typically the - #: hardware MAC address. - address = resource.Body('address') - #: Timestamp at which the port was created. - created_at = resource.Body('created_at') - #: A set of one or more arbitrary metadata key and value pairs. - extra = resource.Body('extra') - #: The UUID of the port - id = resource.Body('uuid', alternate_id=True) - #: Internal metadata set and stored by the port. This field is read-only. - #: Added in API microversion 1.18. - internal_info = resource.Body('internal_info') - #: Whether PXE is enabled on the port. Added in API microversion 1.19. - is_pxe_enabled = resource.Body('pxe_enabled', type=bool) - #: A list of relative links, including the self and bookmark links. - links = resource.Body('links', type=list) - #: The port bindig profile. If specified, must contain ``switch_id`` and - #: ``port_id`` fields. ``switch_info`` field is an optional string field - #: to be used to store vendor specific information. Added in API - #: microversion 1.19. 
- local_link_connection = resource.Body('local_link_connection') - #: The UUID of node this port belongs to - node_id = resource.Body('node_uuid') - #: The UUID of PortGroup this port belongs to. Added in API microversion - #: 1.23. - port_group_id = resource.Body('portgroup_uuid') - #: Timestamp at which the port was last updated. - updated_at = resource.Body('updated_at') - - -class PortDetail(Port): - - base_path = '/ports/detail' - - # capabilities - allow_create = False - allow_get = False - allow_update = False - allow_delete = False - allow_list = True - - _query_mapping = resource.QueryParameters( - 'address', 'fields', 'node', 'portgroup', - node_id='node_uuid', - ) - - #: The UUID of the port - id = resource.Body('uuid', alternate_id=True) diff --git a/openstack/bare_metal/v1/port_group.py b/openstack/bare_metal/v1/port_group.py deleted file mode 100644 index 7cec820e97..0000000000 --- a/openstack/bare_metal/v1/port_group.py +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.bare_metal import bare_metal_service -from openstack import resource2 as resource - - -class PortGroup(resource.Resource): - - resources_key = 'portgroups' - base_path = '/portgroups' - service = bare_metal_service.BareMetalService() - - # capabilities - allow_create = True - allow_get = True - allow_update = True - allow_delete = True - allow_list = True - patch_update = True - - _query_mapping = resource.QueryParameters( - 'node', 'address', 'fields', - ) - - #: The physical hardware address of the portgroup, typically the hardware - #: MAC address. Added in API microversion 1.23. - address = resource.Body('address') - #: Timestamp at which the portgroup was created. - created_at = resource.Body('created_at') - #: A set of one or more arbitrary metadata key and value pairs. - extra = resource.Body('extra', type=dict) - #: The name of the portgroup - name = resource.Body('name') - #: The UUID for the portgroup - id = resource.Body('uuid', alternate_id=True) - #: Internal metadaa set and stored by the portgroup. - internal_info = resource.Body('internal_info') - #: Whether ports that are members of this portgroup can be used as - #: standalone ports. Added in API microversion 1.23. - is_standalone_ports_supported = resource.Body('standalone_ports_supported', - type=bool) - #: A list of relative links, including the self and bookmark links. - links = resource.Body('links', type=list) - #: UUID of the node this portgroup belongs to. - node_id = resource.Body('node_uuid') - #: A list of links to the collection of ports belonging to this portgroup. - #: Added in API microversion 1.24. - ports = resource.Body('ports') - #: Timestamp at which the portgroup was last updated. 
- updated_at = resource.Body('updated_at') - - -class PortGroupDetail(PortGroup): - - base_path = '/portgroups/detail' - - allow_create = False - allow_get = False - allow_update = False - allow_delete = False - allow_list = True - - _query_mapping = resource.QueryParameters( - 'node', 'address', - ) - - #: The UUID for the portgroup - id = resource.Body('uuid', alternate_id=True) diff --git a/openstack/bare_metal/version.py b/openstack/bare_metal/version.py deleted file mode 100644 index f98f3b1f46..0000000000 --- a/openstack/bare_metal/version.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.bare_metal import bare_metal_service -from openstack import resource2 - - -class Version(resource2.Resource): - resource_key = 'version' - resources_key = 'versions' - base_path = '/' - service = bare_metal_service.BareMetalService( - version=bare_metal_service.BareMetalService.UNVERSIONED - ) - - # Capabilities - allow_list = True - - # Attributes - links = resource2.Body('links') - status = resource2.Body('status') - updated = resource2.Body('updated') diff --git a/openstack/cluster/__init__.py b/openstack/baremetal/__init__.py similarity index 100% rename from openstack/cluster/__init__.py rename to openstack/baremetal/__init__.py diff --git a/openstack/baremetal/baremetal_service.py b/openstack/baremetal/baremetal_service.py new file mode 100644 index 0000000000..78398a3a5e --- /dev/null +++ b/openstack/baremetal/baremetal_service.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal.v1 import _proxy +from openstack import service_description + + +class BaremetalService(service_description.ServiceDescription[_proxy.Proxy]): + """The bare metal service.""" + + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/baremetal/configdrive.py b/openstack/baremetal/configdrive.py new file mode 100644 index 0000000000..578802e074 --- /dev/null +++ b/openstack/baremetal/configdrive.py @@ -0,0 +1,167 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +"""Helpers for building configdrive compatible with the Bare Metal service.""" + +import base64 +import contextlib +import gzip +import json +import os +import shutil +import subprocess +import tempfile + + +@contextlib.contextmanager +def populate_directory( + metadata, + user_data=None, + versions=None, + network_data=None, + vendor_data=None, +): + """Populate a directory with configdrive files. + + :param dict metadata: Metadata. + :param bytes user_data: Vendor-specific user data. + :param versions: List of metadata versions to support. + :param dict network_data: Networking configuration. + :param dict vendor_data: Extra supplied vendor data. 
+ :return: a context manager yielding a directory with files + """ + d = tempfile.mkdtemp() + versions = versions or ('2012-08-10', 'latest') + try: + for version in versions: + subdir = os.path.join(d, 'openstack', version) + if not os.path.exists(subdir): + os.makedirs(subdir) + + with open(os.path.join(subdir, 'meta_data.json'), 'w') as fp: + json.dump(metadata, fp) + + if network_data: + with open( + os.path.join(subdir, 'network_data.json'), 'w' + ) as fp: + json.dump(network_data, fp) + + if vendor_data: + with open( + os.path.join(subdir, 'vendor_data2.json'), 'w' + ) as fp: + json.dump(vendor_data, fp) + + if user_data: + # Strictly speaking, user data is binary, but in many cases + # it's actually a text (cloud-init, ignition, etc). + flag = 't' if isinstance(user_data, str) else 'b' + with open(os.path.join(subdir, 'user_data'), f'w{flag}') as fp: + fp.write(user_data) + + yield d + finally: + shutil.rmtree(d) + + +def build( + metadata, + user_data=None, + versions=None, + network_data=None, + vendor_data=None, +): + """Make a configdrive compatible with the Bare Metal service. + + Requires the genisoimage utility to be available. + + :param dict metadata: Metadata. + :param user_data: Vendor-specific user data. + :param versions: List of metadata versions to support. + :param dict network_data: Networking configuration. + :param dict vendor_data: Extra supplied vendor data. + :return: configdrive contents as a base64-encoded string. + """ + with populate_directory( + metadata, user_data, versions, network_data, vendor_data + ) as path: + return pack(path) + + +def pack(path: str) -> str: + """Pack a directory with files into a Bare Metal service configdrive. + + Creates an ISO image with the files and label "config-2". + + :param str path: Path to directory with files + :return: configdrive contents as a base64-encoded string. 
+ """ + with tempfile.NamedTemporaryFile() as tmpfile: + # NOTE(toabctl): Luckily, genisoimage, mkisofs and xorrisofs understand + # the same parameters which are currently used. + error: Exception | None + for c in ['genisoimage', 'mkisofs', 'xorrisofs']: + try: + p = subprocess.Popen( # noqa: S603 + [ + c, + '-o', + tmpfile.name, + '-ldots', + '-allow-lowercase', + '-allow-multidot', + '-l', + '-publisher', + 'metalsmith', + '-quiet', + '-J', + '-r', + '-V', + 'config-2', + path, + ], + stdout=subprocess.PIPE, + stderr=subprocess.PIPE, + ) + except OSError as e: + error = e + else: + error = None + break + + if error: + raise RuntimeError( + 'Error generating the configdrive. Make sure the ' + '"genisoimage", "mkisofs" or "xorrisofs" tool is installed. ' + f'Error: {error}' + ) + + stdout, stderr = p.communicate() + if p.returncode != 0: + raise RuntimeError( + 'Error generating the configdrive.' + f'Stdout: "{stdout.decode()}". Stderr: "{stderr.decode()}"' + ) + + tmpfile.seek(0) + + with tempfile.NamedTemporaryFile() as tmpzipfile: + with gzip.GzipFile(fileobj=tmpzipfile, mode='wb') as gz_file: + shutil.copyfileobj(tmpfile, gz_file) + + tmpzipfile.seek(0) + # NOTE(dtantsur): Ironic expects configdrive to be a string, but + # base64 returns bytes on Python 3. + cd = base64.b64encode(tmpzipfile.read()).decode() + + return cd diff --git a/openstack/cluster/v1/__init__.py b/openstack/baremetal/v1/__init__.py similarity index 100% rename from openstack/cluster/v1/__init__.py rename to openstack/baremetal/v1/__init__.py diff --git a/openstack/baremetal/v1/_common.py b/openstack/baremetal/v1/_common.py new file mode 100644 index 0000000000..0ad7c6ee3c --- /dev/null +++ b/openstack/baremetal/v1/_common.py @@ -0,0 +1,264 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import fields +from openstack import resource + + +RETRIABLE_STATUS_CODES = [ + # HTTP Conflict - happens if a node is locked + 409, + # HTTP Service Unavailable happens if there's no free conductor + 503, +] +"""HTTP status codes that should be retried.""" + + +PROVISIONING_VERSIONS = { + 'abort': 13, + 'adopt': 17, + 'clean': 15, + 'inspect': 6, + 'manage': 4, + 'provide': 4, + 'rescue': 38, + 'unrescue': 38, + 'unhold': 85, + 'service': 87, +} +"""API microversions introducing provisioning verbs.""" + + +# Based on https://docs.openstack.org/ironic/latest/contributor/states.html +EXPECTED_STATES = { + 'active': 'active', + 'adopt': 'available', + 'clean': 'manageable', + 'deleted': 'available', + 'inspect': 'manageable', + 'manage': 'manageable', + 'provide': 'available', + 'rebuild': 'active', + 'rescue': 'rescue', +} +"""Mapping of provisioning actions to expected stable states.""" + +EXPECTED_POWER_STATES = { + 'power on': 'power on', + 'power off': 'power off', + 'rebooting': 'power on', + 'soft power off': 'power off', + 'soft rebooting': 'power on', +} +"""Mapping of target power states to expected power states.""" + +STATE_VERSIONS = { + 'available': '1.1', + 'enroll': '1.11', + 'manageable': '1.4', +} +"""API versions when certain states were introduced.""" + +VIF_VERSION = '1.28' +"""API version in which the VIF operations were introduced.""" + +VIF_OPTIONAL_PARAMS_VERSION = '1.67' +"""API version in which the VIF optional parameters were introduced.""" + +INJECT_NMI_VERSION = '1.29' +"""API vresion in which support for 
injecting NMI was introduced.""" + +CONFIG_DRIVE_REBUILD_VERSION = '1.35' +"""API version in which rebuild accepts a configdrive.""" + +RESET_INTERFACES_VERSION = '1.45' +"""API version in which the reset_interfaces parameter was introduced.""" + +CONFIG_DRIVE_DICT_VERSION = '1.56' +"""API version in which configdrive can be a dictionary.""" + +DEPLOY_STEPS_VERSION = '1.69' +"""API version in which deploy_steps was added to node provisioning.""" + +CHANGE_BOOT_MODE_VERSION = '1.76' +"""API version in which boot_mode and secure_boot states can be changed""" + +FIRMWARE_VERSION = '1.86' +"""API version in which firmware components of a node can be accessed""" + +VMEDIA_VERSION = '1.89' +"""API version in which the virtual media operations were introduced.""" + +RUNBOOKS_VERSION = '1.92' +"""API version in which a runbook can be used in place of arbitrary steps +for provisioning""" + + +class Resource(resource.Resource): + """A subclass for resources that use the path to request a detailed view. + + Two patterns exist for fetching the detailed view when listing resources. + + - As part of the path. For example: + + GET /v1/ports/detail + + - As a query parameter. For example: + + GET /v1/conductors?detail=True + + This handles resources that use the former pattern, namely: + + - chassis + - nodes + - ports + - portgroups + """ + + base_path: str + + @classmethod + def list( + cls, + session, + paginated=True, + base_path=None, + allow_unknown_params=False, + *, + microversion=None, + details=False, + **params, + ): + """This method is a generator which yields resource objects. + + This resource object list generator handles pagination and takes query + params for response filtering. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param bool paginated: ``True`` if a GET to this resource returns + a paginated series of responses, or ``False`` if a GET returns only + one page of data. 
**When paginated is False only one page of data + will be returned regardless of the API's support of pagination.** + :param str base_path: Base part of the URI for listing resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param bool allow_unknown_params: ``True`` to accept, but discard + unknown query parameters. This allows getting list of 'filters' and + passing everything known to the server. ``False`` will result in + validation exception when unknown query parameters are passed. + :param str microversion: API version to override the negotiated one. + :param bool details: Whether to return detailed resource records. + :param dict params: These keyword arguments are passed through the + :meth:`~openstack.resource.QueryParamter._transpose` method + to find if any of them match expected query parameters to be sent + in the *params* argument to + :meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally + checked against the :data:`~openstack.resource.Resource.base_path` + format string to see if any path fragments need to be filled in by + the contents of this argument. + Parameters supported as filters by the server side are passed in + the API call, remaining parameters are applied as filters to the + retrieved results. + + :return: A generator of :class:`Resource` objects. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_list` is not set to ``True``. + :raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query + contains invalid params. 
+ """ + if not base_path: + base_path = cls.base_path + if details: + base_path += '/detail' + + return super().list( + session, + paginated=paginated, + base_path=base_path, + allow_unknown_params=allow_unknown_params, + microversion=microversion, + **params, + ) + + @classmethod + def find( + cls, + session, + name_or_id, + ignore_missing=True, + list_base_path=None, + *, + microversion=None, + all_projects=None, + details=False, + **params, + ): + """Find a resource by its name or id. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param name_or_id: This resource's identifier, if needed by + the request. The default is ``None``. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.ResourceNotFound` will be raised when + the resource does not exist. When set to ``True``, None will be + returned when attempting to find a nonexistent resource. + :param str list_base_path: base_path to be used when need listing + resources. + :param str microversion: API version to override the negotiated one. + :param bool details: Whether to return detailed resource records. + :param dict params: Any additional parameters to be passed into + underlying methods, such as to + :meth:`~openstack.resource.Resource.existing` in order to pass on + URI parameters. + + :return: The :class:`Resource` object matching the given name or id + or None if nothing matches. + :raises: :class:`openstack.exceptions.DuplicateResource` if more + than one resource is found for this request. + :raises: :class:`openstack.exceptions.ResourceNotFound` if nothing + is found and ignore_missing is ``False``. 
+ """ + if not list_base_path: + list_base_path = cls.base_path + if details: + list_base_path += '/detail' + + return super().find( + session, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + microversion=microversion, + all_projects=all_projects, + **params, + ) + + +def comma_separated_list(value): + if value is None: + return None + else: + return ','.join(value) + + +def fields_type(value, resource_type): + if value is None: + return None + + resource_mapping = { + key: value.name + for key, value in resource_type.__dict__.items() + if isinstance(value, fields.Body) + } + + return comma_separated_list(resource_mapping.get(x, x) for x in value) diff --git a/openstack/baremetal/v1/_proxy.py b/openstack/baremetal/v1/_proxy.py new file mode 100644 index 0000000000..ed023e6bc4 --- /dev/null +++ b/openstack/baremetal/v1/_proxy.py @@ -0,0 +1,2136 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +import requests + +from openstack.baremetal.v1 import _common +from openstack.baremetal.v1 import allocation as _allocation +from openstack.baremetal.v1 import chassis as _chassis +from openstack.baremetal.v1 import conductor as _conductor +from openstack.baremetal.v1 import deploy_templates as _deploytemplates +from openstack.baremetal.v1 import driver as _driver +from openstack.baremetal.v1 import inspection_rules as _inspectionrules +from openstack.baremetal.v1 import node as _node +from openstack.baremetal.v1 import port as _port +from openstack.baremetal.v1 import port_group as _portgroup +from openstack.baremetal.v1 import runbooks as _runbooks +from openstack.baremetal.v1 import volume_connector as _volumeconnector +from openstack.baremetal.v1 import volume_target as _volumetarget +from openstack import exceptions +from openstack import proxy +from openstack import resource +from openstack import utils + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + retriable_status_codes = _common.RETRIABLE_STATUS_CODES + + _resource_registry = { + "allocation": _allocation.Allocation, + "chassis": _chassis.Chassis, + "conductor": _conductor.Conductor, + "deploy_template": _deploytemplates.DeployTemplate, + "driver": _driver.Driver, + "node": _node.Node, + "port": _port.Port, + "port_group": _portgroup.PortGroup, + "runbook": _runbooks.Runbook, + "volume_connector": _volumeconnector.VolumeConnector, + "volume_target": _volumetarget.VolumeTarget, + "inspection_rules": _inspectionrules.InspectionRule, + } + + def _get_with_fields(self, resource_type, value, fields=None): + """Fetch a bare metal resource. + + :param resource_type: The type of resource to get. + :type resource_type: :class:`~openstack.resource.Resource` + :param value: The value to get. Can be either the ID of a + resource or a :class:`~openstack.resource.Resource` + subclass. + :param fields: Limit the resource fields to fetch. 
+ + :returns: The result of the ``fetch`` + :rtype: :class:`~openstack.resource.Resource` + """ + res = self._get_resource(resource_type, value) + kwargs = {} + if fields: + kwargs['fields'] = _common.fields_type(fields, resource_type) + return res.fetch( + self, + error_message=f"No {resource_type.__name__} found for {value}", + **kwargs, + ) + + # ========== Chassis ========== + + def chassis(self, details=False, **query): + """Retrieve a generator of chassis. + + :param details: A boolean indicating whether the detailed information + for every chassis should be returned. + :param dict query: Optional query parameters to be sent to + restrict the chassis to be returned. Available parameters include: + + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``limit``: Requests at most the specified number of items be + returned from the query. + * ``marker``: Specifies the ID of the last-seen chassis. Use the + ``limit`` parameter to make an initial limited request and + use the ID of the last-seen chassis from the response as + the ``marker`` value in a subsequent limited request. + * ``sort_dir``: Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. You can specify multiple pairs of sort + key and sort direction query parameters. If you omit the + sort direction in a pair, the API uses the natural sorting + direction of the server attribute that is provided as the + ``sort_key``. + + :returns: A generator of chassis instances. 
+ """ + return _chassis.Chassis.list(self, details=details, **query) + + def create_chassis(self, **attrs): + """Create a new chassis from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.chassis.Chassis`. + + :returns: The results of chassis creation. + :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis`. + """ + return self._create(_chassis.Chassis, **attrs) + + # TODO(stephenfin): Delete this. You can't lookup a chassis by name so this + # is identical to get_chassis + def find_chassis(self, name_or_id, ignore_missing=True, *, details=True): + """Find a single chassis. + + :param str name_or_id: The ID of a chassis. + :param bool ignore_missing: When set to ``False``, an exception of + :class:`~openstack.exceptions.NotFoundException` will be raised + when the chassis does not exist. When set to `True``, None will + be returned when attempting to find a nonexistent chassis. + :param details: A boolean indicating whether the detailed information + for the chassis should be returned. + + :returns: One :class:`~openstack.baremetal.v1.chassis.Chassis` object + or None. + """ + return self._find( + _chassis.Chassis, + name_or_id, + ignore_missing=ignore_missing, + details=details, + ) + + def get_chassis(self, chassis, fields=None): + """Get a specific chassis. + + :param chassis: The value can be the ID of a chassis or a + :class:`~openstack.baremetal.v1.chassis.Chassis` instance. + :param fields: Limit the resource fields to fetch. + + :returns: One :class:`~openstack.baremetal.v1.chassis.Chassis` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + chassis matching the name or ID could be found. + """ + return self._get_with_fields(_chassis.Chassis, chassis, fields=fields) + + def update_chassis(self, chassis, **attrs): + """Update a chassis. + + :param chassis: Either the ID of a chassis, or an instance + of :class:`~openstack.baremetal.v1.chassis.Chassis`. 
+ :param dict attrs: The attributes to update on the chassis represented + by the ``chassis`` parameter. + + :returns: The updated chassis. + :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis` + """ + return self._update(_chassis.Chassis, chassis, **attrs) + + def patch_chassis(self, chassis, patch): + """Apply a JSON patch to the chassis. + + :param chassis: The value can be the ID of a chassis or a + :class:`~openstack.baremetal.v1.chassis.Chassis` instance. + :param patch: JSON patch to apply. + + :returns: The updated chassis. + :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis` + """ + return self._get_resource(_chassis.Chassis, chassis).patch(self, patch) + + def delete_chassis(self, chassis, ignore_missing=True): + """Delete a chassis. + + :param chassis: The value can be either the ID of a chassis or + a :class:`~openstack.baremetal.v1.chassis.Chassis` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the chassis could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + chassis. + + :returns: The instance of the chassis which was deleted. + :rtype: :class:`~openstack.baremetal.v1.chassis.Chassis`. + """ + return self._delete( + _chassis.Chassis, chassis, ignore_missing=ignore_missing + ) + + # ========== Drivers ========== + + def drivers(self, details=False, **query): + """Retrieve a generator of drivers. + + :param bool details: A boolean indicating whether the detailed + information for every driver should be returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + :returns: A generator of driver instances. + """ + # NOTE(dtantsur): details are available starting with API microversion + # 1.30. Thus we do not send any value if not needed. 
+ if details: + query['details'] = True + return self._list(_driver.Driver, **query) + + def get_driver(self, driver): + """Get a specific driver. + + :param driver: The value can be the name of a driver or a + :class:`~openstack.baremetal.v1.driver.Driver` instance. + + :returns: One :class:`~openstack.baremetal.v1.driver.Driver` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + driver matching the name could be found. + """ + return self._get(_driver.Driver, driver) + + def list_driver_vendor_passthru(self, driver): + """Get driver's vendor_passthru methods. + + :param driver: The value can be the name of a driver or a + :class:`~openstack.baremetal.v1.driver.Driver` instance. + + :returns: One :dict: of vendor methods with corresponding usages + :raises: :class:`~openstack.exceptions.NotFoundException` when no + driver matching the name could be found. + """ + driver = self.get_driver(driver) + return driver.list_vendor_passthru(self) + + def call_driver_vendor_passthru( + self, + driver: str | _driver.Driver, + verb: str, + method: str, + body: object = None, + ) -> requests.Response: + """Call driver's vendor_passthru method. + + :param driver: The value can be the name of a driver or a + :class:`~openstack.baremetal.v1.driver.Driver` instance. + :param verb: One of GET, POST, PUT, DELETE, + depending on the driver and method. + :param method: Name of vendor method. + :param body: passed to the vendor function as json body. + + :returns: Server response + """ + return self.get_driver(driver).call_vendor_passthru( + self, verb, method, body + ) + + # ========== Nodes ========== + + def nodes(self, details=False, **query): + """Retrieve a generator of nodes. + + :param details: A boolean indicating whether the detailed information + for every node should be returned. + :param dict query: Optional query parameters to be sent to restrict + the nodes returned. 
Available parameters include: + + * ``associated``: Only return those which are, or are not, + associated with an ``instance_id``. + * ``conductor_group``: Only return those in the specified + ``conductor_group``. + * ``driver``: Only return those with the specified ``driver``. + * ``fault``: Only return those with the specified fault type. + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``instance_id``: Only return the node with this specific instance + UUID or an empty set if not found. + * ``is_maintenance``: Only return those with ``maintenance`` set to + ``True`` or ``False``. + * ``limit``: Requests at most the specified number of nodes be + returned from the query. + * ``marker``: Specifies the ID of the last-seen node. Use the + ``limit`` parameter to make an initial limited request and + use the ID of the last-seen node from the response as + the ``marker`` value in a subsequent limited request. + * ``provision_state``: Only return those nodes with the specified + ``provision_state``. + * ``resource_class``: Only return those with the specified + ``resource_class``. + * ``shard``: Only return nodes matching the supplied shard key. + * ``sort_dir``: Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. You can specify multiple pairs of sort + key and sort direction query pa rameters. 
If you omit the + sort direction in a pair, the API uses the natural sorting + direction of the server attribute that is provided as the + ``sort_key``. + + :returns: A generator of :class:`~openstack.baremetal.v1.node.Node` + """ + return _node.Node.list(self, details=details, **query) + + def create_node(self, **attrs): + """Create a new node from attributes. + + See :meth:`~openstack.baremetal.v1.node.Node.create` for an explanation + of the initial provision state. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.node.Node`. + + :returns: The results of node creation. + :rtype: :class:`~openstack.baremetal.v1.node.Node`. + """ + return self._create(_node.Node, **attrs) + + def find_node(self, name_or_id, ignore_missing=True, *, details=True): + """Find a single node. + + :param str name_or_id: The name or ID of a node. + :param bool ignore_missing: When set to ``False``, an exception of + :class:`~openstack.exceptions.NotFoundException` will be raised + when the node does not exist. When set to `True``, None will + be returned when attempting to find a nonexistent node. + :param details: A boolean indicating whether the detailed information + for the node should be returned. + :returns: One :class:`~openstack.baremetal.v1.node.Node` object + or None. + """ + return self._find( + _node.Node, + name_or_id, + ignore_missing=ignore_missing, + details=details, + ) + + def get_node(self, node, fields=None): + """Get a specific node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param fields: Limit the resource fields to fetch. + + :returns: One :class:`~openstack.baremetal.v1.node.Node` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + node matching the name or ID could be found. 
+ """ + return self._get_with_fields(_node.Node, node, fields=fields) + + def get_node_inventory(self, node): + """Get a specific node's hardware inventory. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + + :returns: The node inventory + :raises: :class:`~openstack.exceptions.NotFoundException` when no + inventory could be found. + """ + res = self._get_resource(_node.Node, node) + return res.get_node_inventory(self, node) + + def update_node(self, node, retry_on_conflict=True, **attrs): + """Update a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param bool retry_on_conflict: Whether to retry HTTP CONFLICT error. + Most of the time it can be retried, since it is caused by the node + being locked. However, when setting ``instance_id``, this is + a normal code and should not be retried. + :param dict attrs: The attributes to update on the node represented + by the ``node`` parameter. + + :returns: The updated node. + :rtype: :class:`~openstack.baremetal.v1.node.Node` + """ + res = self._get_resource(_node.Node, node, **attrs) + return res.commit(self, retry_on_conflict=retry_on_conflict) + + def patch_node( + self, node, patch, reset_interfaces=None, retry_on_conflict=True + ): + """Apply a JSON patch to the node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param patch: JSON patch to apply. + :param bool reset_interfaces: whether to reset the node hardware + interfaces to their defaults. This works only when changing + drivers. Added in API microversion 1.45. + :param bool retry_on_conflict: Whether to retry HTTP CONFLICT error. + Most of the time it can be retried, since it is caused by the node + being locked. However, when setting ``instance_id``, this is + a normal code and should not be retried. 
+ + See `Update Node + `_ + for details. + + :returns: The updated node. + :rtype: :class:`~openstack.baremetal.v1.node.Node` + """ + res = self._get_resource(_node.Node, node) + return res.patch( + self, + patch, + retry_on_conflict=retry_on_conflict, + reset_interfaces=reset_interfaces, + ) + + def set_node_provision_state( + self, + node, + target, + config_drive=None, + clean_steps=None, + rescue_password=None, + wait=False, + timeout=None, + deploy_steps=None, + ): + """Run an action modifying node's provision state. + + This call is asynchronous, it will return success as soon as the Bare + Metal service acknowledges the request. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param target: Provisioning action, e.g. ``active``, ``provide``. + See the Bare Metal service documentation for available actions. + :param config_drive: Config drive to pass to the node, only valid + for ``active` and ``rebuild`` targets. You can use functions from + :mod:`openstack.baremetal.configdrive` to build it. + :param clean_steps: Clean steps to execute, only valid for ``clean`` + target. + :param rescue_password: Password for the rescue operation, only valid + for ``rescue`` target. + :param wait: Whether to wait for the node to get into the expected + state. The expected state is determined from a combination of + the current provision state and ``target``. + :param timeout: If ``wait`` is set to ``True``, specifies how much (in + seconds) to wait for the expected state to be reached. The value of + ``None`` (the default) means no client-side timeout. + :param deploy_steps: Deploy steps to execute, only valid for ``active`` + and ``rebuild`` target. + + :returns: The updated :class:`~openstack.baremetal.v1.node.Node` + :raises: ValueError if ``config_drive``, ``clean_steps``, + ``deploy_steps`` or ``rescue_password`` are provided with an + invalid ``target``. 
+ """ + res = self._get_resource(_node.Node, node) + return res.set_provision_state( + self, + target, + config_drive=config_drive, + clean_steps=clean_steps, + rescue_password=rescue_password, + wait=wait, + timeout=timeout, + deploy_steps=deploy_steps, + ) + + def get_node_boot_device(self, node): + """Get node boot device + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :return: The node boot device + """ + res = self._get_resource(_node.Node, node) + return res.get_boot_device(self) + + def set_node_boot_device(self, node, boot_device, persistent=False): + """Set node boot device + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param boot_device: Boot device to assign to the node. + :param persistent: If the boot device change is maintained after node + reboot + :return: The updated :class:`~openstack.baremetal.v1.node.Node` + """ + res = self._get_resource(_node.Node, node) + return res.set_boot_device(self, boot_device, persistent=persistent) + + def get_node_supported_boot_devices(self, node): + """Get supported boot devices for node + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :return: The node boot device + """ + res = self._get_resource(_node.Node, node) + return res.get_supported_boot_devices(self) + + def set_node_boot_mode(self, node, target): + """Make a request to change node's boot mode + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param target: Boot mode to set for node, one of either 'uefi'/'bios'. 
+ """ + res = self._get_resource(_node.Node, node) + return res.set_boot_mode(self, target) + + def set_node_secure_boot(self, node, target): + """Make a request to change node's secure boot state + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param target: Boolean indicating secure boot state to set. + True/False corresponding to 'on'/'off' respectively. + """ + res = self._get_resource(_node.Node, node) + return res.set_secure_boot(self, target) + + def inject_nmi_to_node(self, node): + """Inject NMI to node. + + Injects a non-maskable interrupt (NMI) message to the node. This is + used when response time is critical, such as during non-recoverable + hardware errors. In addition, virsh inject-nmi is useful for triggering + a crashdump in Windows guests. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :return: None + """ + res = self._get_resource(_node.Node, node) + res.inject_nmi(self) + + def wait_for_nodes_provision_state( + self, + nodes, + expected_state, + timeout=None, + abort_on_failed_state=True, + fail=True, + ): + """Wait for the nodes to reach the expected state. + + :param nodes: List of nodes - name, ID or + :class:`~openstack.baremetal.v1.node.Node` instance. + :param expected_state: The expected provisioning state to reach. + :param timeout: If ``wait`` is set to ``True``, specifies how much (in + seconds) to wait for the expected state to be reached. The value of + ``None`` (the default) means no client-side timeout. + :param abort_on_failed_state: If ``True`` (the default), abort waiting + if any node reaches a failure state which does not match the + expected one. Note that the failure state for ``enroll`` -> + ``manageable`` transition is ``enroll`` again. + :param fail: If set to ``False`` this call will not raise on timeouts + and provisioning failures. 
+ + :return: If `fail` is ``True`` (the default), the list of + :class:`~openstack.baremetal.v1.node.Node` instances that reached + the requested state. If `fail` is ``False``, a + :class:`~openstack.baremetal.v1.node.WaitResult` named tuple. + :raises: :class:`~openstack.exceptions.ResourceFailure` if a node + reaches an error state and ``abort_on_failed_state`` is ``True``. + :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. + """ + log_nodes = ', '.join( + n.id if isinstance(n, _node.Node) else n for n in nodes + ) + + finished = [] + failed = [] + remaining = nodes + try: + for count in utils.iterate_timeout( + timeout, + f"Timeout waiting for nodes {log_nodes} to reach " + f"target state '{expected_state}'", + ): + nodes = [self.get_node(n) for n in remaining] + remaining = [] + for n in nodes: + try: + if n._check_state_reached( + self, expected_state, abort_on_failed_state + ): + finished.append(n) + else: + remaining.append(n) + except exceptions.ResourceFailure: + if fail: + raise + else: + failed.append(n) + + if not remaining: + if fail: + return finished + else: + return _node.WaitResult(finished, failed, []) + + self.log.debug( + 'Still waiting for nodes %(nodes)s to reach state ' + '"%(target)s"', + { + 'nodes': ', '.join(n.id for n in remaining), + 'target': expected_state, + }, + ) + except exceptions.ResourceTimeout: + if fail: + raise + else: + return _node.WaitResult(finished, failed, remaining) + + def set_node_power_state(self, node, target, wait=False, timeout=None): + """Run an action modifying node's power state. + + This call is asynchronous, it will return success as soon as the Bare + Metal service acknowledges the request. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param target: Target power state, one of + :class:`~openstack.baremetal.v1.node.PowerAction` or a string. 
+ :param wait: Whether to wait for the node to get into the expected + state. + :param timeout: If ``wait`` is set to ``True``, specifies how much (in + seconds) to wait for the expected state to be reached. The value of + ``None`` (the default) means no client-side timeout. + """ + self._get_resource(_node.Node, node).set_power_state( + self, target, wait=wait, timeout=timeout + ) + + def wait_for_node_power_state(self, node, expected_state, timeout=None): + """Wait for the node to reach the power state. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param timeout: How much (in seconds) to wait for the target state + to be reached. The value of ``None`` (the default) means + no timeout. + + :returns: The updated :class:`~openstack.baremetal.v1.node.Node` + """ + res = self._get_resource(_node.Node, node) + return res.wait_for_power_state(self, expected_state, timeout=timeout) + + def wait_for_node_reservation(self, node, timeout=None): + """Wait for a lock on the node to be released. + + Bare metal nodes in ironic have a reservation lock that + is used to represent that a conductor has locked the node + while performing some sort of action, such as changing + configuration as a result of a machine state change. + + This lock can occur during power synchronization, and prevents + updates to objects attached to the node, such as ports. + + Note that nothing prevents a conductor from acquiring the lock again + after this call returns, so it should be treated as best effort. + + Returns immediately if there is no reservation on the node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param timeout: How much (in seconds) to wait for the lock to be + released. The value of ``None`` (the default) means no timeout.
+ + :returns: The updated :class:`~openstack.baremetal.v1.node.Node` + """ + res = self._get_resource(_node.Node, node) + return res.wait_for_reservation(self, timeout=timeout) + + def validate_node(self, node, required=('boot', 'deploy', 'power')): + """Validate required information on a node. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. + :param required: List of interfaces that are required to pass + validation. The default value is the list of minimum required + interfaces for provisioning. + + :return: dict mapping interface names to + :class:`~openstack.baremetal.v1.node.ValidationResult` objects. + :raises: :exc:`~openstack.exceptions.ValidationException` if validation + fails for a required interface. + """ + res = self._get_resource(_node.Node, node) + return res.validate(self, required=required) + + def set_node_maintenance(self, node, reason=None): + """Enable maintenance mode on the node. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. + :param reason: Optional reason for maintenance. + :return: This :class:`Node` instance. + """ + res = self._get_resource(_node.Node, node) + return res.set_maintenance(self, reason) + + def unset_node_maintenance(self, node): + """Disable maintenance mode on the node. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. + :return: This :class:`Node` instance. + """ + res = self._get_resource(_node.Node, node) + return res.unset_maintenance(self) + + def delete_node(self, node, ignore_missing=True): + """Delete a node. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. 
+ :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the node could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + node. + + :returns: The instance of the node which was deleted. + :rtype: :class:`~openstack.baremetal.v1.node.Node`. + """ + return self._delete(_node.Node, node, ignore_missing=ignore_missing) + + # ========== Node actions ========== + + def add_node_trait(self, node, trait): + """Add a trait to a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param trait: trait to remove from the node. + :returns: The updated node + """ + res = self._get_resource(_node.Node, node) + return res.add_trait(self, trait) + + def remove_node_trait(self, node, trait, ignore_missing=True): + """Remove a trait from a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param trait: trait to remove from the node. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the trait could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + trait. + :returns: The updated :class:`~openstack.baremetal.v1.node.Node` + """ + res = self._get_resource(_node.Node, node) + return res.remove_trait(self, trait, ignore_missing=ignore_missing) + + def call_node_vendor_passthru(self, node, verb, method, body=None): + """Calls vendor_passthru for a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param verb: The HTTP verb, one of GET, SET, POST, DELETE. + :param method: The method to call using vendor_passthru. + :param body: The JSON body in the HTTP call. 
+ :returns: The raw response from the method. + """ + res = self._get_resource(_node.Node, node) + return res.call_vendor_passthru(self, verb, method, body) + + def list_node_vendor_passthru(self, node): + """Lists vendor_passthru for a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :returns: A list of vendor_passthru methods for the node. + """ + res = self._get_resource(_node.Node, node) + return res.list_vendor_passthru(self) + + def get_node_console(self, node): + """Get the console for a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :returns: Connection information for the console. + """ + res = self._get_resource(_node.Node, node) + return res.get_console(self) + + def enable_node_console(self, node): + """Enable the console for a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :returns: None + """ + res = self._get_resource(_node.Node, node) + return res.set_console_mode(self, True) + + def disable_node_console(self, node): + """Disable the console for a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :returns: None + """ + res = self._get_resource(_node.Node, node) + return res.set_console_mode(self, False) + + def set_node_traits(self, node, traits): + """Set traits for a node. + + Removes any existing traits and adds the traits passed in to this + method. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :param traits: list of traits to add to the node. 
+ :returns: The updated :class:`~openstack.baremetal.v1.node.Node` + """ + res = self._get_resource(_node.Node, node) + return res.set_traits(self, traits) + + def list_node_firmware(self, node): + """Lists firmware components for a node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.baremetal.v1.node.Node` instance. + :returns: A list of the node's firmware components. + """ + res = self._get_resource(_node.Node, node) + return res.list_firmware(self) + + # ========== Ports ========== + + def ports(self, details=False, **query): + """Retrieve a generator of ports. + + :param details: A boolean indicating whether the detailed information + for every port should be returned. + :param dict query: Optional query parameters to be sent to restrict + the ports returned. Available parameters include: + + * ``address``: Only return ports with the specified physical + hardware address, typically a MAC address. + * ``conductor_groups``: Only return ports associated with nodes + in the specified conductor group(s). + * ``driver``: Only return those with the specified ``driver``. + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``limit``: Requests at most the specified number of ports be + returned from the query. + * ``marker``: Specifies the ID of the last-seen port. Use the + ``limit`` parameter to make an initial limited request and + use the ID of the last-seen port from the response as + the ``marker`` value in a subsequent limited request. + * ``node``:only return the ones associated with this specific node + (name or UUID), or an empty set if not found. + * ``node_id``:only return the ones associated with this specific + node UUID, or an empty set if not found. + * ``portgroup``: only return the ports associated with this + specific Portgroup (name or UUID), or an empty set if not + found. 
Added in API microversion 1.24. + * ``sort_dir``: Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. You can specify multiple pairs of sort + key and sort direction query parameters. If you omit the + sort direction in a pair, the API uses the natural sorting + direction of the server attribute that is provided as the + ``sort_key``. + + :returns: A generator of port instances. + """ + return _port.Port.list(self, details=details, **query) + + def create_port(self, **attrs): + """Create a new port from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.port.Port`. + + :returns: The results of port creation. + :rtype: :class:`~openstack.baremetal.v1.port.Port`. + """ + return self._create(_port.Port, **attrs) + + # TODO(stephenfin): Delete this. You can't lookup a port by name so this is + # identical to get_port + def find_port(self, name_or_id, ignore_missing=True, *, details=True): + """Find a single port. + + :param str name_or_id: The ID of a port. + :param bool ignore_missing: When set to ``False``, an exception of + :class:`~openstack.exceptions.NotFoundException` will be raised + when the port does not exist. When set to `True``, None will + be returned when attempting to find a nonexistent port. + :param details: A boolean indicating whether the detailed information + for every port should be returned. + :returns: One :class:`~openstack.baremetal.v1.port.Port` object + or None. 
+ """ + return self._find( + _port.Port, + name_or_id, + ignore_missing=ignore_missing, + details=details, + ) + + def get_port(self, port, fields=None): + """Get a specific port. + + :param port: The value can be the ID of a port or a + :class:`~openstack.baremetal.v1.port.Port` instance. + :param fields: Limit the resource fields to fetch. + + :returns: One :class:`~openstack.baremetal.v1.port.Port` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + port matching the name or ID could be found. + """ + return self._get_with_fields(_port.Port, port, fields=fields) + + def update_port(self, port, **attrs): + """Update a port. + + :param port: Either the ID of a port or an instance + of :class:`~openstack.baremetal.v1.port.Port`. + :param dict attrs: The attributes to update on the port represented + by the ``port`` parameter. + + :returns: The updated port. + :rtype: :class:`~openstack.baremetal.v1.port.Port` + """ + return self._update(_port.Port, port, **attrs) + + def patch_port(self, port, patch): + """Apply a JSON patch to the port. + + :param port: The value can be the ID of a port or a + :class:`~openstack.baremetal.v1.port.Port` instance. + :param patch: JSON patch to apply. + + :returns: The updated port. + :rtype: :class:`~openstack.baremetal.v1.port.Port` + """ + return self._get_resource(_port.Port, port).patch(self, patch) + + def delete_port(self, port, ignore_missing=True): + """Delete a port. + + :param port: The value can be either the ID of a port or + a :class:`~openstack.baremetal.v1.port.Port` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the port could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + port. + + :returns: The instance of the port which was deleted. + :rtype: :class:`~openstack.baremetal.v1.port.Port`. 
+ """ + return self._delete(_port.Port, port, ignore_missing=ignore_missing) + + # ========== Port groups ========== + + def port_groups(self, details=False, **query): + """Retrieve a generator of port groups. + + :param details: A boolean indicating whether the detailed information + for every port group should be returned. + :param dict query: Optional query parameters to be sent to restrict + the port groups returned. Available parameters include: + + * ``address``: Only return portgroups with the specified physical + hardware address, typically a MAC address. + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``limit``: Requests at most the specified number of portgroups + returned from the query. + * ``marker``: Specifies the ID of the last-seen portgroup. Use the + ``limit`` parameter to make an initial limited request and + use the ID of the last-seen portgroup from the response as + the ``marker`` value in a subsequent limited request. + * ``node``:only return the ones associated with this specific node + (name or UUID), or an empty set if not found. + * ``sort_dir``: Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. You can specify multiple pairs of sort + key and sort direction query parameters. If you omit the + sort direction in a pair, the API uses the natural sorting + direction of the server attribute that is provided as the + ``sort_key``. + + :returns: A generator of port group instances. 
+ """ + return _portgroup.PortGroup.list(self, details=details, **query) + + def create_port_group(self, **attrs): + """Create a new portgroup from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.port_group.PortGroup`. + + :returns: The results of portgroup creation. + :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup`. + """ + return self._create(_portgroup.PortGroup, **attrs) + + def find_port_group( + self, + name_or_id, + ignore_missing=True, + *, + details=True, + ): + """Find a single port group. + + :param str name_or_id: The name or ID of a portgroup. + :param bool ignore_missing: When set to ``False``, an exception of + :class:`~openstack.exceptions.NotFoundException` will be raised + when the port group does not exist. When set to `True``, None will + be returned when attempting to find a nonexistent port group. + :param details: A boolean indicating whether the detailed information + for the port group should be returned. + :returns: One :class:`~openstack.baremetal.v1.port_group.PortGroup` + object or None. + """ + return self._find( + _portgroup.PortGroup, + name_or_id, + ignore_missing=ignore_missing, + details=details, + ) + + def get_port_group(self, port_group, fields=None): + """Get a specific port group. + + :param port_group: The value can be the name or ID of a chassis or a + :class:`~openstack.baremetal.v1.port_group.PortGroup` instance. + :param fields: Limit the resource fields to fetch. + + :returns: One :class:`~openstack.baremetal.v1.port_group.PortGroup` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + port group matching the name or ID could be found. + """ + return self._get_with_fields( + _portgroup.PortGroup, port_group, fields=fields + ) + + def update_port_group(self, port_group, **attrs): + """Update a port group. 
+ + :param port_group: Either the name or the ID of a port group or + an instance of + :class:`~openstack.baremetal.v1.port_group.PortGroup`. + :param dict attrs: The attributes to update on the port group + represented by the ``port_group`` parameter. + + :returns: The updated port group. + :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup` + """ + return self._update(_portgroup.PortGroup, port_group, **attrs) + + def patch_port_group(self, port_group, patch): + """Apply a JSON patch to the port_group. + + :param port_group: The value can be the ID of a port group or a + :class:`~openstack.baremetal.v1.port_group.PortGroup` instance. + :param patch: JSON patch to apply. + + :returns: The updated port group. + :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup` + """ + res = self._get_resource(_portgroup.PortGroup, port_group) + return res.patch(self, patch) + + def delete_port_group(self, port_group, ignore_missing=True): + """Delete a port group. + + :param port_group: The value can be either the name or ID of + a port group or a + :class:`~openstack.baremetal.v1.port_group.PortGroup` + instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the port group could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + port group. + + :returns: The instance of the port group which was deleted. + :rtype: :class:`~openstack.baremetal.v1.port_group.PortGroup`. + """ + return self._delete( + _portgroup.PortGroup, port_group, ignore_missing=ignore_missing + ) + + # ========== Virtual Media ========== + + def attach_vmedia_to_node( + self, + node, + device_type, + image_url, + image_download_source=None, + retry_on_conflict=True, + ): + """Attach virtual media device to a node. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. 
+ :param device_type: The type of virtual media device. + :param image_url: The URL of the image to attach. + :param image_download_source: The source of the image download. + :param retry_on_conflict: Whether to retry HTTP CONFLICT errors. + This can happen when either the virtual media is already used on + a node or the node is locked. Since the latter happens more often, + the default value is True. + :return: ``None`` + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VMEDIA API. + """ + res = self._get_resource(_node.Node, node) + res.attach_vmedia( + self, + device_type=device_type, + image_url=image_url, + image_download_source=image_download_source, + retry_on_conflict=retry_on_conflict, + ) + + def detach_vmedia_from_node(self, node, device_types=None): + """Detach virtual media from the node. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. + :param device_types: A list with the types of virtual media + devices to detach. + :return: ``True`` if the virtual media was detached, + otherwise ``False``. + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VMEDIA API. + """ + res = self._get_resource(_node.Node, node) + return res.detach_vmedia(self, device_types=device_types) + + # ========== VIFs ========== + + def attach_vif_to_node( + self, + node: _node.Node | str, + vif_id: str, + retry_on_conflict: bool = True, + *, + port_id: str | None = None, + port_group_id: str | None = None, + ) -> None: + """Attach a VIF to the node. + + The exact form of the VIF ID depends on the network interface used by + the node. In the most common case it is a Network service port + (NOT a Bare Metal port) ID. A VIF can only be attached to one node + at a time. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. 
+ :param vif_id: Backend-specific VIF ID. + :param retry_on_conflict: Whether to retry HTTP CONFLICT errors. + This can happen when either the VIF is already used on a node or + the node is locked. Since the latter happens more often, the + default value is True. + :param port_id: The UUID of the port to attach the VIF to. Only one of + port_id or port_group_id can be provided. + :param port_group_id: The UUID of the portgroup to attach to. Only one + of port_group_id or port_id can be provided. + :return: None + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VIF API. + :raises: :exc:`~openstack.exceptions.InvalidRequest` if both port_id + and port_group_id are provided. + """ + res = self._get_resource(_node.Node, node) + res.attach_vif( + self, + vif_id=vif_id, + retry_on_conflict=retry_on_conflict, + port_id=port_id, + port_group_id=port_group_id, + ) + + def detach_vif_from_node(self, node, vif_id, ignore_missing=True): + """Detach a VIF from the node. + + The exact form of the VIF ID depends on the network interface used by + the node. In the most common case it is a Network service port + (NOT a Bare Metal port) ID. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. + :param string vif_id: Backend-specific VIF ID. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the VIF does not exist. Otherwise, ``False`` + is returned. + :return: ``True`` if the VIF was detached, otherwise ``False``. + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VIF API. + """ + res = self._get_resource(_node.Node, node) + return res.detach_vif(self, vif_id, ignore_missing=ignore_missing) + + def list_node_vifs(self, node): + """List IDs of VIFs attached to the node. + + The exact form of the VIF ID depends on the network interface used by + the node. 
In the most common case it is a Network service port + (NOT a Bare Metal port) ID. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. + :return: List of VIF IDs as strings. + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VIF API. + """ + res = self._get_resource(_node.Node, node) + return res.list_vifs(self) + + # ========== Allocations ========== + + def allocations(self, **query): + """Retrieve a generator of allocations. + + :param dict query: Optional query parameters to be sent to restrict + the allocation to be returned. Available parameters include: + + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``limit``: Requests at most the specified number of items be + returned from the query. + * ``marker``: Specifies the ID of the last-seen allocation. Use the + ``limit`` parameter to make an initial limited request and + use the ID of the last-seen allocation from the response as + the ``marker`` value in a subsequent limited request. + * ``sort_dir``: Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. You can specify multiple pairs of sort + key and sort direction query parameters. If you omit the + sort direction in a pair, the API uses the natural sorting + direction of the server attribute that is provided as the + ``sort_key``. + + :returns: A generator of allocation instances. 
+ """ + return _allocation.Allocation.list(self, **query) + + def create_allocation(self, **attrs): + """Create a new allocation from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.allocation.Allocation`. + + :returns: The results of allocation creation. + :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`. + """ + return self._create(_allocation.Allocation, **attrs) + + def get_allocation(self, allocation, fields=None): + """Get a specific allocation. + + :param allocation: The value can be the name or ID of an allocation or + a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. + :param fields: Limit the resource fields to fetch. + + :returns: One :class:`~openstack.baremetal.v1.allocation.Allocation` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + allocation matching the name or ID could be found. + """ + return self._get_with_fields( + _allocation.Allocation, allocation, fields=fields + ) + + def update_allocation(self, allocation, **attrs): + """Update an allocation. + + :param allocation: The value can be the name or ID of an allocation or + a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. + :param dict attrs: The attributes to update on the allocation + represented by the ``allocation`` parameter. + + :returns: The updated allocation. + :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation` + """ + return self._update(_allocation.Allocation, allocation, **attrs) + + def patch_allocation(self, allocation, patch): + """Apply a JSON patch to the allocation. + + :param allocation: The value can be the name or ID of an allocation or + a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. + :param patch: JSON patch to apply. + + :returns: The updated allocation. 
+ :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation` + """ + return self._get_resource(_allocation.Allocation, allocation).patch( + self, patch + ) + + def delete_allocation(self, allocation, ignore_missing=True): + """Delete an allocation. + + :param allocation: The value can be the name or ID of an allocation or + a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the allocation could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + allocation. + + :returns: The instance of the allocation which was deleted. + :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`. + """ + return self._delete( + _allocation.Allocation, allocation, ignore_missing=ignore_missing + ) + + def wait_for_allocation( + self, allocation, timeout=None, ignore_error=False + ): + """Wait for the allocation to become active. + + :param allocation: The value can be the name or ID of an allocation or + a :class:`~openstack.baremetal.v1.allocation.Allocation` instance. + :param timeout: How much (in seconds) to wait for the allocation. + The value of ``None`` (the default) means no client-side timeout. + :param ignore_error: If ``False``, this call will raise an exception + if the allocation reaches the ``error`` state. Otherwise the error + state is considered successful and the call returns. + + :returns: The instance of the allocation. + :rtype: :class:`~openstack.baremetal.v1.allocation.Allocation`. + :raises: :class:`~openstack.exceptions.ResourceFailure` if allocation + fails and ``ignore_error`` is ``False``. + :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
+ """ + res = self._get_resource(_allocation.Allocation, allocation) + return res.wait(self, timeout=timeout, ignore_error=ignore_error) + + # ========== Volume connectors ========== + + def volume_connectors(self, details=False, **query): + """Retrieve a generator of volume_connector. + + :param details: A boolean indicating whether the detailed information + for every volume_connector should be returned. + :param dict query: Optional query parameters to be sent to restrict + the volume_connectors returned. Available parameters include: + + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``limit``: Requests at most the specified number of + volume_connector be returned from the query. + * ``marker``: Specifies the ID of the last-seen volume_connector. + Use the ``limit`` parameter to make an initial limited request + and use the ID of the last-seen volume_connector from the + response as the ``marker`` value in subsequent limited request. + * ``node``:only return the ones associated with this specific node + (name or UUID), or an empty set if not found. + * ``sort_dir``:Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. You can specify multiple pairs of sort + key and sort direction query parameters. If you omit the + sort direction in a pair, the API uses the natural sorting + direction of the server attribute that is provided as the + ``sort_key``. + + :returns: A generator of volume_connector instances. 
+ """ + if details: + query['detail'] = True + return _volumeconnector.VolumeConnector.list(self, **query) + + def create_volume_connector(self, **attrs): + """Create a new volume_connector from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`. + + :returns: The results of volume_connector creation. + :rtype: + :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`. + """ + return self._create(_volumeconnector.VolumeConnector, **attrs) + + # TODO(stephenfin): Delete this. You can't lookup a volume connector by + # name so this is identical to get_volume_connector + def find_volume_connector( + self, + vc_id, + ignore_missing=True, + *, + details=True, + ): + """Find a single volume connector. + + :param str vc_id: The ID of a volume connector. + :param bool ignore_missing: When set to ``False``, an exception of + :class:`~openstack.exceptions.NotFoundException` will be raised + when the volume connector does not exist. When set to `True``, + None will be returned when attempting to find a nonexistent + volume connector. + :param details: A boolean indicating whether the detailed information + for the volume connector should be returned. + + :returns: One + :class:`~openstack.baremetal.v1.volumeconnector.VolumeConnector` + object or None. + """ + return self._find( + _volumeconnector.VolumeConnector, + vc_id, + ignore_missing=ignore_missing, + details=details, + ) + + def get_volume_connector(self, volume_connector, fields=None): + """Get a specific volume_connector. + + :param volume_connector: The value can be the ID of a + volume_connector or a + :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector` + instance. 
+        :param fields: Limit the resource fields to fetch.
+
+        :returns: One
+            :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`
+        :raises: :class:`~openstack.exceptions.NotFoundException` when no
+            volume_connector matching the name or ID could be found.
+        """
+        return self._get_with_fields(
+            _volumeconnector.VolumeConnector, volume_connector, fields=fields
+        )
+
+    def update_volume_connector(self, volume_connector, **attrs):
+        """Update a volume_connector.
+
+        :param volume_connector: Either the ID of a volume_connector
+            or an instance of
+            :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`.
+        :param dict attrs: The attributes to update on the
+            volume_connector represented by the ``volume_connector``
+            parameter.
+
+        :returns: The updated volume_connector.
+        :rtype:
+            :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`
+        """
+        return self._update(
+            _volumeconnector.VolumeConnector, volume_connector, **attrs
+        )
+
+    def patch_volume_connector(self, volume_connector, patch):
+        """Apply a JSON patch to the volume_connector.
+
+        :param volume_connector: The value can be the ID of a
+            volume_connector or a
+            :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`
+            instance.
+        :param patch: JSON patch to apply.
+
+        :returns: The updated volume_connector.
+        :rtype:
+            :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`.
+        """
+        return self._get_resource(
+            _volumeconnector.VolumeConnector, volume_connector
+        ).patch(self, patch)
+
+    def delete_volume_connector(self, volume_connector, ignore_missing=True):
+        """Delete a volume_connector.
+
+        :param volume_connector: The value can be either the ID of a
+            volume_connector or a
+            :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`
+            instance.
+        :param bool ignore_missing: When set to ``False``, an exception
+            :class:`~openstack.exceptions.NotFoundException` will be raised
+            when the volume_connector could not be found.
+ When set to ``True``, no exception will be raised when + attempting to delete a non-existent volume_connector. + + :returns: The instance of the volume_connector which was deleted. + :rtype: + :class:`~openstack.baremetal.v1.volume_connector.VolumeConnector`. + """ + return self._delete( + _volumeconnector.VolumeConnector, + volume_connector, + ignore_missing=ignore_missing, + ) + + # ========== Volume targets ========== + + def volume_targets(self, details=False, **query): + """Retrieve a generator of volume_target. + + :param details: A boolean indicating whether the detailed information + for every volume_target should be returned. + :param dict query: Optional query parameters to be sent to restrict + the volume_targets returned. Available parameters include: + + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``limit``: Requests at most the specified number of + volume_connector be returned from the query. + * ``marker``: Specifies the ID of the last-seen volume_target. + Use the ``limit`` parameter to make an initial limited request + and use the ID of the last-seen volume_target from the + response as the ``marker`` value in subsequent limited request. + * ``node``:only return the ones associated with this specific node + (name or UUID), or an empty set if not found. + * ``sort_dir``:Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. You can specify multiple pairs of sort + key and sort direction query parameters. 
If you omit the
+            sort direction in a pair, the API uses the natural sorting
+            direction of the server attribute that is provided as the
+            ``sort_key``.
+
+        :returns: A generator of volume_target instances.
+        """
+        if details:
+            query['detail'] = True
+        return _volumetarget.VolumeTarget.list(self, **query)
+
+    def create_volume_target(self, **attrs):
+        """Create a new volume_target from attributes.
+
+        :param dict attrs: Keyword arguments that will be used to create a
+            :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`.
+
+        :returns: The results of volume_target creation.
+        :rtype:
+            :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`.
+        """
+        return self._create(_volumetarget.VolumeTarget, **attrs)
+
+    # TODO(stephenfin): Delete this. You can't lookup a volume target by
+    # name so this is identical to get_volume_target
+    def find_volume_target(self, vt_id, ignore_missing=True, *, details=True):
+        """Find a single volume target.
+
+        :param str vt_id: The ID of a volume target.
+        :param bool ignore_missing: When set to ``False``, an exception of
+            :class:`~openstack.exceptions.NotFoundException` will be raised
+            when the volume target does not exist. When set to ``True``,
+            None will be returned when attempting to find a nonexistent
+            volume target.
+        :param details: A boolean indicating whether the detailed information
+            for the volume target should be returned.
+
+        :returns: One
+            :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`
+            object or None.
+        """
+        return self._find(
+            _volumetarget.VolumeTarget,
+            vt_id,
+            ignore_missing=ignore_missing,
+            details=details,
+        )
+
+    def get_volume_target(self, volume_target, fields=None):
+        """Get a specific volume_target.
+
+        :param volume_target: The value can be the ID of a
+            volume_target or a
+            :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`
+            instance.
+ :param fields: Limit the resource fields to fetch.` + + :returns: One + :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + volume_target matching the name or ID could be found.` + """ + return self._get_with_fields( + _volumetarget.VolumeTarget, volume_target, fields=fields + ) + + def update_volume_target(self, volume_target, **attrs): + """Update a volume_target. + + :param volume_target: Either the ID of a volume_target + or an instance of + :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`. + :param dict attrs: The attributes to update on the + volume_target represented by the ``volume_target`` parameter. + + :returns: The updated volume_target. + :rtype: + :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` + """ + return self._update(_volumetarget.VolumeTarget, volume_target, **attrs) + + def patch_volume_target(self, volume_target, patch): + """Apply a JSON patch to the volume_target. + + :param volume_target: The value can be the ID of a + volume_target or a + :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` + instance. + :param patch: JSON patch to apply. + + :returns: The updated volume_target. + :rtype: + :class:`~openstack.baremetal.v1.volume_target.VolumeTarget.` + """ + return self._get_resource( + _volumetarget.VolumeTarget, volume_target + ).patch(self, patch) + + def delete_volume_target(self, volume_target, ignore_missing=True): + """Delete an volume_target. + + :param volume_target: The value can be either the ID of a + volume_target.VolumeTarget or a + :class:`~openstack.baremetal.v1.volume_target.VolumeTarget` + instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the volume_target could not be found. + When set to ``True``, no exception will be raised when + attempting to delete a non-existent volume_target. 
+ + :returns: The instance of the volume_target which was deleted. + :rtype: + :class:`~openstack.baremetal.v1.volume_target.VolumeTarget`. + """ + return self._delete( + _volumetarget.VolumeTarget, + volume_target, + ignore_missing=ignore_missing, + ) + + # ========== Deploy templates ========== + + def deploy_templates(self, details=False, **query): + """Retrieve a generator of deploy_templates. + + :param details: A boolean indicating whether the detailed information + for every deploy_templates should be returned. + :param dict query: Optional query parameters to be sent to + restrict the deploy_templates to be returned. + + :returns: A generator of Deploy templates instances. + """ + if details: + query['detail'] = True + return _deploytemplates.DeployTemplate.list(self, **query) + + def create_deploy_template(self, **attrs): + """Create a new deploy_template from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`. + + :returns: The results of deploy_template creation. + :rtype: + :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`. + """ + return self._create(_deploytemplates.DeployTemplate, **attrs) + + def update_deploy_template(self, deploy_template, **attrs): + """Update a deploy_template. + + :param deploy_template: Either the ID of a deploy_template, + or an instance of + :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`. + :param dict attrs: The attributes to update on + the deploy_template represented + by the ``deploy_template`` parameter. + + :returns: The updated deploy_template. + :rtype: + :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` + """ + return self._update( + _deploytemplates.DeployTemplate, deploy_template, **attrs + ) + + def delete_deploy_template(self, deploy_template, ignore_missing=True): + """Delete a deploy_template. 
+
+        :param deploy_template: The value can be
+            either the ID of a deploy_template or a
+            :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`
+            instance.
+
+        :param bool ignore_missing: When set to ``False``,
+            an exception :class:`~openstack.exceptions.NotFoundException`
+            will be raised when the deploy_template
+            could not be found.
+            When set to ``True``, no
+            exception will be raised when attempting
+            to delete a non-existent
+            deploy_template.
+
+        :returns: The instance of the deploy_template which was deleted.
+        :rtype:
+            :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`.
+        """
+
+        return self._delete(
+            _deploytemplates.DeployTemplate,
+            deploy_template,
+            ignore_missing=ignore_missing,
+        )
+
+    def get_deploy_template(self, deploy_template, fields=None):
+        """Get a specific deployment template.
+
+        :param deploy_template: The value can be the name or ID
+            of a deployment template or a
+            :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`
+            instance.
+
+        :param fields: Limit the resource fields to fetch.
+
+        :returns: One
+            :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate`
+        :raises: :class:`~openstack.exceptions.NotFoundException`
+            when no deployment template matching the name or
+            ID could be found.
+        """
+        return self._get_with_fields(
+            _deploytemplates.DeployTemplate, deploy_template, fields=fields
+        )
+
+    def find_deploy_template(
+        self,
+        name_or_id,
+        ignore_missing=True,
+        *,
+        details=True,
+    ):
+        """Find a single deployment template.
+
+        :param str name_or_id: The name or ID of a deployment template.
+        :param bool ignore_missing: When set to ``False``, an exception of
+            :class:`~openstack.exceptions.NotFoundException` will be raised
+            when the deployment template does not exist. When set to ``True``,
+            None will be returned when attempting to find a nonexistent
+            deployment template.
+ :param details: A boolean indicating whether the detailed information + for the deployment template should be returned. + + :returns: One + :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` or + None. + """ + return self._find( + _deploytemplates.DeployTemplate, + name_or_id, + ignore_missing=ignore_missing, + details=details, + ) + + def patch_deploy_template(self, deploy_template, patch): + """Apply a JSON patch to the deploy_templates. + + :param deploy_templates: The value can be the ID of a + deploy_template or a + :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` + instance. + + :param patch: JSON patch to apply. + + :returns: The updated deploy_template. + :rtype: + :class:`~openstack.baremetal.v1.deploy_templates.DeployTemplate` + """ + return self._get_resource( + _deploytemplates.DeployTemplate, deploy_template + ).patch(self, patch) + + # ========== Runbooks ========== + + def runbooks(self, details=False, **query): + """Retrieve a generator of runbooks. + + :param details: A boolean indicating whether the detailed information + for every runbook should be returned. + :param dict query: Optional query parameters to be sent to + restrict the runbooks to be returned. + + :returns: A generator of Runbooks instances. + """ + if details: + query['detail'] = True + return _runbooks.Runbook.list(self, **query) + + def create_runbook(self, **attrs): + """Create a new runbook from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.runbooks.Runbook`. + + :returns: The results of runbook creation. + :rtype: :class:`~openstack.baremetal.v1.runbooks.Runbook`. + """ + return self._create(_runbooks.Runbook, **attrs) + + def update_runbook(self, runbook, **attrs): + """Update a runbook. + + :param runbook: Either the ID of a runbook, + or an instance of + :class:`~openstack.baremetal.v1.runbooks.Runbook`. 
+ :param dict attrs: The attributes to update on + the runbook represented + by the ``runbook`` parameter. + + :returns: The updated runbook. + :rtype: :class:`~openstack.baremetal.v1.runbooks.Runbook` + """ + return self._update(_runbooks.Runbook, runbook, **attrs) + + def delete_runbook(self, runbook, ignore_missing=True): + """Delete a runbook. + + :param runbook:The value can be + either the ID of a runbook or a + :class:`~openstack.baremetal.v1.runbooks.Runbook` + instance. + + :param bool ignore_missing: When set to ``False``, + an exception:class:`~openstack.exceptions.NotFoundException` + will be raised when the runbook could not be found. + When set to ``True``, no exception will be raised when attempting + to delete a non-existent runbook. + + :returns: The instance of the runbook which was deleted. + :rtype: :class:`~openstack.baremetal.v1.runbooks.Runbook`. + """ + + return self._delete( + _runbooks.Runbook, + runbook, + ignore_missing=ignore_missing, + ) + + def get_runbook(self, runbook, fields=None): + """Get a specific runbook. + + :param runbook: The value can be the name or ID + of a runbook + :class:`~openstack.baremetal.v1.runbooks.Runbook` + instance. + + :param fields: Limit the resource fields to fetch. + + :returns: One + :class:`~openstack.baremetal.v1.runbooks.Runbook` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no runbook matching the name or ID could be found. + """ + return self._get_with_fields(_runbooks.Runbook, runbook, fields=fields) + + def patch_runbook(self, runbook, patch): + """Apply a JSON patch to the runbook. + + :param runbooks: The value can be the ID of a + runbook or a :class:`~openstack.baremetal.v1.runbooks.Runbook` + instance. + + :param patch: JSON patch to apply. + + :returns: The updated runbook. 
+ :rtype: + :class:`~openstack.baremetal.v1.runbooks.Runbook` + """ + return self._get_resource(_runbooks.Runbook, runbook).patch( + self, patch + ) + + # ========== Conductors ========== + + def conductors(self, details=False, **query): + """Retrieve a generator of conductors. + + :param bool details: A boolean indicating whether the detailed + information for every conductor should be returned. + + :returns: A generator of conductor instances. + """ + + if details: + query['details'] = True + return _conductor.Conductor.list(self, **query) + + # NOTE(stephenfin): There is no 'find_conductor' since conductors are + # identified by the host name, not an arbitrary UUID, meaning + # 'find_conductor' would be identical to 'get_conductor' + + def get_conductor(self, conductor, fields=None): + """Get a specific conductor. + + :param conductor: The value can be the name of a conductor or a + :class:`~openstack.baremetal.v1.conductor.Conductor` instance. + + :returns: One :class:`~openstack.baremetal.v1.conductor.Conductor` + + :raises: :class:`~openstack.exceptions.NotFoundException` when no + conductor matching the name could be found. + """ + return self._get_with_fields( + _conductor.Conductor, conductor, fields=fields + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. 
+ :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + # ========== Inspection Rules ========== + + def inspection_rules(self, details=False, **query): + """Retrieve a generator of inspection rules. + + :param dict query: Optional query parameters to be sent to + restrict the inspection rules to be returned. 
+ + :returns: A generator of InspectionRule instances. + """ + if details: + query['details'] = True + return _inspectionrules.InspectionRule.list(self, **query) + + def create_inspection_rule(self, **attrs): + """Create a new inspection rule from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule`. + + :returns: The results of inspection rule creation. + :rtype: + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule`. + """ + return self._create(_inspectionrules.InspectionRule, **attrs) + + def get_inspection_rule(self, inspection_rule, fields=None): + """Get a specific inspection rule. + + :param inspection_rule: The ID of an inspection rule + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule` + instance. + + :param fields: Limit the resource fields to fetch. + + :returns: One + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + inspection rule matching the ID could be found. + """ + return self._get_with_fields( + _inspectionrules.InspectionRule, inspection_rule, fields=fields + ) + + def update_inspection_rule(self, inspection_rule, **attrs): + """Update an inspection rule. + + :param inspection_rule: Either the ID of an inspection rule + or an instance of + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule`. + :param dict attrs: The attributes to update on the + inspection rule represented by the ``inspection_rule`` parameter. + + :returns: The updated inspection rule. + :rtype: + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule` + """ + return self._update( + _inspectionrules.InspectionRule, inspection_rule, **attrs + ) + + def delete_inspection_rule(self, inspection_rule, ignore_missing=True): + """Delete an inspection rule. 
+ + :param inspection_rule: The value can be either the ID of a + inspection_rule or a + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule` + instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the inspection rule could not be found. + When set to ``True``, no exception will be raised when + attempting to delete a non-existent inspection rule. + + :returns: The instance of the inspection rule which was deleted. + :rtype: + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule`. + """ + return self._delete( + _inspectionrules.InspectionRule, + inspection_rule, + ignore_missing=ignore_missing, + ) + + def patch_inspection_rule(self, inspection_rule, patch): + """Apply a JSON patch to the inspection rule. + + :param inspection_rule: The value can be the ID of a + inspection_rule or a + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule` + instance. + + :param patch: JSON patch to apply. + + :returns: The updated inspection rule. + :rtype: + :class:`~openstack.baremetal.v1.inspection_rules.InspectionRule` + """ + return self._get_resource( + _inspectionrules.InspectionRule, inspection_rule + ).patch(self, patch) diff --git a/openstack/baremetal/v1/allocation.py b/openstack/baremetal/v1/allocation.py new file mode 100644 index 0000000000..b2f79fa6df --- /dev/null +++ b/openstack/baremetal/v1/allocation.py @@ -0,0 +1,111 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import _common +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Allocation(resource.Resource): + resources_key = 'allocations' + base_path = '/allocations' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'node', + 'resource_class', + 'state', + 'owner', + fields={'type': _common.fields_type}, + ) + + # Allocation update is available since 1.57 + # Backfilling allocations is available since 1.58 + # owner attribute is available since 1.60 + _max_microversion = '1.60' + + #: The candidate nodes for this allocation. + candidate_nodes = resource.Body('candidate_nodes', type=list) + #: Timestamp at which the allocation was created. + created_at = resource.Body('created_at') + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body('extra', type=dict) + #: The UUID for the allocation. + id = resource.Body('uuid', alternate_id=True) + #: The last error for the allocation. + last_error = resource.Body("last_error") + #: A list of relative links, including the self and bookmark links. + links = resource.Body('links', type=list) + #: The name of the allocation. + name = resource.Body('name') + #: The node UUID or name to create the allocation against, + #: bypassing the normal allocation process. + node = resource.Body('node') + #: UUID of the node this allocation belongs to. + node_id = resource.Body('node_uuid') + #: The tenant who owns the object + owner = resource.Body('owner') + #: The requested resource class. + resource_class = resource.Body('resource_class') + #: The state of the allocation. + state = resource.Body('state') + #: The requested traits. 
+ traits = resource.Body('traits', type=list) + #: Timestamp at which the allocation was last updated. + updated_at = resource.Body('updated_at') + + def wait(self, session, timeout=None, ignore_error=False): + """Wait for the allocation to become active. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param timeout: How much (in seconds) to wait for the allocation. + The value of ``None`` (the default) means no client-side timeout. + :param ignore_error: If ``True``, this call will raise an exception + if the allocation reaches the ``error`` state. Otherwise the error + state is considered successful and the call returns. + + :return: This :class:`Allocation` instance. + :raises: :class:`~openstack.exceptions.ResourceFailure` if allocation + fails and ``ignore_error`` is ``False``. + :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. + """ + if self.state == 'active': + return self + + for count in utils.iterate_timeout( + timeout, f"Timeout waiting for the allocation {self.id}" + ): + self.fetch(session) + + if self.state == 'error' and not ignore_error: + raise exceptions.ResourceFailure( + f"Allocation {self.id} failed: {self.last_error}" + ) + elif self.state != 'allocating': + return self + + session.log.debug( + 'Still waiting for the allocation %(allocation)s ' + 'to become active, the current state is %(state)s', + {'allocation': self.id, 'state': self.state}, + ) diff --git a/openstack/baremetal/v1/chassis.py b/openstack/baremetal/v1/chassis.py new file mode 100644 index 0000000000..daa03d5632 --- /dev/null +++ b/openstack/baremetal/v1/chassis.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import _common +from openstack import resource + + +class Chassis(_common.Resource): + resources_key = 'chassis' + base_path = '/chassis' + + # Specifying fields became possible in 1.8. + _max_microversion = '1.8' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + fields={'type': _common.fields_type}, + ) + + #: Timestamp at which the chassis was created. + created_at = resource.Body('created_at') + #: A descriptive text about the service + description = resource.Body('description') + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body('extra') + #: The UUID for the chassis + id = resource.Body('uuid', alternate_id=True) + #: A list of relative links, including the self and bookmark links. + links = resource.Body('links', type=list) + #: Links to the collection of nodes contained in the chassis + nodes = resource.Body('nodes', type=list) + #: Timestamp at which the chassis was last updated. 
+ updated_at = resource.Body('updated_at') + + +ChassisDetail = Chassis diff --git a/openstack/baremetal/v1/conductor.py b/openstack/baremetal/v1/conductor.py new file mode 100644 index 0000000000..febc0220af --- /dev/null +++ b/openstack/baremetal/v1/conductor.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import _common +from openstack import resource + + +class Conductor(resource.Resource): + resources_key = 'conductors' + base_path = '/conductors' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + allow_patch = False + + _query_mapping = resource.QueryParameters( + 'detail', + fields={'type': _common.fields_type}, + ) + + _max_microversion = '1.49' + created_at = resource.Body('created_at') + updated_at = resource.Body('updated_at') + hostname = resource.Body('hostname') + conductor_group = resource.Body('conductor_group') + alive = resource.Body('alive', type=bool) + links = resource.Body('links', type=list) + drivers = resource.Body('drivers', type=list) diff --git a/openstack/baremetal/v1/deploy_templates.py b/openstack/baremetal/v1/deploy_templates.py new file mode 100644 index 0000000000..d97747235e --- /dev/null +++ b/openstack/baremetal/v1/deploy_templates.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import _common +from openstack import resource + + +class DeployTemplate(resource.Resource): + resources_key = 'deploy_templates' + base_path = '/deploy_templates' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'detail', + fields={'type': _common.fields_type}, + ) + + # Deploy Templates is available since 1.55 + _max_microversion = '1.55' + name = resource.Body('name') + #: Timestamp at which the deploy_template was created. + created_at = resource.Body('created_at') + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body('extra') + #: A list of relative links. Includes the self and bookmark links. + links = resource.Body('links', type=list) + #: A set of physical information of the deploy_template. + steps = resource.Body('steps', type=list) + #: Timestamp at which the deploy_template was last updated. + updated_at = resource.Body('updated_at') + #: The UUID of the resource. + id = resource.Body('uuid', alternate_id=True) diff --git a/openstack/baremetal/v1/driver.py b/openstack/baremetal/v1/driver.py new file mode 100644 index 0000000000..81dcaaad6a --- /dev/null +++ b/openstack/baremetal/v1/driver.py @@ -0,0 +1,199 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from keystoneauth1 import adapter +import requests + +from openstack.baremetal.v1 import _common +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Driver(resource.Resource): + resources_key = 'drivers' + base_path = '/drivers' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + + _query_mapping = resource.QueryParameters(details='detail') + + # The BIOS interface fields introduced in 1.40 (Rocky). + # The firmware interface fields introduced in 1.86. + _max_microversion = '1.86' + + #: A list of active hosts that support this driver. + hosts = resource.Body('hosts', type=list) + #: A list of relative links, including the self and bookmark links. + links = resource.Body('links', type=list) + #: The name of the driver + name = resource.Body('name', alternate_id=True) + #: A list of links to driver properties. + properties = resource.Body('properties', type=list) + + # Hardware interface properties grouped together for convenience, + # available with detail=True. + + #: Default BIOS interface implementation. + #: Introduced in API microversion 1.40. + default_bios_interface = resource.Body("default_bios_interface") + #: Default boot interface implementation. + #: Introduced in API microversion 1.30. + default_boot_interface = resource.Body("default_boot_interface") + #: Default console interface implementation. + #: Introduced in API microversion 1.30. 
+ default_console_interface = resource.Body("default_console_interface") + #: Default deploy interface implementation. + #: Introduced in API microversion 1.30. + default_deploy_interface = resource.Body("default_deploy_interface") + #: Default firmware interface implementation. + #: Introduced in API microversion 1.86. + default_firmware_interface = resource.Body("default_firmware_interface") + #: Default inspect interface implementation. + #: Introduced in API microversion 1.30. + default_inspect_interface = resource.Body("default_inspect_interface") + #: Default management interface implementation. + #: Introduced in API microversion 1.30. + default_management_interface = resource.Body( + "default_management_interface" + ) + #: Default network interface implementation. + #: Introduced in API microversion 1.30. + default_network_interface = resource.Body("default_network_interface") + #: Default port interface implementation. + #: Introduced in API microversion 1.30. + default_power_interface = resource.Body("default_power_interface") + #: Default RAID interface implementation. + #: Introduced in API microversion 1.30. + default_raid_interface = resource.Body("default_raid_interface") + #: Default rescue interface implementation. + #: Introduced in API microversion 1.38. + default_rescue_interface = resource.Body("default_rescue_interface") + #: Default storage interface implementation. + #: Introduced in API microversion 1.33. + default_storage_interface = resource.Body("default_storage_interface") + #: Default vendor interface implementation. + #: Introduced in API microversion 1.30. + default_vendor_interface = resource.Body("default_vendor_interface") + + #: Enabled BIOS interface implementations. + #: Introduced in API microversion 1.40. + enabled_bios_interfaces = resource.Body("enabled_bios_interfaces") + #: Enabled boot interface implementations. + #: Introduced in API microversion 1.30. 
+ enabled_boot_interfaces = resource.Body("enabled_boot_interfaces") + #: Enabled console interface implementations. + #: Introduced in API microversion 1.30. + enabled_console_interfaces = resource.Body("enabled_console_interfaces") + #: Enabled deploy interface implementations. + #: Introduced in API microversion 1.30. + enabled_deploy_interfaces = resource.Body("enabled_deploy_interfaces") + #: Enabled firmware interface implementations. + #: Introduced in API microversion 1.86. + enabled_firmware_interfaces = resource.Body("enabled_firmware_interfaces") + #: Enabled inspect interface implementations. + #: Introduced in API microversion 1.30. + enabled_inspect_interfaces = resource.Body("enabled_inspect_interfaces") + #: Enabled management interface implementations. + #: Introduced in API microversion 1.30. + enabled_management_interfaces = resource.Body( + "enabled_management_interfaces" + ) + #: Enabled network interface implementations. + #: Introduced in API microversion 1.30. + enabled_network_interfaces = resource.Body("enabled_network_interfaces") + #: Enabled port interface implementations. + #: Introduced in API microversion 1.30. + enabled_power_interfaces = resource.Body("enabled_power_interfaces") + #: Enabled RAID interface implementations. + #: Introduced in API microversion 1.30. + enabled_raid_interfaces = resource.Body("enabled_raid_interfaces") + #: Enabled rescue interface implementations. + #: Introduced in API microversion 1.38. + enabled_rescue_interfaces = resource.Body("enabled_rescue_interfaces") + #: Enabled storage interface implementations. + #: Introduced in API microversion 1.33. + enabled_storage_interfaces = resource.Body("enabled_storage_interfaces") + #: Enabled vendor interface implementations. + #: Introduced in API microversion 1.30. 
+ enabled_vendor_interfaces = resource.Body("enabled_vendor_interfaces") + + def list_vendor_passthru(self, session): + """Fetch vendor specific methods exposed by driver + + :param session: The session to use for making this request. + :returns: A dict of the available vendor passthru methods for driver. + Method names keys and corresponding usages in dict form as values + Usage dict properties: + * ``async``: bool # Is passthru function invoked asynchronously + * ``attach``: bool # Is return value attached to response object + * ``description``: str # Description of what the method does + * ``http_methods``: list # List of HTTP methods supported + """ + session = self._get_session(session) + request = self._prepare_request() + request.url = utils.urljoin(request.url, 'vendor_passthru', 'methods') + response = session.get(request.url, headers=request.headers) + + msg = "Failed to list list vendor_passthru methods for {driver_name}" + exceptions.raise_from_response( + response, error_message=msg.format(driver_name=self.name) + ) + return response.json() + + def call_vendor_passthru( + self, + session: adapter.Adapter, + verb: str, + method: str, + body: dict | None = None, + ) -> requests.Response: + """Call a vendor specific passthru method + + Contents of body are params passed to the hardware driver + function. Validation happens there. Missing parameters, or + excess parameters will cause the request to be rejected + + :param session: The session to use for making this request. + :param method: Vendor passthru method name. + :param verb: One of GET, POST, PUT, DELETE, + depending on the driver and method. + :param body: passed to the vendor function as json body. + :raises: :exc:`ValueError` if :data:`verb` is not one of + GET, POST, PUT, DELETE + :returns: response of method call. 
+ """ + if verb.upper() not in ['GET', 'PUT', 'POST', 'DELETE']: + raise ValueError(f'Invalid verb: {verb}') + + session = self._get_session(session) + request = self._prepare_request() + request.url = utils.urljoin( + request.url, f'vendor_passthru?method={method}' + ) + call = getattr(session, verb.lower()) + response = call( + request.url, + json=body, + headers=request.headers, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed call to method {method} on driver {self.name}" + exceptions.raise_from_response(response, error_message=msg) + return response diff --git a/openstack/baremetal/v1/inspection_rules.py b/openstack/baremetal/v1/inspection_rules.py new file mode 100644 index 0000000000..10460933f1 --- /dev/null +++ b/openstack/baremetal/v1/inspection_rules.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal.v1 import _common +from openstack import resource + + +class InspectionRule(resource.Resource): + resources_key = 'inspection_rules' + base_path = '/inspection_rules' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'detail', + fields={'type': _common.fields_type}, + ) + + # Inspection rules is available since 1.96 + _max_microversion = '1.96' + #: The actions to be executed when the rule conditions are met. + actions = resource.Body('actions', type=list) + #: A brief explanation about the inspection rule. + description = resource.Body('description') + #: The conditions under which the rule should be triggered. + conditions = resource.Body('conditions', type=list) + #: Timestamp at which the resource was created. + created_at = resource.Body('created_at') + #: A list of relative links. Includes the self and bookmark links. + links = resource.Body('links', type=list) + #: Specifies the phase when the rule should run, defaults to 'main'. + phase = resource.Body('phase') + #: Specifies the rule's precedence level during execution. + priority = resource.Body('priority') + #: Indicates whether the rule contains sensitive information. + sensitive = resource.Body('sensitive', type=bool) + #: Timestamp at which the resource was last updated. + updated_at = resource.Body('updated_at') + #: The UUID of the resource. + id = resource.Body('uuid', alternate_id=True) diff --git a/openstack/baremetal/v1/node.py b/openstack/baremetal/v1/node.py new file mode 100644 index 0000000000..538bf99732 --- /dev/null +++ b/openstack/baremetal/v1/node.py @@ -0,0 +1,1609 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import enum +import warnings + +from keystoneauth1 import adapter + +from openstack.baremetal.v1 import _common +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class ValidationResult: + """Result of a single interface validation. + + :ivar result: Result of a validation, ``True`` for success, ``False`` for + failure, ``None`` for unsupported interface. + :ivar reason: If ``result`` is ``False`` or ``None``, explanation of + the result. + """ + + def __init__(self, result, reason): + self.result = result + self.reason = reason + + +class PowerAction(enum.Enum): + """Mapping from an action to a target power state.""" + + POWER_ON = 'power on' + """Power on the node.""" + + POWER_OFF = 'power off' + """Power off the node (using hard power off).""" + REBOOT = 'rebooting' + """Reboot the node (using hard power off).""" + + SOFT_POWER_OFF = 'soft power off' + """Power off the node using soft power off.""" + + SOFT_REBOOT = 'soft rebooting' + """Reboot the node using soft power off.""" + + +class WaitResult( + collections.namedtuple('WaitResult', ['success', 'failure', 'timeout']) +): + """A named tuple representing a result of waiting for several nodes. + + Each component is a list of :class:`~openstack.baremetal.v1.node.Node` + objects: + + :ivar ~.success: a list of :class:`~openstack.baremetal.v1.node.Node` + objects that reached the state. 
+ :ivar ~.timeout: a list of :class:`~openstack.baremetal.v1.node.Node` + objects that reached timeout. + :ivar ~.failure: a list of :class:`~openstack.baremetal.v1.node.Node` + objects that hit a failure. + """ + + __slots__ = () + + +class Node(_common.Resource): + resources_key = 'nodes' + base_path = '/nodes' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'associated', + 'conductor_group', + 'driver', + 'fault', + 'include_children', + 'instance_name', + 'parent_node', + 'provision_state', + 'resource_class', + 'shard', + fields={'type': _common.fields_type}, + instance_id='instance_uuid', + is_maintenance='maintenance', + ) + + # Add health field to node object. + _max_microversion = '1.109' + + # Properties + #: The UUID of the allocation associated with this node. Added in API + #: microversion 1.52. + allocation_id = resource.Body("allocation_uuid") + #: A string or UUID of the tenant who owns the baremetal node. Added in API + #: microversion 1.50. + owner = resource.Body("owner") + #: The current boot mode state (uefi/bios). Added in API microversion 1.75. + boot_mode = resource.Body("boot_mode") + #: The UUID of the chassis associated wit this node. Can be empty or None. + chassis_id = resource.Body("chassis_uuid") + #: The current clean step. + clean_step = resource.Body("clean_step") + #: Hostname of the conductor currently handling this node. Added in API + # microversion 1.49. + conductor = resource.Body("conductor") + #: Conductor group this node is managed by. Added in API microversion 1.46. + conductor_group = resource.Body("conductor_group") + #: Timestamp at which the node was last updated. + created_at = resource.Body("created_at") + #: The current deploy step. Added in API microversion 1.44. 
+ deploy_step = resource.Body("deploy_step") + #: The description of the node. Added in API microversion 1.51. + description = resource.Body("description") + #: The name of the driver. + driver = resource.Body("driver") + #: All the metadata required by the driver to manage this node. List of + #: fields varies between drivers, and can be retrieved from the + #: :class:`openstack.baremetal.v1.driver.Driver` resource. + driver_info = resource.Body("driver_info", type=dict) + #: Internal metadata set and stored by node's driver. This is read-only. + driver_internal_info = resource.Body("driver_internal_info", type=dict) + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body("extra") + #: Fault type that caused the node to enter maintenance mode. + #: Introduced in API microversion 1.42. + fault = resource.Body("fault") + #: The health status of the node from the BMC (e.g. 'OK', 'Warning', + #: 'Critical'). Introduced in API microversion 1.109. + health = resource.Body("health") + #: The UUID of the node resource. + id = resource.Body("uuid", alternate_id=True) + #: Information used to customize the deployed image, e.g. size of root + #: partition, config drive in the form of base64 encoded string and other + #: metadata. + instance_info = resource.Body("instance_info") + #: UUID of the nova instance associated with this node. + instance_id = resource.Body("instance_uuid") + #: The name of the instance associated with this node. Added in API + #: microversion 1.104. + instance_name = resource.Body("instance_name") + #: Override enabling of automated cleaning. Added in API microversion 1.47. + is_automated_clean_enabled = resource.Body("automated_clean", type=bool) + #: Whether console access is enabled on this node. + is_console_enabled = resource.Body("console_enabled", type=bool) + #: Whether node is currently in "maintenance mode". Nodes put into + #: maintenance mode are removed from the available resource pool. 
+ is_maintenance = resource.Body("maintenance", type=bool) + # Whether the node is protected from undeploying. Added in API microversion + # 1.48. + is_protected = resource.Body("protected", type=bool) + #: Whether the node is marked for retirement. Added in API microversion + #: 1.61. + is_retired = resource.Body("retired", type=bool) + #: Whether the node is currently booted with secure boot turned on. + #: Added in API microversion 1.75. + is_secure_boot = resource.Body("secure_boot", type=bool) + #: Any error from the most recent transaction that started but failed to + #: finish. + last_error = resource.Body("last_error") + #: Field indicating if the node is leased to a specific project. + #: Added in API version 1.65 + lessee = resource.Body("lessee") + #: A list of relative links, including self and bookmark links. + links = resource.Body("links", type=list) + #: user settable description of the reason why the node was placed into + #: maintenance mode. + maintenance_reason = resource.Body("maintenance_reason") + #: Human readable identifier for the node. May be undefined. Certain words + #: are reserved. Added in API microversion 1.5 + name = resource.Body("name") + #: The node which serves as the parent_node for this node. + #: Added in API version 1.83 + parent_node = resource.Body("parent_node") + #: Links to the collection of ports on this node. + ports = resource.Body("ports", type=list) + #: Links to the collection of portgroups on this node. Available since + #: API microversion 1.24. + port_groups = resource.Body("portgroups", type=list) + #: The current power state. Usually "power on" or "power off", but may be + #: "None" if service is unable to determine the power state. + power_state = resource.Body("power_state") + #: Physical characteristics of the node. Content populated by the service + #: during inspection. + properties = resource.Body("properties", type=dict) + # The reason why this node is protected. Added in API microversion 1.48. 
+ protected_reason = resource.Body("protected_reason") + #: The current provisioning state of the node. + provision_state = resource.Body("provision_state") + #: The reason why the node is marked for retirement. Added in API + #: microversion 1.61. + retired_reason = resource.Body("retired_reason") + #: The current RAID configuration of the node. + raid_config = resource.Body("raid_config") + #: The name of an service conductor host which is holding a lock on this + #: node, if a lock is held. + reservation = resource.Body("reservation") + #: A string to be used by external schedulers to identify this node as a + #: unit of a specific type of resource. Added in API microversion 1.21. + resource_class = resource.Body("resource_class") + #: A string representing the current service step being executed upon. + #: Added in API microversion 1.89. + service_step = resource.Body("service_step") + #: A string representing the uuid or logical name of a runbook as an + #: alternative to providing ``clean_steps`` or ``service_steps``. + #: Added in API microversion 1.92. + runbook = resource.Body("runbook") + #: A string indicating the shard this node belongs to. Added in API + #: microversion 1,82. + shard = resource.Body("shard") + #: Links to the collection of states. + states = resource.Body("states", type=list) + #: The requested state if a provisioning action has been requested. For + #: example, ``AVAILABLE``, ``DEPLOYING``, ``DEPLOYWAIT``, ``DEPLOYING``, + #: ``ACTIVE`` etc. + target_provision_state = resource.Body("target_provision_state") + #: The requested state during a state transition. + target_power_state = resource.Body("target_power_state") + #: The requested RAID configuration of the node which will be applied when + #: the node next transitions through the CLEANING state. + target_raid_config = resource.Body("target_raid_config") + #: Traits of the node. Introduced in API microversion 1.37. 
+ traits = resource.Body("traits", type=list) + #: Timestamp at which the node was last updated. + updated_at = resource.Body("updated_at") + + # Hardware interfaces grouped together for convenience. + + #: BIOS interface to use when setting BIOS properties of the node. + #: Introduced in API microversion 1.40. + bios_interface = resource.Body("bios_interface") + #: Boot interface to use when configuring boot of the node. + #: Introduced in API microversion 1.31. + boot_interface = resource.Body("boot_interface") + #: Console interface to use when working with serial console. + #: Introduced in API microversion 1.31. + console_interface = resource.Body("console_interface") + #: Deploy interface to use when deploying the node. + #: Introduced in API microversion 1.31. + deploy_interface = resource.Body("deploy_interface") + #: Firmware interface to be used when managing the node. + #: Introduced in API microversion 1.86 + firmware_interface = resource.Body("firmware_interface") + #: Inspect interface to use when inspecting the node. + #: Introduced in API microversion 1.31. + inspect_interface = resource.Body("inspect_interface") + #: Management interface to use for management actions on the node. + #: Introduced in API microversion 1.31. + management_interface = resource.Body("management_interface") + #: Network interface provider to use when plumbing the network connections + #: for this node. Introduced in API microversion 1.20. + network_interface = resource.Body("network_interface") + #: Power interface to use for power actions on the node. + #: Introduced in API microversion 1.31. + power_interface = resource.Body("power_interface") + #: RAID interface to use for configuring RAID on the node. + #: Introduced in API microversion 1.31. + raid_interface = resource.Body("raid_interface") + #: Rescue interface to use for rescuing of the node. + #: Introduced in API microversion 1.38. 
+ rescue_interface = resource.Body("rescue_interface") + #: Storage interface to use when attaching remote storage. + #: Introduced in API microversion 1.33. + storage_interface = resource.Body("storage_interface") + #: Vendor interface to use for vendor-specific actions on the node. + #: Introduced in API microversion 1.31. + vendor_interface = resource.Body("vendor_interface") + + def _consume_body_attrs(self, attrs): + if 'provision_state' in attrs and attrs['provision_state'] is None: + # API version 1.1 uses None instead of "available". Make it + # consistent. + attrs['provision_state'] = 'available' + return super()._consume_body_attrs(attrs) + + def create(self, session, *args, **kwargs): + """Create a remote resource based on this instance. + + The overridden version is capable of handling the populated + ``provision_state`` field of one of three values: ``enroll``, + ``manageable`` or ``available``. If not provided, the server default + is used (``enroll`` in newer versions of Ironic). + + This call does not cause a node to go through automated cleaning. + If you need it, use ``provision_state=manageable`` followed by + a call to :meth:`set_provision_state`. + + Note that Bare Metal API 1.4 is required for ``manageable`` and + 1.11 is required for ``enroll``. + + .. warning:: + Using ``provision_state=available`` is only possible with API + versions 1.1 to 1.10 and thus is incompatible with setting any + fields that appeared after 1.11. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + + :return: This :class:`Resource` instance. + :raises: ValueError if the Node's ``provision_state`` is not one of + ``None``, ``enroll``, ``manageable`` or ``available``. + :raises: :exc:`~openstack.exceptions.NotSupported` if + the ``provision_state`` cannot be reached with any API version + supported by the server. 
+ """ + expected_provision_state = self.provision_state + + session = self._get_session(session) + if expected_provision_state is not None: + # Verify that the requested provision state is reachable with + # the API version we are going to use. + try: + microversion = _common.STATE_VERSIONS[expected_provision_state] + except KeyError: + raise ValueError( + "Node's provision_state must be one of {} for creation, " + "got {}".format( + ', '.join(_common.STATE_VERSIONS), + expected_provision_state, + ) + ) + else: + error_message = ( + f"Cannot create a node with initial provision " + f"state {expected_provision_state}" + ) + # Nodes cannot be created as available using new API versions + maximum = ( + '1.10' if expected_provision_state == 'available' else None + ) + microversion = self._assert_microversion_for( + session, + microversion, + maximum=maximum, + error_message=error_message, + ) + else: + microversion = None # use the base negotiation + + # Ironic cannot set provision_state itself, so marking it as unchanged + self._clean_body_attrs({'provision_state'}) + + super().create(session, *args, microversion=microversion, **kwargs) + + if ( + expected_provision_state == 'manageable' + and self.provision_state != 'manageable' + ): + # Manageable is not reachable directly + self.set_provision_state(session, 'manage', wait=True) + + return self + + def commit(self, session, *args, **kwargs): + """Commit the state of the instance to the remote resource. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + + :return: This :class:`Node` instance. + """ + # These fields have to be set through separate API. 
+ if ( + 'maintenance_reason' in self._body.dirty + or 'maintenance' in self._body.dirty + ): + if not self.is_maintenance and self.maintenance_reason: + if 'maintenance' in self._body.dirty: + self.maintenance_reason = None + else: + raise ValueError( + 'Maintenance reason cannot be set when ' + 'maintenance is False' + ) + if self.is_maintenance: + self._do_maintenance_action( + session, 'put', {'reason': self.maintenance_reason} + ) + else: + # This corresponds to setting maintenance=False and + # maintenance_reason=None in the same request. + self._do_maintenance_action(session, 'delete') + + self._clean_body_attrs({'maintenance', 'maintenance_reason'}) + if not self.requires_commit: + # Other fields are not updated, re-fetch the node to reflect + # the new status. + return self.fetch(session) + + return super().commit(session, *args, **kwargs) + + def set_provision_state( + self, + session, + target, + config_drive=None, + clean_steps=None, + rescue_password=None, + wait=False, + timeout=None, + deploy_steps=None, + service_steps=None, + runbook=None, + ): + """Run an action modifying this node's provision state. + + This call is asynchronous, it will return success as soon as the Bare + Metal service acknowledges the request. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param target: Provisioning action, e.g. ``active``, ``provide``. + See the Bare Metal service documentation for available actions. + :param config_drive: Config drive to pass to the node, only valid + for ``active` and ``rebuild`` targets. You can use functions from + :mod:`openstack.baremetal.configdrive` to build it. + :param clean_steps: Clean steps to execute, only valid for ``clean`` + target. + :param rescue_password: Password for the rescue operation, only valid + for ``rescue`` target. + :param wait: Whether to wait for the target state to be reached. 
+ :param timeout: Timeout (in seconds) to wait for the target state to be + reached. If ``None``, wait without timeout. + :param deploy_steps: Deploy steps to execute, only valid for ``active`` + and ``rebuild`` target. + :param service_steps: Service steps to execute, only valid for + ``service`` target. + :param ``runbook``: UUID or logical name of a runbook. + + :return: This :class:`Node` instance. + :raises: ValueError if ``config_drive``, ``clean_steps``, + ``deploy_steps`` or ``rescue_password`` are provided with an + invalid ``target``. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the node + reaches an error state while waiting for the state. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if timeout + is reached while waiting for the state. + """ + session = self._get_session(session) + + microversion = None + if target in _common.PROVISIONING_VERSIONS: + microversion = f'1.{_common.PROVISIONING_VERSIONS[target]}' + + if config_drive: + # Some config drive actions require a higher version. + if isinstance(config_drive, dict): + microversion = _common.CONFIG_DRIVE_DICT_VERSION + elif target == 'rebuild': + microversion = _common.CONFIG_DRIVE_REBUILD_VERSION + + if deploy_steps: + microversion = _common.DEPLOY_STEPS_VERSION + + microversion = self._assert_microversion_for(session, microversion) + + body = {'target': target} + if runbook: + microversion = self._assert_microversion_for( + session, _common.RUNBOOKS_VERSION + ) + + if clean_steps is not None: + raise ValueError( + 'Please provide either clean steps or a ' + 'runbook, but not both.' + ) + if service_steps is not None: + raise ValueError( + 'Please provide either service steps or a ' + 'runbook, but not both.' 
+            )
+
+            if target != 'clean' and target != 'service':
+                msg = (
+                    'A runbook can only be provided when setting target '
+                    'provision state to any of "[clean, service]"'
+                )
+                raise ValueError(msg)
+
+            body['runbook'] = runbook
+
+        if config_drive:
+            if target not in ('active', 'rebuild'):
+                raise ValueError(
+                    'Config drive can only be provided with '
+                    '"active" and "rebuild" targets'
+                )
+            if isinstance(config_drive, bytes):
+                try:
+                    config_drive = config_drive.decode('utf-8')
+                except UnicodeError:
+                    raise ValueError(
+                        'Config drive must be a dictionary or a base64 '
+                        'encoded string'
+                    )
+            # Not a typo - ironic accepts "configdrive" (without underscore)
+            body['configdrive'] = config_drive
+
+        if clean_steps is not None:
+            if target != 'clean':
+                raise ValueError(
+                    'Clean steps can only be provided with "clean" target'
+                )
+            body['clean_steps'] = clean_steps
+
+        if deploy_steps is not None:
+            if target not in ('active', 'rebuild'):
+                # NOTE: the guard above accepts "active"/"rebuild", so the
+                # error message must name those targets, not "deploy".
+                raise ValueError(
+                    'Deploy steps can only be provided with '
+                    '"active" and "rebuild" targets'
+                )
+            body['deploy_steps'] = deploy_steps
+
+        if service_steps is not None:
+            if target != 'service':
+                raise ValueError(
+                    'Service steps can only be provided with "service" target'
+                )
+            body['service_steps'] = service_steps
+
+        if rescue_password is not None:
+            if target != 'rescue':
+                raise ValueError(
+                    'Rescue password can only be provided with "rescue" target'
+                )
+            body['rescue_password'] = rescue_password
+
+        if wait:
+            try:
+                expected_state = _common.EXPECTED_STATES[target]
+            except KeyError:
+                raise ValueError(
+                    f'For target {target} the expected state is not '
+                    f'known, cannot wait for it'
+                )
+
+        request = self._prepare_request(requires_id=True)
+        request.url = utils.urljoin(request.url, 'states', 'provision')
+        response = session.put(
+            request.url,
+            json=body,
+            headers=request.headers,
+            microversion=microversion,
+            retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
+        )
+
+        msg = (
+            f"Failed to set provision state for bare metal node {self.id} "
+            f"to {target}"
+        )
+        exceptions.raise_from_response(response, error_message=msg)
+
+        if wait:
+            return self.wait_for_provision_state(
+                session, expected_state, timeout=timeout
+            )
+        else:
+            return self.fetch(session)
+
+    def wait_for_power_state(self, session, expected_state, timeout=None):
+        """Wait for the node to reach the expected power state.
+
+        :param session: The session to use for making this request.
+        :type session: :class:`~keystoneauth1.adapter.Adapter`
+        :param expected_state: The expected power state to reach.
+        :param timeout: If ``wait`` is set to ``True``, specifies how much (in
+            seconds) to wait for the expected state to be reached. The value of
+            ``None`` (the default) means no client-side timeout.
+
+        :return: This :class:`Node` instance.
+        :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
+        """
+        for count in utils.iterate_timeout(
+            timeout,
+            f"Timeout waiting for node {self.id} to reach "
+            f"power state '{expected_state}'",
+        ):
+            self.fetch(session)
+            if self.power_state == expected_state:
+                return self
+
+            session.log.debug(
+                'Still waiting for node %(node)s to reach power state '
+                '"%(target)s", the current state is "%(state)s"',
+                {
+                    'node': self.id,
+                    'target': expected_state,
+                    'state': self.power_state,
+                },
+            )
+
+    def wait_for_provision_state(
+        self, session, expected_state, timeout=None, abort_on_failed_state=True
+    ):
+        """Wait for the node to reach the expected state.
+
+        :param session: The session to use for making this request.
+        :type session: :class:`~keystoneauth1.adapter.Adapter`
+        :param expected_state: The expected provisioning state to reach.
+        :param timeout: If ``wait`` is set to ``True``, specifies how much (in
+            seconds) to wait for the expected state to be reached. The value of
+            ``None`` (the default) means no client-side timeout.
+        :param abort_on_failed_state: If ``True`` (the default), abort waiting
+            if the node reaches a failure state which does not match the
+            expected one. Note that the failure state for ``enroll`` ->
+            ``manageable`` transition is ``enroll`` again.
+
+        :return: This :class:`Node` instance.
+        :raises: :class:`~openstack.exceptions.ResourceFailure` if the node
+            reaches an error state and ``abort_on_failed_state`` is ``True``.
+        :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout.
+        """
+        for count in utils.iterate_timeout(
+            timeout,
+            f"Timeout waiting for node {self.id} to reach "
+            f"target state '{expected_state}'",
+        ):
+            self.fetch(session)
+            if self._check_state_reached(
+                session, expected_state, abort_on_failed_state
+            ):
+                return self
+
+            session.log.debug(
+                'Still waiting for node %(node)s to reach state '
+                '"%(target)s", the current state is "%(state)s"',
+                {
+                    'node': self.id,
+                    'target': expected_state,
+                    'state': self.provision_state,
+                },
+            )
+
+    def wait_for_reservation(self, session, timeout=None):
+        """Wait for a lock on the node to be released.
+
+        Bare metal nodes in ironic have a reservation lock that
+        is used to represent that a conductor has locked the node
+        while performing some sort of action, such as changing
+        configuration as a result of a machine state change.
+
+        This lock can occur during power synchronization, and prevents
+        updates to objects attached to the node, such as ports.
+
+        Note that nothing prevents a conductor from acquiring the lock again
+        after this call returns, so it should be treated as best effort.
+
+        Returns immediately if there is no reservation on the node.
+
+        :param session: The session to use for making this request.
+        :type session: :class:`~keystoneauth1.adapter.Adapter`
+        :param timeout: How much (in seconds) to wait for the lock to be
+            released. The value of ``None`` (the default) means no timeout.
+
+        :return: This :class:`Node` instance.
+ """ + if self.reservation is None: + return self + + for count in utils.iterate_timeout( + timeout, + f"Timeout waiting for the lock to be released on node {self.id}", + ): + self.fetch(session) + if self.reservation is None: + return self + + session.log.debug( + 'Still waiting for the lock to be released on node ' + '%(node)s, currently locked by conductor %(host)s', + {'node': self.id, 'host': self.reservation}, + ) + + def _check_state_reached( + self, session, expected_state, abort_on_failed_state=True + ): + """Wait for the node to reach the expected state. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param expected_state: The expected provisioning state to reach. + :param abort_on_failed_state: If ``True`` (the default), abort waiting + if the node reaches a failure state which does not match the + expected one. Note that the failure state for ``enroll`` -> + ``manageable`` transition is ``enroll`` again. + + :return: ``True`` if the target state is reached + :raises: :class:`~openstack.exceptions.ResourceFailure` if the node + reaches an error state and ``abort_on_failed_state`` is ``True``. 
+ """ + # NOTE(dtantsur): microversion 1.2 changed None to available + if self.provision_state == expected_state or ( + expected_state == 'available' and self.provision_state is None + ): + return True + elif not abort_on_failed_state: + return False + + if ( + self.provision_state.endswith(' failed') + or self.provision_state == 'error' + ): + raise exceptions.ResourceFailure( + f"Node {self.id} reached failure state " + f"'{self.provision_state}'; the last error is " + f"{self.last_error}" + ) + # Special case: a failure state for "manage" transition can be + # "enroll" + elif ( + expected_state == 'manageable' + and self.provision_state == 'enroll' + and self.last_error + ): + raise exceptions.ResourceFailure( + f"Node {self.id} could not reach state manageable: " + "failed to verify management credentials; " + f"the last error is {self.last_error}" + ) + + def inject_nmi(self, session): + """Inject NMI. + + :param session: The session to use for making this request. + :return: None + """ + session = self._get_session(session) + version = self._assert_microversion_for( + session, _common.INJECT_NMI_VERSION + ) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'management', 'inject_nmi') + + response = session.put( + request.url, + json={}, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to inject NMI to node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def set_power_state(self, session, target, wait=False, timeout=None): + """Run an action modifying this node's power state. + + This call is asynchronous, it will return success as soon as the Bare + Metal service acknowledges the request. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param target: Target power state, as a :class:`PowerAction` or + a string. 
+ :param wait: Whether to wait for the expected power state to be + reached. + :param timeout: Timeout (in seconds) to wait for the target state to be + reached. If ``None``, wait without timeout. + """ + if isinstance(target, PowerAction): + target = target.value + if wait: + try: + expected = _common.EXPECTED_POWER_STATES[target] + except KeyError: + raise ValueError( + f"Cannot use target power state {target} with wait, " + f"the expected state is not known" + ) + + session = self._get_session(session) + + if target.startswith("soft "): + microversion = '1.27' + else: + microversion = None + + microversion = self._assert_microversion_for(session, microversion) + + # TODO(dtantsur): server timeout support + body = {'target': target} + + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'states', 'power') + response = session.put( + request.url, + json=body, + headers=request.headers, + microversion=microversion, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = ( + f"Failed to set power state for bare metal node {self.id} " + f"to {target}" + ) + exceptions.raise_from_response(response, error_message=msg) + + if wait: + self.wait_for_power_state(session, expected, timeout=timeout) + + def attach_vmedia( + self, + session, + device_type, + image_url, + image_download_source=None, + retry_on_conflict=True, + ): + """Attach virtual media device to a node. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param device_type: The type of virtual media device. + :param image_url: The URL of the image to attach. + :param image_download_source: The source of the image download. + :param retry_on_conflict: Whether to retry HTTP CONFLICT errors. + This can happen when either the virtual media is already used on + a node or the node is locked. Since the latter happens more often, + the default value is True. 
+ :return: ``None`` + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VMEDIA API. + + """ + session = self._get_session(session) + version = self._assert_microversion_for( + session, + _common.VMEDIA_VERSION, + error_message=("Cannot use virtual media API"), + ) + # Prepare the request and create the request body + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'vmedia') + body = {"device_type": device_type, "image_url": image_url} + if image_download_source: + body["image_download_source"] = image_download_source + retriable_status_codes = _common.RETRIABLE_STATUS_CODES + if not retry_on_conflict: + retriable_status_codes = list(set(retriable_status_codes) - {409}) + response = session.post( + request.url, + json=body, + headers=request.headers, + microversion=version, + retriable_status_codes=retriable_status_codes, + ) + + msg = f"Failed to attach Virtual Media to bare metal node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def detach_vmedia(self, session, device_types=None): + """Detach virtual media from a node + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param device_types: A list with the types of virtual media + devices to detach. + :return: ``True`` if the virtual media was detached, + otherwise ``False``. 
+ :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VMEDIA API + """ + session = self._get_session(session) + version = self._assert_microversion_for( + session, + _common.VMEDIA_VERSION, + error_message=("Cannot use virtual media API"), + ) + + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'vmedia') + + delete_kwargs = { + 'headers': request.headers, + 'microversion': version, + 'retriable_status_codes': _common.RETRIABLE_STATUS_CODES, + } + + if device_types: + delete_kwargs['json'] = { + 'device_types': _common.comma_separated_list(device_types) + } + + response = session.delete(request.url, **delete_kwargs) + + if response.status_code == 400: + session.log.debug( + "Virtual media doesn't exist for node %(node)s", + {'node': self.id}, + ) + + msg = f"Failed to detach virtual media from bare metal node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def attach_vif( + self, + session: adapter.Adapter, + vif_id: str, + retry_on_conflict: bool = True, + *, + port_id: str | None = None, + port_group_id: str | None = None, + ) -> None: + """Attach a VIF to the node. + + The exact form of the VIF ID depends on the network interface used by + the node. In the most common case it is a Network service port + (NOT a Bare Metal port) ID. A VIF can only be attached to one node + at a time. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param vif_id: Backend-specific VIF ID. + :param retry_on_conflict: Whether to retry HTTP CONFLICT errors. + This can happen when either the VIF is already used on a node or + the node is locked. Since the latter happens more often, the + default value is True. + :param port_id: The UUID of the port to attach the VIF to. Only one of + port_id or port_group_id can be provided. + :param port_group_id: The UUID of the portgroup to attach to. 
Only one + of port_group_id or port_id can be provided. + :return: None + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VIF API. + :raises: :exc:`~openstack.exceptions.InvalidRequest` if both port_id + and port_group_id are provided. + """ + if port_id and port_group_id: + msg = ( + 'Only one of vif_port_id and vif_portgroup_id can be provided' + ) + raise exceptions.InvalidRequest(msg) + + session = self._get_session(session) + if port_id or port_group_id: + microversion = _common.VIF_OPTIONAL_PARAMS_VERSION + else: + microversion = _common.VIF_VERSION + microversion = self._assert_microversion_for( + session, + microversion, + error_message=("Cannot use VIF attachment API"), + ) + + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'vifs') + body = {'id': vif_id} + if port_id: + body['port_uuid'] = port_id + elif port_group_id: + body['portgroup_uuid'] = port_group_id + retriable_status_codes = _common.RETRIABLE_STATUS_CODES + if not retry_on_conflict: + retriable_status_codes = list(set(retriable_status_codes) - {409}) + response = session.post( + request.url, + json=body, + headers=request.headers, + microversion=microversion, + retriable_status_codes=retriable_status_codes, + ) + + msg = f"Failed to attach VIF {vif_id} to bare metal node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def detach_vif(self, session, vif_id, ignore_missing=True): + """Detach a VIF from the node. + + The exact form of the VIF ID depends on the network interface used by + the node. In the most common case it is a Network service port + (NOT a Bare Metal port) ID. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param string vif_id: Backend-specific VIF ID. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the VIF does not exist. Otherwise, ``False`` + is returned. + :return: ``True`` if the VIF was detached, otherwise ``False``. + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VIF API. + """ + session = self._get_session(session) + version = self._assert_microversion_for( + session, + _common.VIF_VERSION, + error_message=("Cannot use VIF attachment API"), + ) + + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'vifs', vif_id) + response = session.delete( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + if ignore_missing and response.status_code == 400: + session.log.debug( + 'VIF %(vif)s was already removed from node %(node)s', + {'vif': vif_id, 'node': self.id}, + ) + return False + + msg = f"Failed to detach VIF {vif_id} from bare metal node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + return True + + def list_vifs(self, session): + """List IDs of VIFs attached to the node. + + The exact form of the VIF ID depends on the network interface used by + the node. In the most common case it is a Network service port + (NOT a Bare Metal port) ID. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :return: List of VIF IDs as strings. + :raises: :exc:`~openstack.exceptions.NotSupported` if the server + does not support the VIF API. 
+ """ + session = self._get_session(session) + version = self._assert_microversion_for( + session, + _common.VIF_VERSION, + error_message=("Cannot use VIF attachment API"), + ) + + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'vifs') + response = session.get( + request.url, headers=request.headers, microversion=version + ) + + msg = f"Failed to list VIFs attached to bare metal node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + return [vif['id'] for vif in response.json()['vifs']] + + def validate(self, session, required=('boot', 'deploy', 'power')): + """Validate required information on the node. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param required: List of interfaces that are required to pass + validation. The default value is the list of minimum required + interfaces for provisioning. + + :return: dict mapping interface names to :class:`ValidationResult` + objects. + :raises: :exc:`~openstack.exceptions.ValidationException` if validation + fails for a required interface. 
+ """ + session = self._get_session(session) + version = self._get_microversion(session) + + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'validate') + response = session.get( + request.url, headers=request.headers, microversion=version + ) + + msg = f"Failed to validate node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + result = response.json() + + if required: + failed = [ + '{} ({})'.format(key, value.get('reason', 'no reason')) + for key, value in result.items() + if key in required and not value.get('result') + ] + + if failed: + raise exceptions.ValidationException( + 'Validation failed for required interfaces of node ' + '{node}: {failures}'.format( + node=self.id, failures=', '.join(failed) + ) + ) + + return { + key: ValidationResult(value.get('result'), value.get('reason')) + for key, value in result.items() + } + + def set_maintenance(self, session, reason=None): + """Enable maintenance mode on the node. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param reason: Optional reason for maintenance. + :return: This :class:`Node` instance. + """ + self._do_maintenance_action(session, 'put', {'reason': reason}) + return self.fetch(session) + + def unset_maintenance(self, session): + """Disable maintenance mode on the node. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :return: This :class:`Node` instance. 
+ """ + self._do_maintenance_action(session, 'delete') + return self.fetch(session) + + def _do_maintenance_action(self, session, verb, body=None): + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'maintenance') + response = getattr(session, verb)( + request.url, + json=body, + headers=request.headers, + microversion=version, + ) + msg = f"Failed to change maintenance mode for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def get_boot_device(self, session): + """Get node boot device. + + :param session: The session to use for making this request. + :returns: The HTTP response. + """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'management', 'boot_device') + + response = session.get( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to get boot device for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + return response.json() + + def set_boot_device(self, session, boot_device, persistent=False): + """Set node boot device + + :param session: The session to use for making this request. + :param boot_device: Boot device to assign to the node. 
+        :param persistent: If the boot device change is maintained after node
+            reboot
+        :returns: ``None``
+        """
+        session = self._get_session(session)
+        version = self._get_microversion(session)
+        request = self._prepare_request(requires_id=True)
+        request.url = utils.urljoin(request.url, 'management', 'boot_device')
+
+        body = {'boot_device': boot_device, 'persistent': persistent}
+
+        response = session.put(
+            request.url,
+            json=body,
+            headers=request.headers,
+            microversion=version,
+            retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
+        )
+
+        msg = f"Failed to set boot device for node {self.id}"
+        exceptions.raise_from_response(response, error_message=msg)
+
+    def get_supported_boot_devices(self, session):
+        """Get supported boot devices for the node.
+
+        :param session: The session to use for making this request.
+        :returns: The HTTP response.
+        """
+        session = self._get_session(session)
+        version = self._get_microversion(session)
+        request = self._prepare_request(requires_id=True)
+        request.url = utils.urljoin(
+            request.url,
+            'management',
+            'boot_device',
+            'supported',
+        )
+
+        response = session.get(
+            request.url,
+            headers=request.headers,
+            microversion=version,
+            retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
+        )
+
+        msg = f"Failed to get supported boot devices for node {self.id}"
+        exceptions.raise_from_response(response, error_message=msg)
+
+        return response.json()
+
+    def set_boot_mode(self, session, target):
+        """Make a request to change node's boot mode
+
+        This call is asynchronous, it will return success as soon as the Bare
+        Metal service acknowledges the request.
+
+        :param session: The session to use for making this request.
+        :param target: Boot mode to set for node, one of either 'uefi'/'bios'.
+        :returns: ``None``
+        :raises: ValueError if ``target`` is not one of 'uefi' or 'bios'.
+        """
+        session = self._get_session(session)
+        version = utils.pick_microversion(
+            session, _common.CHANGE_BOOT_MODE_VERSION
+        )
+        request = self._prepare_request(requires_id=True)
+        request.url = utils.urljoin(request.url, 'states', 'boot_mode')
+        if target not in ('uefi', 'bios'):
+            # Trailing space needed: adjacent literals are concatenated.
+            raise ValueError(
+                f"Unrecognized boot mode {target}. "
+                f"Boot mode should be one of 'uefi' or 'bios'."
+            )
+        body = {'target': target}
+
+        response = session.put(
+            request.url,
+            json=body,
+            headers=request.headers,
+            microversion=version,
+            retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
+        )
+
+        msg = f"Failed to change boot mode for node {self.id}"
+        exceptions.raise_from_response(response, error_message=msg)
+
+    def set_secure_boot(self, session, target):
+        """Make a request to change node's secure boot state
+
+        This call is asynchronous, it will return success as soon as the Bare
+        Metal service acknowledges the request.
+
+        :param session: The session to use for making this request.
+        :param bool target: Boolean indicating secure boot state to set.
+            True/False corresponding to 'on'/'off' respectively.
+        :returns: ``None``
+        :raises: ValueError if ``target`` is not boolean.
+        """
+        session = self._get_session(session)
+        version = utils.pick_microversion(
+            session, _common.CHANGE_BOOT_MODE_VERSION
+        )
+        request = self._prepare_request(requires_id=True)
+        request.url = utils.urljoin(request.url, 'states', 'secure_boot')
+        if not isinstance(target, bool):
+            raise ValueError(
+                f"Invalid target {target}. It should be True or False "
+                f"corresponding to secure boot state 'on' or 'off'"
+            )
+        body = {'target': target}
+
+        response = session.put(
+            request.url,
+            json=body,
+            headers=request.headers,
+            microversion=version,
+            retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
+        )
+
+        msg = f"Failed to change secure boot state for {self.id}"
+        exceptions.raise_from_response(response, error_message=msg)
+
+    def add_trait(self, session, trait):
+        """Add a trait to the node.
+
+        :param session: The session to use for making this request.
+        :param trait: The trait to add to the node.
+        :returns: ``None``
+        """
+        session = self._get_session(session)
+        version = utils.pick_microversion(session, '1.37')
+        request = self._prepare_request(requires_id=True)
+        request.url = utils.urljoin(request.url, 'traits', trait)
+        response = session.put(
+            request.url,
+            json=None,
+            headers=request.headers,
+            microversion=version,
+            retriable_status_codes=_common.RETRIABLE_STATUS_CODES,
+        )
+
+        msg = f"Failed to add trait {trait} for node {self.id}"
+        exceptions.raise_from_response(response, error_message=msg)
+
+        self.traits = list(set(self.traits or ()) | {trait})
+
+    def remove_trait(self, session, trait, ignore_missing=True):
+        """Remove a trait from the node.
+
+        :param session: The session to use for making this request.
+        :param trait: The trait to remove from the node.
+        :param bool ignore_missing: When set to ``False``
+            :class:`~openstack.exceptions.NotFoundException` will be
+            raised when the trait does not exist.
+            Otherwise, ``False`` is returned.
+        :returns bool: True on success removing the trait.
+            False when the trait does not exist already.
+ """ + session = self._get_session(session) + version = utils.pick_microversion(session, '1.37') + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'traits', trait) + + response = session.delete( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + if ignore_missing and response.status_code == 400: + session.log.debug( + 'Trait %(trait)s was already removed from node %(node)s', + {'trait': trait, 'node': self.id}, + ) + return False + + msg = "Failed to remove trait {trait} from bare metal node {node}" + exceptions.raise_from_response( + response, + error_message=msg.format(node=self.id, trait=trait), + ) + + if self.traits: + self.traits = list(set(self.traits) - {trait}) + + return True + + def set_traits(self, session, traits): + """Set traits for the node. + + Removes any existing traits and adds the traits passed in to this + method. + + :param session: The session to use for making this request. + :param traits: list of traits to add to the node. + :returns: ``None`` + """ + session = self._get_session(session) + version = utils.pick_microversion(session, '1.37') + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'traits') + + body = {'traits': traits} + + response = session.put( + request.url, + json=body, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to set traits for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + self.traits = traits + + def call_vendor_passthru(self, session, verb, method, body=None): + """Call a vendor passthru method. + + :param session: The session to use for making this request. + :param verb: The HTTP verb, one of GET, SET, POST, DELETE. + :param method: The method to call using vendor_passthru. + :param body: The JSON body in the HTTP call. 
+ :returns: The HTTP response. + """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin( + request.url, f'vendor_passthru?method={method}' + ) + + call = getattr(session, verb.lower()) + response = call( + request.url, + json=body, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = ( + f"Failed to call vendor_passthru for node {self.id}, verb {verb} " + f"and method {method}" + ) + exceptions.raise_from_response(response, error_message=msg) + + return response + + def list_vendor_passthru(self, session): + """List vendor passthru methods for the node. + + :param session: The session to use for making this request. + :returns: The HTTP response. + """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'vendor_passthru/methods') + + response = session.get( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to list vendor_passthru methods for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + return response.json() + + def get_console(self, session): + """Get the node console. + + :param session: The session to use for making this request. + :returns: The HTTP response. 
+ """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'states', 'console') + + response = session.get( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to get console for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + return response.json() + + def set_console_mode(self, session, enabled): + """Set the node console mode. + + :param session: The session to use for making this request. + :param enabled: Whether the console should be enabled or not. + :return: ``None`` + """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'states', 'console') + if not isinstance(enabled, bool): + raise ValueError( + f"Invalid enabled {enabled}. It should be True or False " + f"corresponding to console enabled or disabled" + ) + body = {'enabled': enabled} + + response = session.put( + request.url, + json=body, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to change console mode for {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def get_node_inventory(self, session, node_id): + """Get a node's inventory. + + :param session: The session to use for making this request. + :param node_id: **DEPRECATED** The ID of the node. + :returns: The HTTP response. 
+ """ + if node_id is not None: + warnings.warn( + "The 'node_id' field is unnecessary and will be removed in " + "a future release.", + os_warnings.RemovedInSDK60Warning, + ) + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'inventory') + + response = session.get( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to get inventory for node {node_id}" + exceptions.raise_from_response(response, error_message=msg) + return response.json() + + def list_firmware(self, session): + """List firmware components associated with the node. + + :param session: The session to use for making this request. + :returns: The HTTP response. + """ + session = self._get_session(session) + version = self._assert_microversion_for( + session, + _common.FIRMWARE_VERSION, + error_message=("Cannot use node list firmware components API"), + ) + + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'firmware') + + response = session.get( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + msg = f"Failed to list firmware components for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + return response.json() + + def patch( + self, + session, + patch=None, + prepend_key=True, + has_body=True, + retry_on_conflict=None, + base_path=None, + *, + microversion=None, + reset_interfaces=None, + ): + if reset_interfaces is not None: + # The id cannot be dirty for an commit + self._body._dirty.discard("id") + + # Only try to update if we actually have anything to commit. 
+ if not patch and not self.requires_commit: + return self + + if not self.allow_patch: + raise exceptions.MethodNotSupported(self, "patch") + + session = self._get_session(session) + microversion = self._assert_microversion_for( + session, _common.RESET_INTERFACES_VERSION + ) + params = [('reset_interfaces', reset_interfaces)] + + request = self._prepare_request( + requires_id=True, + prepend_key=prepend_key, + base_path=base_path, + patch=True, + params=params, + ) + + if patch: + request.body += self._convert_patch(patch) + + return self._commit( + session, + request, + 'PATCH', + microversion, + has_body=has_body, + retry_on_conflict=retry_on_conflict, + ) + + else: + return super().patch( + session, patch=patch, retry_on_conflict=retry_on_conflict + ) + + +NodeDetail = Node diff --git a/openstack/baremetal/v1/port.py b/openstack/baremetal/v1/port.py new file mode 100644 index 0000000000..f0d5c55ac7 --- /dev/null +++ b/openstack/baremetal/v1/port.py @@ -0,0 +1,98 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal.v1 import _common +from openstack import resource + + +class Port(_common.Resource): + resources_key = 'ports' + base_path = '/ports' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'address', + 'conductor_groups', + 'node', + 'portgroup', + 'shard', + fields={'type': _common.fields_type}, + node_id='node_uuid', + ) + + # The physical_network field introduced in 1.34 + # The is_smartnic field added in 1.53 + # Query filter by shard added in 1.82 + # The name field added in 1.88 + # The description field added in 1.97 + # Filter by conductor_groups added in 1.99 + # The vendor field added in 1.100 + # The category field added in 1.101 + _max_microversion = '1.101' + + #: The physical hardware address of the network port, typically the + #: hardware MAC address. + address = resource.Body('address') + #: The category for the port + category = resource.Body('category') + #: Timestamp at which the port was created. + created_at = resource.Body('created_at') + #: The description for the port + description = resource.Body('description') + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body('extra') + #: The UUID of the port + id = resource.Body('uuid', alternate_id=True) + #: Internal metadata set and stored by the port. This field is read-only. + #: Added in API microversion 1.18. + internal_info = resource.Body('internal_info') + #: Whether PXE is enabled on the port. Added in API microversion 1.19. + is_pxe_enabled = resource.Body('pxe_enabled', type=bool) + #: Whether the port is a Smart NIC port. Added in API microversion 1.53. + is_smartnic = resource.Body('is_smartnic', type=bool) + #: A list of relative links, including the self and bookmark links. 
+ links = resource.Body('links', type=list) + #: The port bindig profile. If specified, must contain ``switch_id`` and + #: ``port_id`` fields. ``switch_info`` field is an optional string field + #: to be used to store vendor specific information. Added in API + #: microversion 1.19. + local_link_connection = resource.Body('local_link_connection') + #: The name of the port + name = resource.Body('name') + #: The UUID of node this port belongs to + node_id = resource.Body('node_uuid') + #: The name of physical network this port is attached to. + #: Added in API microversion 1.34. + physical_network = resource.Body('physical_network') + #: The UUID of PortGroup this port belongs to. Added in API microversion + #: 1.24. + port_group_id = resource.Body('portgroup_uuid') + #: Read-only. The parent port trunk details dictionary, with the trunk ID + # and the subports information (port ID, segmentation ID and segmentation + # type). + trunk_details = resource.Body('trunk_details', type=dict) + #: Timestamp at which the port was last updated. + updated_at = resource.Body('updated_at') + #: The vendor for the port + vendor = resource.Body('vendor') + + +PortDetail = Port diff --git a/openstack/baremetal/v1/port_group.py b/openstack/baremetal/v1/port_group.py new file mode 100644 index 0000000000..eba7a077bf --- /dev/null +++ b/openstack/baremetal/v1/port_group.py @@ -0,0 +1,73 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal.v1 import _common +from openstack import resource + + +class PortGroup(_common.Resource): + resources_key = 'portgroups' + base_path = '/portgroups' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'node', + 'address', + fields={'type': _common.fields_type}, + ) + + # The mode and properties field introduced in 1.26. + _max_microversion = '1.26' + + #: The physical hardware address of the portgroup, typically the hardware + #: MAC address. Added in API microversion 1.23. + address = resource.Body('address') + #: Timestamp at which the portgroup was created. + created_at = resource.Body('created_at') + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body('extra', type=dict) + #: The name of the portgroup + name = resource.Body('name') + #: The UUID for the portgroup + id = resource.Body('uuid', alternate_id=True) + #: Internal metadaa set and stored by the portgroup. + internal_info = resource.Body('internal_info') + #: Whether ports that are members of this portgroup can be used as + #: standalone ports. Added in API microversion 1.23. + is_standalone_ports_supported = resource.Body( + 'standalone_ports_supported', type=bool + ) + #: A list of relative links, including the self and bookmark links. + links = resource.Body('links', type=list) + #: Port bonding mode. Added in API microversion 1.26. + mode = resource.Body('mode') + #: UUID of the node this portgroup belongs to. + node_id = resource.Body('node_uuid') + #: A list of links to the collection of ports belonging to this portgroup. + #: Added in API microversion 1.24. + ports = resource.Body('ports') + #: Port group properties. Added in API microversion 1.26. 
+ properties = resource.Body('properties', type=dict) + #: Timestamp at which the portgroup was last updated. + updated_at = resource.Body('updated_at') + + +PortGroupDetail = PortGroup diff --git a/openstack/baremetal/v1/runbooks.py b/openstack/baremetal/v1/runbooks.py new file mode 100644 index 0000000000..e2ae0dc023 --- /dev/null +++ b/openstack/baremetal/v1/runbooks.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import _common +from openstack import resource + + +class Runbook(_common.Resource): + resources_key = 'runbooks' + base_path = '/runbooks' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'detail', + fields={'type': _common.fields_type}, + ) + + # Runbooks is available since 1.92 + _max_microversion = '1.92' + name = resource.Body('name') + #: Timestamp at which the runbook was created. + created_at = resource.Body('created_at') + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body('extra') + #: A list of relative links. Includes the self and bookmark links. + links = resource.Body('links', type=list) + #: A set of physical information of the runbook. + steps = resource.Body('steps', type=list) + #: Indicates whether the runbook is publicly accessible. 
+ public = resource.Body('public', type=bool) + #: The name or ID of the project that owns the runbook. + owner = resource.Body('owner', type=str) + #: Timestamp at which the runbook was last updated. + updated_at = resource.Body('updated_at') + #: The UUID of the resource. + id = resource.Body('uuid', alternate_id=True) diff --git a/openstack/baremetal/v1/volume_connector.py b/openstack/baremetal/v1/volume_connector.py new file mode 100644 index 0000000000..60f20b634e --- /dev/null +++ b/openstack/baremetal/v1/volume_connector.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import _common +from openstack import resource + + +class VolumeConnector(resource.Resource): + resources_key = 'connectors' + base_path = '/volume/connectors' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'node', + 'detail', + fields={'type': _common.fields_type}, + ) + + # Volume Connectors is available since 1.32 + _max_microversion = '1.32' + + #: The identifier of Volume connector and this field depends on the "type" + # of the volume_connector + connector_id = resource.Body('connector_id') + #: Timestamp at which the port was created. + created_at = resource.Body('created_at') + #: A set of one or more arbitrary metadata key and value pairs. 
+ extra = resource.Body('extra') + #: A list of relative links, including the self and bookmark links. + links = resource.Body('links', type=list) + #: The UUID of node this port belongs to + node_id = resource.Body('node_uuid') + #: The types of Volume connector + type = resource.Body('type') + #: Timestamp at which the port was last updated. + updated_at = resource.Body('updated_at') + #: The UUID of the port + id = resource.Body('uuid', alternate_id=True) diff --git a/openstack/baremetal/v1/volume_target.py b/openstack/baremetal/v1/volume_target.py new file mode 100644 index 0000000000..e5050adf6f --- /dev/null +++ b/openstack/baremetal/v1/volume_target.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import _common +from openstack import resource + + +class VolumeTarget(resource.Resource): + resources_key = 'targets' + base_path = '/volume/targets' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'node', + 'detail', + fields={'type': _common.fields_type}, + ) + + # Volume Targets is available since 1.32 + _max_microversion = '1.32' + + #: The boot index of the Volume target. “0” indicates that this volume is + # used as a boot volume. + boot_index = resource.Body('boot_index') + #: Timestamp at which the port was created. 
+ created_at = resource.Body('created_at') + #: A set of one or more arbitrary metadata key and value pairs. + extra = resource.Body('extra') + #: A list of relative links. Includes the self and bookmark links. + links = resource.Body('links', type=list) + #: The UUID of the Node this resource belongs to. + node_id = resource.Body('node_uuid') + #: A set of physical information of the volume. + properties = resource.Body('properties') + #: Timestamp at which the port was last updated. + updated_at = resource.Body('updated_at') + #: The UUID of the resource. + id = resource.Body('uuid', alternate_id=True) + #: The identifier of the volume. + volume_id = resource.Body('volume_id') + #: The type of Volume target. + volume_type = resource.Body('volume_type') diff --git a/openstack/baremetal/version.py b/openstack/baremetal/version.py new file mode 100644 index 0000000000..9311893e5e --- /dev/null +++ b/openstack/baremetal/version.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class Version(resource.Resource): + resource_key = 'version' + resources_key = 'versions' + base_path = '/' + + # Capabilities + allow_list = True + + # Attributes + links = resource.Body('links') + status = resource.Body('status') + updated = resource.Body('updated') diff --git a/openstack/message/v1/__init__.py b/openstack/baremetal_introspection/__init__.py similarity index 100% rename from openstack/message/v1/__init__.py rename to openstack/baremetal_introspection/__init__.py diff --git a/openstack/baremetal_introspection/baremetal_introspection_service.py b/openstack/baremetal_introspection/baremetal_introspection_service.py new file mode 100644 index 0000000000..6ca452e876 --- /dev/null +++ b/openstack/baremetal_introspection/baremetal_introspection_service.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal_introspection.v1 import _proxy +from openstack import service_description + + +class BaremetalIntrospectionService( + service_description.ServiceDescription[_proxy.Proxy] +): + """The bare metal introspection service.""" + + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/metric/__init__.py b/openstack/baremetal_introspection/v1/__init__.py similarity index 100% rename from openstack/metric/__init__.py rename to openstack/baremetal_introspection/v1/__init__.py diff --git a/openstack/baremetal_introspection/v1/_proxy.py b/openstack/baremetal_introspection/v1/_proxy.py new file mode 100644 index 0000000000..b55963ea0a --- /dev/null +++ b/openstack/baremetal_introspection/v1/_proxy.py @@ -0,0 +1,301 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack import _log +from openstack.baremetal.v1 import node as _node +from openstack.baremetal_introspection.v1 import introspection as _introspect +from openstack.baremetal_introspection.v1 import ( + introspection_rule as _introspection_rule, +) +from openstack import exceptions +from openstack import proxy +from openstack import resource + + +_logger = _log.setup_logging('openstack') + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + _resource_registry = { + "introspection": _introspect.Introspection, + "introspection_rule": _introspection_rule.IntrospectionRule, + } + + # ========== Introspections ========== + + def introspections(self, **query): + """Retrieve a generator of introspection records. + + :param dict query: Optional query parameters to be sent to restrict + the records to be returned. Available parameters include: + + * ``fields``: A list containing one or more fields to be returned + in the response. This may lead to some performance gain + because other fields of the resource are not refreshed. + * ``limit``: Requests at most the specified number of items be + returned from the query. + * ``marker``: Specifies the ID of the last-seen introspection. Use + the ``limit`` parameter to make an initial limited request and + use the ID of the last-seen introspection from the response as + the ``marker`` value in a subsequent limited request. + * ``sort_dir``: Sorts the response by the requested sort direction. + A valid value is ``asc`` (ascending) or ``desc`` + (descending). Default is ``asc``. You can specify multiple + pairs of sort key and sort direction query parameters. If + you omit the sort direction in a pair, the API uses the + natural sorting direction of the server attribute that is + provided as the ``sort_key``. + * ``sort_key``: Sorts the response by the this attribute value. + Default is ``id``. 
You can specify multiple pairs of sort + key and sort direction query parameters. If you omit the + sort direction in a pair, the API uses the natural sorting + direction of the server attribute that is provided as the + ``sort_key``. + + :returns: A generator of :class:`~.introspection.Introspection` + objects + """ + return _introspect.Introspection.list(self, **query) + + def start_introspection(self, node, manage_boot=None): + """Create a new introspection from attributes. + + :param node: The value can be either the name or ID of a node or + a :class:`~openstack.baremetal.v1.node.Node` instance. + :param bool manage_boot: Whether to manage boot parameters for the + node. Defaults to the server default (which is `True`). + + :returns: :class:`~.introspection.Introspection` instance. + """ + node = self._get_resource(_node.Node, node) + res = _introspect.Introspection.new( + connection=self._get_connection(), id=node.id + ) + kwargs = {} + if manage_boot is not None: + kwargs['manage_boot'] = manage_boot + return res.create(self, **kwargs) + + def get_introspection(self, introspection): + """Get a specific introspection. + + :param introspection: The value can be the name or ID of an + introspection (matching bare metal node name or ID) or + an :class:`~.introspection.Introspection` instance. + :returns: :class:`~.introspection.Introspection` instance. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + introspection matching the name or ID could be found. + """ + return self._get(_introspect.Introspection, introspection) + + def get_introspection_data(self, introspection, processed=True): + """Get introspection data. + + :param introspection: The value can be the name or ID of an + introspection (matching bare metal node name or ID) or + an :class:`~.introspection.Introspection` instance. + :param processed: Whether to fetch the final processed data (the + default) or the raw unprocessed data as received from the ramdisk. 
+ :returns: introspection data from the most recent successful run. + :rtype: dict + """ + res = self._get_resource(_introspect.Introspection, introspection) + return res.get_data(self, processed=processed) + + def abort_introspection(self, introspection, ignore_missing=True): + """Abort an introspection. + + Note that the introspection is not aborted immediately, you may use + `wait_for_introspection` with `ignore_error=True`. + + :param introspection: The value can be the name or ID of an + introspection (matching bare metal node name or ID) or + an :class:`~.introspection.Introspection` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the introspection could not be found. When set to ``True``, no + exception will be raised when attempting to abort a non-existent + introspection. + :returns: nothing + """ + res = self._get_resource(_introspect.Introspection, introspection) + try: + res.abort(self) + except exceptions.NotFoundException: + if not ignore_missing: + raise + + def wait_for_introspection( + self, + introspection, + timeout=None, + ignore_error=False, + ): + """Wait for the introspection to finish. + + :param introspection: The value can be the name or ID of an + introspection (matching bare metal node name or ID) or + an :class:`~.introspection.Introspection` instance. + :param timeout: How much (in seconds) to wait for the introspection. + The value of ``None`` (the default) means no client-side timeout. + :param ignore_error: If ``True``, this call will raise an exception + if the introspection reaches the ``error`` state. Otherwise the + error state is considered successful and the call returns. + :returns: :class:`~.introspection.Introspection` instance. + :raises: :class:`~openstack.exceptions.ResourceFailure` if + introspection fails and ``ignore_error`` is ``False``. + :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. 
+ """ + res = self._get_resource(_introspect.Introspection, introspection) + return res.wait(self, timeout=timeout, ignore_error=ignore_error) + + # ========== Introspection ruless ========== + + def create_introspection_rule(self, **attrs): + """Create a new introspection rules from attributes. + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~.introspection_rule.IntrospectionRule`, + comprised of the properties on the IntrospectionRule class. + + :returns: :class:`~.introspection_rule.IntrospectionRule` instance. + """ + return self._create(_introspection_rule.IntrospectionRule, **attrs) + + def delete_introspection_rule( + self, + introspection_rule, + ignore_missing=True, + ): + """Delete an introspection rule. + + :param introspection_rule: The value can be either the ID of an + introspection rule or a + :class:`~.introspection_rule.IntrospectionRule` instance. + :param bool ignore_missing: When set to ``False``, an + exception:class:`~openstack.exceptions.NotFoundException` will be + raised when the introspection rule could not be found. When set to + ``True``, no exception will be raised when attempting to delete a + non-existent introspection rule. + + :returns: ``None`` + """ + self._delete( + _introspection_rule.IntrospectionRule, + introspection_rule, + ignore_missing=ignore_missing, + ) + + def get_introspection_rule(self, introspection_rule): + """Get a specific introspection rule. + + :param introspection_rule: The value can be the name or ID of an + introspection rule or a + :class:`~.introspection_rule.IntrospectionRule` instance. + + :returns: :class:`~.introspection_rule.IntrospectionRule` instance. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + introspection rule matching the name or ID could be found. + """ + return self._get( + _introspection_rule.IntrospectionRule, + introspection_rule, + ) + + def introspection_rules(self, **query): + """Retrieve a generator of introspection rules. 
+ + :param dict query: Optional query parameters to be sent to restrict + the records to be returned. Available parameters include: + + * ``uuid``: The UUID of the Ironic Inspector rule. + * ``limit``: List of a logic statementd or operations in rules, + that can be evaluated as True or False. + * ``actions``: List of operations that will be performed + if conditions of this rule are fulfilled. + * ``description``: Rule human-readable description. + * ``scope``: Scope of an introspection rule. If set, the rule + is only applied to nodes that have + matching inspection_scope property. + + :returns: A generator of + :class:`~.introspection_rule.IntrospectionRule` + objects + """ + return self._list(_introspection_rule.IntrospectionRule, **query) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. 
+ :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/baremetal_introspection/v1/introspection.py b/openstack/baremetal_introspection/v1/introspection.py new file mode 100644 index 0000000000..d268a5c989 --- /dev/null +++ b/openstack/baremetal_introspection/v1/introspection.py @@ -0,0 +1,142 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import _log +from openstack.baremetal.v1 import _common +from openstack import exceptions +from openstack import resource +from openstack import utils + + +_logger = _log.setup_logging('openstack') + + +class Introspection(resource.Resource): + resources_key = 'introspection' + base_path = '/introspection' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + + # created via POST with ID + create_method = 'POST' + create_requires_id = True + create_returns_body = False + + #: Timestamp at which the introspection was finished. + finished_at = resource.Body('finished_at') + #: The last error message (if any). + error = resource.Body('error') + #: The UUID of the introspection (matches the node UUID). + id = resource.Body('uuid', alternate_id=True) + #: Whether introspection is finished. + is_finished = resource.Body('finished', type=bool) + #: A list of relative links, including the self and bookmark links. + links = resource.Body('links', type=list) + #: Timestamp at which the introspection was started. + started_at = resource.Body('started_at') + #: The current introspection state. + state = resource.Body('state') + + def abort(self, session): + """Abort introspection. + + :param session: The session to use for making this request. 
+ :type session: :class:`~keystoneauth1.adapter.Adapter` + """ + if self.is_finished: + return + + session = self._get_session(session) + + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'abort') + response = session.post( + request.url, + headers=request.headers, + microversion=version, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + msg = f"Failed to abort introspection for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def get_data(self, session, processed=True): + """Get introspection data. + + Note that the introspection data format is not stable and can vary + from environment to environment. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param processed: Whether to fetch the final processed data (the + default) or the raw unprocessed data as received from the ramdisk. + :type processed: bool + :returns: introspection data from the most recent successful run. + :rtype: dict + """ + session = self._get_session(session) + + version = self._get_microversion(session) if processed else '1.17' + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'data') + if not processed: + request.url = utils.urljoin(request.url, 'unprocessed') + response = session.get( + request.url, headers=request.headers, microversion=version + ) + msg = f"Failed to fetch introspection data for node {self.id}" + exceptions.raise_from_response(response, error_message=msg) + return response.json() + + def wait(self, session, timeout=None, ignore_error=False): + """Wait for the node to reach the expected state. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param timeout: How much (in seconds) to wait for the introspection. 
+ The value of ``None`` (the default) means no client-side timeout. + :param ignore_error: If ``True``, this call will raise an exception + if the introspection reaches the ``error`` state. Otherwise the + error state is considered successful and the call returns. + :return: This :class:`Introspection` instance. + :raises: :class:`~openstack.exceptions.ResourceFailure` if + introspection fails and ``ignore_error`` is ``False``. + :raises: :class:`~openstack.exceptions.ResourceTimeout` on timeout. + """ + if self._check_state(ignore_error): + return self + + for count in utils.iterate_timeout( + timeout, f"Timeout waiting for introspection on node {self.id}" + ): + self.fetch(session) + if self._check_state(ignore_error): + return self + + _logger.debug( + 'Still waiting for introspection of node %(node)s, ' + 'the current state is "%(state)s"', + {'node': self.id, 'state': self.state}, + ) + + def _check_state(self, ignore_error): + if self.state == 'error' and not ignore_error: + raise exceptions.ResourceFailure( + f"Introspection of node {self.id} failed: {self.error}" + ) + else: + return self.is_finished diff --git a/openstack/baremetal_introspection/v1/introspection_rule.py b/openstack/baremetal_introspection/v1/introspection_rule.py new file mode 100644 index 0000000000..129426e2f2 --- /dev/null +++ b/openstack/baremetal_introspection/v1/introspection_rule.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class IntrospectionRule(resource.Resource): + resources_key = 'rules' + base_path = '/rules' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + + # created via POST with ID + create_method = 'POST' + create_requires_id = True + + #: The UUID of the resource. + id = resource.Body('uuid', alternate_id=True) + #: List of a logic statementd or operations in rules + conditions = resource.Body('conditions', type=list) + #: List of operations that will be performed if conditions of this rule + #: are fulfilled. + actions = resource.Body('actions', type=list) + #: Rule human-readable description + description = resource.Body('description') + #: Scope of an introspection rule + scope = resource.Body('scope') + #: A list of relative links, including the self and bookmark links. + links = resource.Body('links', type=list) diff --git a/openstack/metric/v1/__init__.py b/openstack/block_storage/__init__.py similarity index 100% rename from openstack/metric/v1/__init__.py rename to openstack/block_storage/__init__.py diff --git a/openstack/block_storage/block_storage_service.py b/openstack/block_storage/block_storage_service.py new file mode 100644 index 0000000000..e3d35e40fa --- /dev/null +++ b/openstack/block_storage/block_storage_service.py @@ -0,0 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v2 import _proxy as _v2_proxy +from openstack.block_storage.v3 import _proxy as _v3_proxy +from openstack import service_description + + +class BlockStorageService( + service_description.ServiceDescription[_v2_proxy.Proxy | _v3_proxy.Proxy] +): + """The block storage service.""" + + supported_versions = { + '3': _v3_proxy.Proxy, + '2': _v2_proxy.Proxy, + } diff --git a/openstack/telemetry/__init__.py b/openstack/block_storage/v2/__init__.py similarity index 100% rename from openstack/telemetry/__init__.py rename to openstack/block_storage/v2/__init__.py diff --git a/openstack/block_storage/v2/_proxy.py b/openstack/block_storage/v2/_proxy.py new file mode 100644 index 0000000000..c7c036d820 --- /dev/null +++ b/openstack/block_storage/v2/_proxy.py @@ -0,0 +1,1406 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty +import warnings + +from openstack.block_storage.v2 import backup as _backup +from openstack.block_storage.v2 import capabilities as _capabilities +from openstack.block_storage.v2 import extension as _extension +from openstack.block_storage.v2 import limits as _limits +from openstack.block_storage.v2 import quota_class_set as _quota_class_set +from openstack.block_storage.v2 import quota_set as _quota_set +from openstack.block_storage.v2 import service as _service +from openstack.block_storage.v2 import snapshot as _snapshot +from openstack.block_storage.v2 import stats as _stats +from openstack.block_storage.v2 import transfer as _transfer +from openstack.block_storage.v2 import type as _type +from openstack.block_storage.v2 import volume as _volume +from openstack import exceptions +from openstack.identity.v3 import project as _project +from openstack import proxy +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + # ========== Extensions ========== + + def extensions(self): + """Return a generator of extensions + + :returns: A generator of extension + :rtype: :class:`~openstack.block_storage.v2.extension.Extension` + """ + return self._list(_extension.Extension) + + # ========== Images ========== + + # TODO(stephenfin): Convert to use resources/proxy rather than direct calls + def create_image( + self, + name, + volume, + allow_duplicates, + container_format, + disk_format, + wait, + timeout, + ): + if not disk_format: + disk_format = self._connection.config.config['image_format'] + if not container_format: + # https://docs.openstack.org/image-guide/image-formats.html + container_format = 'bare' + + if 'id' in volume: + volume_id = volume['id'] + else: + volume_obj = self.get_volume(volume) + if not volume_obj: + raise exceptions.SDKException( + f"Volume {volume} given to create_image could not be 
found" + ) + volume_id = volume_obj['id'] + data = self.post( + f'/volumes/{volume_id}/action', + json={ + 'os-volume_upload_image': { + 'force': allow_duplicates, + 'image_name': name, + 'container_format': container_format, + 'disk_format': disk_format, + } + }, + ) + response = self._connection._get_and_munchify( + 'os-volume_upload_image', data + ) + return self._connection.image._existing_image(id=response['image_id']) + + # ========== Snapshots ========== + + def get_snapshot(self, snapshot): + """Get a single snapshot + + :param snapshot: The value can be the ID of a snapshot or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot` + instance. + + :returns: One :class:`~openstack.block_storage.v2.snapshot.Snapshot` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_snapshot.Snapshot, snapshot) + + def find_snapshot( + self, + name_or_id, + ignore_missing=True, + *, + details=True, + all_projects=False, + ): + """Find a single snapshot + + :param snapshot: The name or ID a snapshot + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the snapshot does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param bool details: When set to ``False``, an + :class:`~openstack.block_storage.v2.snapshot.Snapshot` object will + be returned. The default, ``True``, will cause an + :class:`~openstack.block_storage.v2.snapshot.SnapshotDetail` object + to be returned. + :param bool all_projects: When set to ``True``, search for snapshot by + name across all projects. Note that this will likely result in + a higher chance of duplicates. Admin-only by default. + + :returns: One :class:`~openstack.block_storage.v2.snapshot.Snapshot`, + one :class:`~openstack.block_storage.v2.snapshot.SnapshotDetail` + object, or None. 
+ :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + query = {} + if all_projects: + query['all_projects'] = True + list_base_path = '/snapshots/detail' if details else None + return self._find( + _snapshot.Snapshot, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + **query, + ) + + def snapshots(self, *, details=True, all_projects=False, **query): + """Retrieve a generator of snapshots + + :param bool details: When set to ``False`` + :class:`~openstack.block_storage.v2.snapshot.Snapshot` + objects will be returned. The default, ``True``, will cause + :class:`~openstack.block_storage.v2.snapshot.SnapshotDetail` + objects to be returned. + :param bool all_projects: When set to ``True``, list snapshots from all + projects. Admin-only by default. + :param kwargs query: Optional query parameters to be sent to limit + the snapshots being returned. Available parameters include: + + * name: Name of the snapshot as a string. + * volume_id: volume id of a snapshot. + * status: Value of the status of the snapshot so that you can + filter on "available" for example. + + :returns: A generator of snapshot objects. + """ + if all_projects: + query['all_projects'] = True + base_path = '/snapshots/detail' if details else None + return self._list(_snapshot.Snapshot, base_path=base_path, **query) + + def create_snapshot(self, **attrs): + """Create a new snapshot from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v2.snapshot.Snapshot`, + comprised of the properties on the Snapshot class. 
+ + :returns: The results of snapshot creation + :rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` + """ + return self._create(_snapshot.Snapshot, **attrs) + + def update_snapshot(self, snapshot, **attrs): + """Update a snapshot + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot` instance. + :param dict attrs: The attributes to update on the snapshot. + + :returns: The updated snapshot + :rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` + """ + return self._update(_snapshot.Snapshot, snapshot, **attrs) + + def delete_snapshot(self, snapshot, ignore_missing=True): + """Delete a snapshot + + :param snapshot: The value can be either the ID of a snapshot or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the snapshot does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent snapshot. + + :returns: ``None`` + """ + self._delete( + _snapshot.Snapshot, snapshot, ignore_missing=ignore_missing + ) + + # ========== Snapshot actions ========== + + def reset_snapshot_status(self, snapshot, status): + """Reset status of the snapshot + + :param snapshot: The value can be either the ID of a backup or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot` instance. 
+ :param str status: New snapshot status + + :returns: None + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + snapshot.reset_status(self, status) + + def reset_snapshot(self, snapshot, status): + warnings.warn( + "reset_snapshot is a deprecated alias for reset_snapshot_status " + "and will be removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.reset_snapshot_status(snapshot, status) + + def manage_snapshot(self, **attrs): + """Creates a snapshot by using existing storage rather than + allocating new storage. + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v2.snapshot.Snapshot`, + comprised of the properties on the Snapshot class. + + :returns: The results of snapshot creation + :rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` + """ + return _snapshot.Snapshot.manage(self, **attrs) + + def unmanage_snapshot(self, snapshot): + """Unmanage a snapshot from block storage provisioning. + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot`. + + :returns: None + """ + snapshot_obj = self._get_resource(_snapshot.Snapshot, snapshot) + snapshot_obj.unmanage(self) + + # ========== Types ========== + + def get_type(self, type): + """Get a single type + + :param type: The value can be the ID of a type or a + :class:`~openstack.block_storage.v2.type.Type` instance. + + :returns: One :class:`~openstack.block_storage.v2.type.Type` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_type.Type, type) + + def find_type(self, name_or_id, ignore_missing=True): + """Find a single volume type + + :param snapshot: The name or ID a volume type + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.ResourceNotFound` will be raised + when the volume type does not exist. 
+ + :returns: One :class:`~openstack.block_storage.v2.type.Type` + :raises: :class:`~openstack.exceptions.ResourceNotFound` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + _type.Type, + name_or_id, + ignore_missing=ignore_missing, + ) + + def types(self, **query): + """Retrieve a generator of volume types + + :returns: A generator of volume type objects. + """ + return self._list(_type.Type, **query) + + def create_type(self, **attrs): + """Create a new type from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v2.type.Type`, + comprised of the properties on the Type class. + + :returns: The results of type creation + :rtype: :class:`~openstack.block_storage.v2.type.Type` + """ + return self._create(_type.Type, **attrs) + + def delete_type(self, type, ignore_missing=True): + """Delete a type + + :param type: The value can be either the ID of a type or a + :class:`~openstack.block_storage.v2.type.Type` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the type does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent type. + + :returns: ``None`` + """ + self._delete(_type.Type, type, ignore_missing=ignore_missing) + + def get_type_access(self, type): + """Lists project IDs that have access to private volume type. + + :param type: The value can be either the ID of a type or a + :class:`~openstack.block_storage.v2.type.Type` instance. + + :returns: List of dictionaries describing projects that have access to + the specified type + """ + res = self._get_resource(_type.Type, type) + return res.get_private_access(self) + + def add_type_access(self, type, project_id): + """Adds private volume type access to a project. 
+ + :param type: The value can be either the ID of a type or a + :class:`~openstack.block_storage.v2.type.Type` instance. + :param str project_id: The ID of the project. Volume Type access to + be added to this project ID. + + :returns: ``None`` + """ + res = self._get_resource(_type.Type, type) + return res.add_private_access(self, project_id) + + def remove_type_access(self, type, project_id): + """Remove private volume type access from a project. + + :param type: The value can be either the ID of a type or a + :class:`~openstack.block_storage.v2.type.Type` instance. + :param str project_id: The ID of the project. Volume Type access to + be removed to this project ID. + + :returns: ``None`` + """ + res = self._get_resource(_type.Type, type) + return res.remove_private_access(self, project_id) + + # ========== Volumes ========== + + def get_volume(self, volume): + """Get a single volume + + :param volume: The value can be the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + + :returns: One :class:`~openstack.block_storage.v2.volume.Volume` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_volume.Volume, volume) + + def find_volume( + self, + name_or_id, + ignore_missing=True, + *, + details=True, + all_projects=False, + ): + """Find a single volume + + :param volume: The name or ID a volume + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the volume does not exist. + :param bool details: When set to ``False`` no extended attributes + will be returned. The default, ``True``, will cause an object with + additional attributes to be returned. + :param bool all_projects: When set to ``True``, search for volume by + name across all projects. Note that this will likely result in + a higher chance of duplicates. Admin-only by default. 
+ + :returns: One :class:`~openstack.block_storage.v2.volume.Volume` or + None. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + query = {} + if all_projects: + query['all_projects'] = True + list_base_path = '/volumes/detail' if details else None + return self._find( + _volume.Volume, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + **query, + ) + + def volumes(self, *, details=True, all_projects=False, **query): + """Retrieve a generator of volumes + + :param bool details: When set to ``False`` no extended attributes + will be returned. The default, ``True``, will cause objects with + additional attributes to be returned. + :param bool all_projects: When set to ``True``, list volumes from all + projects. Admin-only by default. + :param kwargs query: Optional query parameters to be sent to limit + the volumes being returned. Available parameters include: + + * name: Name of the volume as a string. + * status: Value of the status of the volume so that you can filter + on "available" for example. + + :returns: A generator of volume objects. + """ + if all_projects: + query['all_projects'] = True + base_path = '/volumes/detail' if details else None + return self._list(_volume.Volume, base_path=base_path, **query) + + def create_volume(self, **attrs): + """Create a new volume from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v2.volume.Volume`, + comprised of the properties on the Volume class. 
+ + :returns: The results of volume creation + :rtype: :class:`~openstack.block_storage.v2.volume.Volume` + """ + return self._create(_volume.Volume, **attrs) + + def delete_volume( + self, volume, ignore_missing=True, *, force=False, cascade=False + ): + """Delete a volume + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the volume does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + volume. + :param bool force: Whether to try forcing volume deletion. + :param bool cascade: Whether to remove any snapshots along with the + volume. + + :returns: ``None`` + """ + volume = self._get_resource(_volume.Volume, volume) + try: + if not force: + volume.delete(self, params={'cascade': cascade}) + else: + volume.force_delete(self) + except exceptions.NotFoundException: + if ignore_missing: + return None + raise + + # ========== Volume actions ========== + + def extend_volume(self, volume, size): + """Extend a volume + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param size: New volume size + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.extend(self, size) + + def set_volume_readonly(self, volume, readonly=True): + """Set a volume's read-only flag. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param bool readonly: Whether the volume should be a read-only volume + or not. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.set_readonly(self, readonly) + + def retype_volume(self, volume, new_type, migration_policy="never"): + """Retype the volume. 
+ + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param new_type: The new volume type that volume is changed with. + The value can be either the ID of the volume type or a + :class:`~openstack.block_storage.v2.type.Type` instance. + :param str migration_policy: Specify if the volume should be migrated + when it is re-typed. Possible values are on-demand or never. + Default: never. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + type_id = resource.Resource._get_id(new_type) + volume.retype(self, type_id, migration_policy) + + def set_volume_bootable_status(self, volume, bootable): + """Set bootable status of the volume. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param bool bootable: Specifies whether the volume should be bootable + or not. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.set_bootable_status(self, bootable) + + def set_volume_image_metadata(self, volume, **metadata): + """Update image metadata for a volume + + :param volume: Either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume`. + :param kwargs metadata: Key/value pairs to be updated in the volume's + image metadata. No other metadata is modified by this call. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + return volume.set_image_metadata(self, metadata=metadata) + + def delete_volume_image_metadata(self, volume, keys=None): + """Delete metadata for a volume + + :param volume: Either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume`. + :param list keys: The keys to delete. If left empty complete + metadata will be removed. 
+ + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + if keys is not None: + for key in keys: + volume.delete_image_metadata_item(self, key) + else: + volume.delete_image_metadata(self) + + def reset_volume_status( + self, volume, status=None, attach_status=None, migration_status=None + ): + """Reset volume statuses. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param str status: The new volume status. + :param str attach_status: The new volume attach status. + :param str migration_status: The new volume migration status (admin + only). + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.reset_status(self, status, attach_status, migration_status) + + def attach_volume(self, volume, mountpoint, instance=None, host_name=None): + """Attaches a volume to a server. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param str mountpoint: The attaching mount point. + :param str instance: The UUID of the attaching instance. + :param str host_name: The name of the attaching host. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.attach(self, mountpoint, instance, host_name) + + def detach_volume(self, volume, attachment, force=False, connector=None): + """Detaches a volume from a server. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param str attachment: The ID of the attachment. + :param bool force: Whether to force volume detach (Rolls back an + unsuccessful detach operation after you disconnect the volume.) + :param dict connector: The connector object. 
+ + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.detach(self, attachment, force, connector) + + def unmanage_volume(self, volume): + """Removes a volume from Block Storage management without removing the + back-end storage object that is associated with it. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.unmanage(self) + + def migrate_volume( + self, volume, host=None, force_host_copy=False, lock_volume=False + ): + """Migrates a volume to the specified host. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param str host: The target host for the volume migration. Host + format is host@backend. + :param bool force_host_copy: If false (the default), rely on the volume + backend driver to perform the migration, which might be optimized. + If true, or the volume driver fails to migrate the volume itself, + a generic host-based migration is performed. + :param bool lock_volume: If true, migrating an available volume will + change its status to maintenance preventing other operations from + being performed on the volume such as attach, detach, retype, etc. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.migrate(self, host, force_host_copy, lock_volume) + + def complete_volume_migration(self, volume, new_volume, error=False): + """Complete the migration of a volume. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume` instance. + :param str new_volume: The UUID of the new volume. + :param bool error: Used to indicate if an error has occured elsewhere + that requires clean up. 
+ + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.complete_migration(self, new_volume, error) + + # ========== Backend pools ========== + + def backend_pools(self, **query): + """Returns a generator of cinder Back-end storage pools + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns A generator of cinder Back-end storage pools objects + """ + return self._list(_stats.Pools, **query) + + # ========== Backups ========== + + def backups(self, details=True, **query): + """Retrieve a generator of backups + + :param bool details: When set to ``False`` no additional details will + be returned. The default, ``True``, will cause objects with + additional attributes to be returned. + :param dict query: Optional query parameters to be sent to limit the + resources being returned: + + * offset: pagination marker + * limit: pagination limit + * sort_key: Sorts by an attribute. A valid value is + name, status, container_format, disk_format, size, id, + created_at, or updated_at. Default is created_at. + The API uses the natural sorting direction of the + sort_key attribute value. + * sort_dir: Sorts by one or more sets of attribute and sort + direction combinations. If you omit the sort direction + in a set, default is desc. + + :returns: A generator of backup objects. + """ + base_path = '/backups/detail' if details else None + return self._list(_backup.Backup, base_path=base_path, **query) + + def get_backup(self, backup): + """Get a backup + + :param backup: The value can be the ID of a backup + or a :class:`~openstack.block_storage.v2.backup.Backup` + instance. + + :returns: Backup instance + :rtype: :class:`~openstack.block_storage.v2.backup.Backup` + """ + return self._get(_backup.Backup, backup) + + def export_record(self, backup): + """Get a backup + + :param backup: The value can be the ID of a backup + or a :class:`~openstack.block_storage.v2.backup.Backup` + instance. 
+ + :returns: The backup export record fields + """ + backup = self._get_resource(_backup.Backup, backup) + return backup.export(self) + + def find_backup(self, name_or_id, ignore_missing=True, *, details=True): + """Find a single backup + + :param snapshot: The name or ID a backup + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the backup does not exist. + :param bool details: When set to ``False`` no additional details will + be returned. The default, ``True``, will cause objects with + additional attributes to be returned. + + :returns: One :class:`~openstack.block_storage.v2.backup.Backup` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + list_base_path = '/backups/detail' if details else None + return self._find( + _backup.Backup, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + ) + + def create_backup(self, **attrs): + """Create a new Backup from attributes with native API + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v2.backup.Backup` + comprised of the properties on the Backup class. + + :returns: The results of Backup creation + :rtype: :class:`~openstack.block_storage.v2.backup.Backup` + """ + return self._create(_backup.Backup, **attrs) + + def delete_backup(self, backup, ignore_missing=True, force=False): + """Delete a CloudBackup + + :param backup: The value can be the ID of a backup or a + :class:`~openstack.block_storage.v2.backup.Backup` instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent zone. 
+ :param bool force: Whether to try forcing backup deletion + + :returns: ``None`` + """ + if not force: + self._delete(_backup.Backup, backup, ignore_missing=ignore_missing) + else: + backup = self._get_resource(_backup.Backup, backup) + backup.force_delete(self) + + # ========== Backup actions ========== + + def restore_backup(self, backup, volume_id, name): + """Restore a Backup to volume + + :param backup: The value can be the ID of a backup or a + :class:`~openstack.block_storage.v2.backup.Backup` instance + :param volume_id: The ID of the volume to restore the backup to. + :param name: The name for new volume creation to restore. + + :returns: Updated backup instance + :rtype: :class:`~openstack.block_storage.v2.backup.Backup` + """ + backup = self._get_resource(_backup.Backup, backup) + return backup.restore(self, volume_id=volume_id, name=name) + + def reset_backup_status(self, backup, status): + """Reset status of the backup + + :param backup: The value can be either the ID of a backup or a + :class:`~openstack.block_storage.v2.backup.Backup` instance. + :param str status: New backup status + + :returns: None + """ + backup = self._get_resource(_backup.Backup, backup) + backup.reset_status(self, status) + + def reset_backup(self, backup, status): + warnings.warn( + "reset_backup is a deprecated alias for reset_backup_status " + "and will be removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.reset_backup_status(backup, status) + + # ========== Limits ========== + + def get_limits(self, project=None): + """Retrieves limits + + :param project: A project to get limits for. The value can be either + the ID of a project or an + :class:`~openstack.identity.v2.project.Project` instance. 
+ :returns: A Limits object, including both + :class:`~openstack.block_storage.v2.limits.AbsoluteLimit` and + :class:`~openstack.block_storage.v2.limits.RateLimit` + :rtype: :class:`~openstack.block_storage.v2.limits.Limits` + """ + if project: + return self._get( + _limits.Limits, + requires_id=False, + project_id=resource.Resource._get_id(project), + ) + return self._get(_limits.Limits, requires_id=False) + + # ========== Capabilities ========== + + def get_capabilities(self, host): + """Get a backend's capabilities + + :param host: Specified backend to obtain volume stats and properties. + + :returns: One :class: + `~openstack.block_storage.v2.capabilities.Capabilities` instance. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + return self._get(_capabilities.Capabilities, host) + + # ========== Quota class sets ========== + + def get_quota_class_set(self, quota_class_set='default'): + """Get a single quota class set + + Only one quota class is permitted, ``default``. + + :param quota_class_set: The value can be the ID of a quota class set + (only ``default`` is supported) or a + :class:`~openstack.block_storage.v2.quota_class_set.QuotaClassSet` + instance. + + :returns: One + :class:`~openstack.block_storage.v2.quota_class_set.QuotaClassSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_quota_class_set.QuotaClassSet, quota_class_set) + + def update_quota_class_set(self, quota_class_set, **attrs): + """Update a QuotaClassSet. + + Only one quota class is permitted, ``default``. + + :param quota_class_set: Either the ID of a quota class set (only + ``default`` is supported) or a + :class:`~openstack.block_storage.v2.quota_class_set.QuotaClassSet` + instance. + :param attrs: The attributes to update on the QuotaClassSet represented + by ``quota_class_set``. 
+ + :returns: The updated QuotaClassSet + :rtype: :class:`~openstack.block_storage.v2.quota_class_set.QuotaClassSet` + """ + return self._update( + _quota_class_set.QuotaClassSet, quota_class_set, **attrs + ) + + # ========== Quota sets ========== + + def get_quota_set(self, project, usage=False, **query): + """Show QuotaSet information for the project + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be retrieved + :param bool usage: When set to ``True`` quota usage and reservations + would be filled. + :param dict query: Additional query parameters to use. + + :returns: One :class:`~openstack.block_storage.v2.quota_set.QuotaSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, None, project_id=project.id + ) + return res.fetch(self, usage=usage, **query) + + def get_quota_set_defaults(self, project): + """Show QuotaSet defaults for the project + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be retrieved + + :returns: One :class:`~openstack.block_storage.v2.quota_set.QuotaSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, None, project_id=project.id + ) + return res.fetch(self, base_path='/os-quota-sets/defaults') + + def revert_quota_set(self, project, **query): + """Reset Quota for the project/user. + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be reset. + :param dict query: Additional parameters to be used. 
+ + :returns: ``None`` + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, None, project_id=project.id + ) + + if not query: + query = {} + return res.delete(self, **query) + + def update_quota_set(self, project, **attrs): + """Update a QuotaSet. + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be reset. + :param attrs: The attributes to update on the QuotaSet represented + by ``quota_set``. + + :returns: The updated QuotaSet + :rtype: :class:`~openstack.block_storage.v2.quota_set.QuotaSet` + """ + if 'project_id' in attrs or isinstance(project, _quota_set.QuotaSet): + warnings.warn( + "The signature of 'update_quota_set' has changed and it " + "now expects a Project as the first argument, in line " + "with the other quota set methods.", + os_warnings.RemovedInSDK50Warning, + ) + # cinder doesn't support any query parameters so we simply pop + # these + if 'query' in attrs: + warnings.warn( + "The query argument is no longer supported and should " + "be removed.", + os_warnings.RemovedInSDK50Warning, + ) + attrs.pop('query') + + res = self._get_resource(_quota_set.QuotaSet, project, **attrs) + return res.commit(self) + else: + project = self._get_resource(_project.Project, project) + attrs['project_id'] = project.id + return self._update(_quota_set.QuotaSet, None, **attrs) + + # ========== Services ========== + @ty.overload + def find_service( + self, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + **query: ty.Any, + ) -> _service.Service | None: ... + + @ty.overload + def find_service( + self, + name_or_id: str, + ignore_missing: ty.Literal[False], + **query: ty.Any, + ) -> _service.Service: ... 
+ + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + def find_service( + self, + name_or_id: str, + ignore_missing: bool, + **query: ty.Any, + ) -> _service.Service | None: ... + + def find_service( + self, + name_or_id: str, + ignore_missing: bool = True, + **query: ty.Any, + ) -> _service.Service | None: + """Find a single service + + :param name_or_id: The name or ID of a service + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param dict query: Additional attributes like 'host' + + :returns: One: class:`~openstack.block_storage.v2.service.Service` or + None + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + _service.Service, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def services( + self, + **query: ty.Any, + ) -> ty.Generator[_service.Service, None, None]: + """Return a generator of service + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + :returns: A generator of Service objects + :rtype: class: `~openstack.block_storage.v2.service.Service` + """ + return self._list(_service.Service, **query) + + def enable_service( + self, + service: str | _service.Service, + ) -> _service.Service: + """Enable a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v2.service.Service` instance. 
+ + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v2.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.enable(self) + + def disable_service( + self, + service: str | _service.Service, + *, + reason: str | None = None, + ) -> _service.Service: + """Disable a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v2.service.Service` instance + :param str reason: The reason to disable a service + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v2.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.disable(self, reason=reason) + + def thaw_service( + self, + service: str | _service.Service, + ) -> _service.Service: + """Thaw a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v2.service.Service` instance + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v2.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.thaw(self) + + def freeze_service( + self, + service: str | _service.Service, + ) -> _service.Service: + """Freeze a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v2.service.Service` instance + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v2.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.freeze(self) + + def failover_service( + self, + service: str | _service.Service, + *, + backend_id: str | None = None, + ) -> _service.Service: + """Failover a service + + Only applies to replicating cinder-volume services. 
+ + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v2.service.Service` instance + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v2.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.failover(self, backend_id=backend_id) + + # ========== Volume metadata ========== + + def get_volume_metadata(self, volume): + """Return a dictionary of metadata for a volume + + :param volume: Either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume`. + + :returns: A :class:`~openstack.block_storage.v2.volume.Volume` with the + volume's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.block_storage.v2.volume.Volume` + """ + volume = self._get_resource(_volume.Volume, volume) + return volume.fetch_metadata(self) + + def set_volume_metadata(self, volume, **metadata): + """Update metadata for a volume + + :param volume: Either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume`. + :param kwargs metadata: Key/value pairs to be updated in the volume's + metadata. No other metadata is modified by this call. All keys + and values are stored as Unicode. + + :returns: A :class:`~openstack.block_storage.v2.volume.Volume` with the + volume's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.block_storage.v2.volume.Volume` + """ + volume = self._get_resource(_volume.Volume, volume) + return volume.set_metadata(self, metadata=metadata) + + def delete_volume_metadata(self, volume, keys=None): + """Delete metadata for a volume + + :param volume: Either the ID of a volume or a + :class:`~openstack.block_storage.v2.volume.Volume`. + :param list keys: The keys to delete. If left empty complete + metadata will be removed. 
+ + :rtype: ``None`` + """ + volume = self._get_resource(_volume.Volume, volume) + if keys is not None: + for key in keys: + volume.delete_metadata_item(self, key) + else: + volume.delete_metadata(self) + + # ========== Snapshot metadata ========== + + def get_snapshot_metadata(self, snapshot): + """Return a dictionary of metadata for a snapshot + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot`. + + :returns: A + :class:`~openstack.block_storage.v2.snapshot.Snapshot` with the + snapshot's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + return snapshot.fetch_metadata(self) + + def set_snapshot_metadata(self, snapshot, **metadata): + """Update metadata for a snapshot + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot`. + :param kwargs metadata: Key/value pairs to be updated in the snapshot's + metadata. No other metadata is modified by this call. All keys + and values are stored as Unicode. + + :returns: A + :class:`~openstack.block_storage.v2.snapshot.Snapshot` with the + snapshot's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.block_storage.v2.snapshot.Snapshot` + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + return snapshot.set_metadata(self, metadata=metadata) + + def delete_snapshot_metadata(self, snapshot, keys=None): + """Delete metadata for a snapshot + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v2.snapshot.Snapshot`. + :param list keys: The keys to delete. If left empty complete + metadata will be removed. 
+ + :rtype: ``None`` + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + if keys is not None: + for key in keys: + snapshot.delete_metadata_item(self, key) + else: + snapshot.delete_metadata(self) + + # ========== Transfers ========== + + def create_transfer(self, **attrs): + """Create a new Transfer record + + :param volume_id: The value is ID of the volume. + :param name: The value is name of the transfer + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v2.transfer.Transfer` + comprised of the properties on the Transfer class. + :returns: The results of Transfer creation + :rtype: :class:`~openstack.block_storage.v2.transfer.Transfer` + """ + return self._create(_transfer.Transfer, **attrs) + + def delete_transfer(self, transfer, ignore_missing=True): + """Delete a volume transfer + + :param transfer: The value can be either the ID of a transfer or a + :class:`~openstack.block_storage.v2.transfer.Transfer`` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the transfer does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent transfer. + + :returns: ``None`` + """ + self._delete( + _transfer.Transfer, + transfer, + ignore_missing=ignore_missing, + ) + + def find_transfer(self, name_or_id, ignore_missing=True): + """Find a single transfer + + :param name_or_id: The name or ID a transfer + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the volume transfer does not exist. + + :returns: One :class:`~openstack.block_storage.v2.transfer.Transfer` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. 
+ """ + return self._find( + _transfer.Transfer, + name_or_id, + ignore_missing=ignore_missing, + ) + + def get_transfer(self, transfer): + """Get a single transfer + + :param transfer: The value can be the ID of a transfer or a + :class:`~openstack.block_storage.v2.transfer.Transfer` + instance. + + :returns: One :class:`~openstack.block_storage.v2.transfer.Transfer` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_transfer.Transfer, transfer) + + def transfers(self, *, details=True, all_projects=False, **query): + """Retrieve a generator of transfers + + :param bool details: When set to ``False`` no extended attributes + will be returned. The default, ``True``, will cause objects with + additional attributes to be returned. + :param bool all_projects: When set to ``True``, list transfers from + all projects. Admin-only by default. + :param kwargs query: Optional query parameters to be sent to limit + the transfers being returned. + + :returns: A generator of transfer objects. + """ + if all_projects: + query['all_projects'] = True + base_path = '/volume-transfers' + if details: + base_path = utils.urljoin(base_path, 'detail') + return self._list(_transfer.Transfer, base_path=base_path, **query) + + def accept_transfer(self, transfer_id, auth_key): + """Accept a Transfer + + :param transfer_id: The value can be the ID of a transfer or a + :class:`~openstack.block_storage.v2.transfer.Transfer` + instance. + :param auth_key: The key to authenticate volume transfer. 
+ + :returns: The results of Transfer creation + :rtype: :class:`~openstack.block_storage.v2.transfer.Transfer` + """ + transfer = self._get_resource(_transfer.Transfer, transfer_id) + return transfer.accept(self, auth_key=auth_key) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str = 'available', + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param res: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['error']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + if failures is None: + failures = ['error'] + + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/block_storage/v2/backup.py b/openstack/block_storage/v2/backup.py new file mode 100644 index 0000000000..73f30f249e --- /dev/null +++ b/openstack/block_storage/v2/backup.py @@ -0,0 +1,219 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import warnings + +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Backup(resource.Resource): + """Volume Backup""" + + resource_key = "backup" + resources_key = "backups" + base_path = "/backups" + + _query_mapping = resource.QueryParameters( + 'all_tenants', + 'limit', + 'marker', + 'project_id', + 'name', + 'status', + 'volume_id', + 'sort_key', + 'sort_dir', + ) + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_list = True + + #: Properties + #: backup availability zone + availability_zone = resource.Body("availability_zone") + #: The container backup in + container = resource.Body("container") + #: The date and time when the resource was created. + created_at = resource.Body("created_at") + #: data timestamp + #: The time when the data on the volume was first saved. + #: If it is a backup from volume, it will be the same as created_at + #: for a backup. If it is a backup from a snapshot, + #: it will be the same as created_at for the snapshot. + data_timestamp = resource.Body('data_timestamp') + #: backup description + description = resource.Body("description") + #: Backup fail reason + fail_reason = resource.Body("fail_reason") + #: Force backup + force = resource.Body("force", type=bool) + #: has_dependent_backups + #: If this value is true, there are other backups depending on this backup. + has_dependent_backups = resource.Body('has_dependent_backups', type=bool) + #: Indicates whether the backup mode is incremental. + #: If this value is true, the backup mode is incremental. + #: If this value is false, the backup mode is full. + is_incremental = resource.Body("is_incremental", type=bool) + #: A list of links associated with this volume. 
*Type: list* + links = resource.Body("links", type=list) + #: backup name + name = resource.Body("name") + #: backup object count + object_count = resource.Body("object_count", type=int) + #: The size of the volume, in gibibytes (GiB). + size = resource.Body("size", type=int) + #: The UUID of the source volume snapshot. + snapshot_id = resource.Body("snapshot_id") + #: backup status + #: values: creating, available, deleting, error, restoring, error_restoring + status = resource.Body("status") + #: The date and time when the resource was updated. + updated_at = resource.Body("updated_at") + #: The UUID of the volume. + volume_id = resource.Body("volume_id") + #: The name of the volume. + volume_name = resource.Body("volume_name") + + def create(self, session, prepend_key=True, base_path=None, **params): + """Create a remote resource based on this instance. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation + request. Default to True. + :param str base_path: Base part of the URI for creating resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :param dict params: Additional params to pass. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_create` is not set to ``True``. 
+ """ + if not self.allow_create: + raise exceptions.MethodNotSupported(self, "create") + + session = self._get_session(session) + requires_id = ( + self.create_requires_id + if self.create_requires_id is not None + else self.create_method == 'PUT' + ) + + if self.create_exclude_id_from_body: + self._body._dirty.discard("id") + + if self.create_method == 'POST': + request = self._prepare_request( + requires_id=requires_id, + prepend_key=prepend_key, + base_path=base_path, + ) + # NOTE(gtema) this is a funny example of when attribute + # is called "incremental" on create, "is_incremental" on get + # and use of "alias" or "aka" is not working for such conflict, + # since our preferred attr name is exactly "is_incremental" + body = request.body + if 'is_incremental' in body['backup']: + body['backup']['incremental'] = body['backup'].pop( + 'is_incremental' + ) + response = session.post( + request.url, + json=request.body, + headers=request.headers, + params=params, + ) + else: + # Just for safety of the implementation (since PUT removed) + raise exceptions.ResourceFailure( + f"Invalid create method: {self.create_method}" + ) + + has_body = ( + self.has_body + if self.create_returns_body is None + else self.create_returns_body + ) + self._translate_response(response, has_body=has_body) + # direct comparision to False since we need to rule out None + if self.has_body and self.create_returns_body is False: + # fetch the body if it's required but not returned by create + return self.fetch(session) + return self + + def _action(self, session, body): + """Preform backup actions given the message body.""" + url = utils.urljoin(self.base_path, self.id, 'action') + resp = session.post(url, json=body) + exceptions.raise_from_response(resp) + return resp + + def export(self, session): + """Export the current backup + + :param session: openstack session + :return: The backup export record fields + """ + url = utils.urljoin(self.base_path, self.id, "export_record") + resp = 
session.get(url) + exceptions.raise_from_response(resp) + return resp.json() + + def restore(self, session, volume_id=None, name=None): + """Restore current backup to volume + + :param session: openstack session + :param volume_id: The ID of the volume to restore the backup to. + :param name: The name for new volume creation to restore. + :return: Updated backup instance + """ + url = utils.urljoin(self.base_path, self.id, "restore") + body: dict[str, dict] = {'restore': {}} + if volume_id: + body['restore']['volume_id'] = volume_id + if name: + body['restore']['name'] = name + if not (volume_id or name): + raise exceptions.SDKException( + 'Either of `name` or `volume_id` must be specified.' + ) + response = session.post(url, json=body) + self._translate_response(response, resource_response_key='restore') + return self + + def force_delete(self, session): + """Force backup deletion""" + body = {'os-force_delete': None} + self._action(session, body) + + def reset_status(self, session, status): + """Reset the status of the backup""" + body = {'os-reset_status': {'status': status}} + self._action(session, body) + + def reset(self, session, status): + warnings.warn( + "reset is a deprecated alias for reset_status and will be " + "removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + self.reset_status(session, status) + + +BackupDetail = Backup diff --git a/openstack/block_storage/v2/capabilities.py b/openstack/block_storage/v2/capabilities.py new file mode 100644 index 0000000000..03d958d8b5 --- /dev/null +++ b/openstack/block_storage/v2/capabilities.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Capabilities(resource.Resource): + base_path = "/capabilities" + + # Capabilities + allow_fetch = True + + #: Properties + #: The capabilities description + description = resource.Body("description") + #: The name of volume backend capabilities. + display_name = resource.Body("display_name") + #: The driver version. + driver_version = resource.Body("driver_version") + #: The storage namespace, such as OS::Storage::Capabilities::foo. + namespace = resource.Body("namespace") + #: The name of the storage pool. + pool_name = resource.Body("pool_name") + #: The backend volume capabilites list, which consists of cinder + #: standard capabilities and vendor unique properties. + properties = resource.Body("properties", type=dict) + #: A list of volume backends used to replicate volumes on this backend. + replication_targets = resource.Body("replication_targets", type=list) + #: The storage backend for the backend volume. + storage_protocol = resource.Body("storage_protocol") + #: The name of the vendor. + vendor_name = resource.Body("vendor_name") + #: The volume type access. + visibility = resource.Body("visibility") + #: The name of the back-end volume. 
+ volume_backend_name = resource.Body("volume_backend_name") diff --git a/openstack/block_storage/v2/extension.py b/openstack/block_storage/v2/extension.py new file mode 100644 index 0000000000..98a3093fcd --- /dev/null +++ b/openstack/block_storage/v2/extension.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Extension(resource.Resource): + resources_key = "extensions" + base_path = "/extensions" + + # Capabilities + allow_list = True + + #: Properties + #: The alias for the extension. + alias = resource.Body('alias', type=str) + #: The extension description. + description = resource.Body('description', type=str) + #: Links pertaining to this extension. + links = resource.Body('links', type=list) + #: The name of this extension. + name = resource.Body('name') + #: A URL pointing to the namespace for this extension. + namespace = resource.Body('namespace') + #: The date and time when the resource was updated. + #: The date and time stamp format is ISO 8601. + updated_at = resource.Body('updated', type=str) diff --git a/openstack/block_storage/v2/limits.py b/openstack/block_storage/v2/limits.py new file mode 100644 index 0000000000..486a97d0d5 --- /dev/null +++ b/openstack/block_storage/v2/limits.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class AbsoluteLimit(resource.Resource): + #: Properties + #: The maximum total amount of backups, in gibibytes (GiB). + max_total_backup_gigabytes = resource.Body( + "maxTotalBackupGigabytes", type=int + ) + #: The maximum number of backups. + max_total_backups = resource.Body("maxTotalBackups", type=int) + #: The maximum number of snapshots. + max_total_snapshots = resource.Body("maxTotalSnapshots", type=int) + #: The maximum total amount of volumes, in gibibytes (GiB). + max_total_volume_gigabytes = resource.Body( + "maxTotalVolumeGigabytes", type=int + ) + #: The maximum number of volumes. + max_total_volumes = resource.Body("maxTotalVolumes", type=int) + #: The total number of backups gibibytes (GiB) used. + total_backup_gigabytes_used = resource.Body( + "totalBackupGigabytesUsed", type=int + ) + #: The total number of backups used. + total_backups_used = resource.Body("totalBackupsUsed", type=int) + #: The total number of gibibytes (GiB) used. + total_gigabytes_used = resource.Body("totalGigabytesUsed", type=int) + #: The total number of snapshots used. + total_snapshots_used = resource.Body("totalSnapshotsUsed", type=int) + #: The total number of volumes used. + total_volumes_used = resource.Body("totalVolumesUsed", type=int) + + +class RateLimit(resource.Resource): + #: Properties + #: Rate limits next availabe time. + next_available = resource.Body("next-available") + #: Integer for rate limits remaining. + remaining = resource.Body("remaining", type=int) + #: Unit of measurement for the value parameter. 
+ unit = resource.Body("unit") + #: Integer number of requests which can be made. + value = resource.Body("value", type=int) + #: An HTTP verb (POST, PUT, etc.). + verb = resource.Body("verb") + + +class RateLimits(resource.Resource): + #: Properties + #: A list of the specific limits that apply to the ``regex`` and ``uri``. + limits = resource.Body("limit", type=list, list_type=RateLimit) + #: A regex representing which routes this rate limit applies to. + regex = resource.Body("regex") + #: A URI representing which routes this rate limit applies to. + uri = resource.Body("uri") + + +class Limits(resource.Resource): + resource_key = "limits" + base_path = "/limits" + + # capabilities + allow_fetch = True + + #: Properties + #: An absolute limits object. + absolute = resource.Body("absolute", type=AbsoluteLimit) + #: Rate-limit volume copy bandwidth, used to mitigate + #: slow down of data access from the instances. + rate = resource.Body("rate", type=list, list_type=RateLimits) + + +# legacy alias +Limit = Limits diff --git a/openstack/block_storage/v2/quota_class_set.py b/openstack/block_storage/v2/quota_class_set.py new file mode 100644 index 0000000000..8cf62d2721 --- /dev/null +++ b/openstack/block_storage/v2/quota_class_set.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class QuotaClassSet(resource.Resource): + resource_key = 'quota_class_set' + base_path = '/os-quota-class-sets' + + # Capabilities + allow_fetch = True + allow_commit = True + + # Properties + #: The size (GiB) of backups that are allowed for each project. + backup_gigabytes = resource.Body('backup_gigabytes', type=int) + #: The number of backups that are allowed for each project. + backups = resource.Body('backups', type=int) + #: The size (GiB) of volumes and snapshots that are allowed for each + #: project. + gigabytes = resource.Body('gigabytes', type=int) + #: The number of groups that are allowed for each project. + groups = resource.Body('groups', type=int) + #: The size (GiB) of volumes in request that are allowed for each volume. + per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) + #: The number of snapshots that are allowed for each project. + snapshots = resource.Body('snapshots', type=int) + #: The number of volumes that are allowed for each project. + volumes = resource.Body('volumes', type=int) diff --git a/openstack/block_storage/v2/quota_set.py b/openstack/block_storage/v2/quota_set.py new file mode 100644 index 0000000000..568e58c406 --- /dev/null +++ b/openstack/block_storage/v2/quota_set.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.common import quota_set +from openstack import resource + + +class QuotaSet(quota_set.QuotaSet): + #: Properties + #: The size (GB) of backups that are allowed for each project. + backup_gigabytes = resource.Body('backup_gigabytes', type=int) + #: The number of backups that are allowed for each project. + backups = resource.Body('backups', type=int) + #: The size (GB) of volumes and snapshots that are allowed for each + #: project. + gigabytes = resource.Body('gigabytes', type=int) + #: The number of groups that are allowed for each project. + groups = resource.Body('groups', type=int) + #: The size (GB) of volumes in request that are allowed for each volume. + per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) + #: The number of snapshots that are allowed for each project. + snapshots = resource.Body('snapshots', type=int) + #: The number of volumes that are allowed for each project. + volumes = resource.Body('volumes', type=int) diff --git a/openstack/block_storage/v2/service.py b/openstack/block_storage/v2/service.py new file mode 100644 index 0000000000..0f8c46c9b2 --- /dev/null +++ b/openstack/block_storage/v2/service.py @@ -0,0 +1,208 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Service(resource.Resource): + resources_key = 'services' + base_path = '/os-services' + + # capabilities + allow_list = True + + _query_mapping = resource.QueryParameters( + 'binary', + 'host', + ) + + # Properties + #: The ID of active storage backend (cinder-volume services only) + active_backend_id = resource.Body('active_backend_id') + #: The availability zone of service + availability_zone = resource.Body('zone') + #: Binary name of service + binary = resource.Body('binary') + #: Disabled reason of service + disabled_reason = resource.Body('disabled_reason') + #: The name of the host where service runs + host = resource.Body('host') + # Whether the host is frozen or not (cinder-volume services only) + is_frozen = resource.Body('frozen') + #: Service name + name = resource.Body('name', alias='binary') + #: The volume service replication status (cinder-volume services only) + replication_status = resource.Body('replication_status') + #: State of service + state = resource.Body('state') + #: Status of service + status = resource.Body('status') + #: The date and time when the resource was updated + updated_at = resource.Body('updated_at') + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... 
+ + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: + # No direct request possible, thus go directly to list + if list_base_path: + params['base_path'] = list_base_path + + # all_projects is a special case that is used by multiple services. We + # handle it here since it doesn't make sense to pass it to the .fetch + # call above + if all_projects is not None: + params['all_projects'] = all_projects + + data = cls.list(session, **params) + + result = None + for maybe_result in data: + # Since ID might be both int and str force cast + id_value = str(cls._get_id(maybe_result)) + name_value = maybe_result.name + + if str(name_or_id) in (id_value, name_value): + if 'host' in params and maybe_result['host'] != params['host']: + continue + # Only allow one resource to be found. If we already + # found a match, raise an exception to show it. + if result is None: + result = maybe_result + else: + msg = "More than one %s exists with the name '%s'." 
+ msg = msg % (cls.__name__, name_or_id) + raise exceptions.DuplicateResource(msg) + + if result is not None: + return result + + if ignore_missing: + return None + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) + + def commit(self, session, prepend_key=False, *args, **kwargs): + # we need to set prepend_key to false + return super().commit( + session, + prepend_key, + *args, + **kwargs, + ) + + def _action(self, session, action, body): + url = utils.urljoin(Service.base_path, action) + response = session.put(url, json=body) + self._translate_response(response) + return self + + def enable(self, session): + """Enable service.""" + body = {'binary': self.binary, 'host': self.host} + return self._action(session, 'enable', body) + + def disable(self, session, *, reason=None): + """Disable service.""" + body = {'binary': self.binary, 'host': self.host} + + if not reason: + action = 'disable' + else: + action = 'disable-log-reason' + body['disabled_reason'] = reason + + return self._action(session, action, body) + + def thaw(self, session): + body = {'host': self.host} + return self._action(session, 'thaw', body) + + def freeze(self, session): + body = {'host': self.host} + return self._action(session, 'freeze', body) + + def failover( + self, + session, + *, + backend_id=None, + ): + """Failover a service + + Only applies to replicating cinder-volume services. + """ + body = {'host': self.host} + if backend_id: + body['backend_id'] = backend_id + + return self._action(session, 'failover_host', body) diff --git a/openstack/block_storage/v2/snapshot.py b/openstack/block_storage/v2/snapshot.py new file mode 100644 index 0000000000..748bf96df5 --- /dev/null +++ b/openstack/block_storage/v2/snapshot.py @@ -0,0 +1,110 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +from openstack.common import metadata +from openstack import exceptions +from openstack import format +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Snapshot(resource.Resource, metadata.MetadataMixin): + resource_key = "snapshot" + resources_key = "snapshots" + base_path = "/snapshots" + + _query_mapping = resource.QueryParameters( + 'name', 'status', 'volume_id', all_projects='all_tenants' + ) + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + + # Properties + #: The timestamp of this snapshot creation. + created_at = resource.Body("created_at") + #: Description of snapshot. Default is None. + description = resource.Body("description") + #: Indicate whether to create snapshot, even if the volume is attached. + #: Default is ``False``. *Type: bool* + is_forced = resource.Body("force", type=format.BoolStr) + #: The size of the volume, in gibibytes (GiB). + size = resource.Body("size", type=int) + #: The current status of this snapshot. Potential values are creating, + #: available, deleting, error, and error_deleting. + status = resource.Body("status") + #: The date and time when the resource was updated. + updated_at = resource.Body("updated_at") + #: The ID of the volume this snapshot was taken of. 
+ volume_id = resource.Body("volume_id") + + def _action(self, session, body): + """Perform backup actions given the message body.""" + url = utils.urljoin(self.base_path, self.id, 'action') + resp = session.post(url, json=body) + exceptions.raise_from_response(resp) + return resp + + def reset_status(self, session, status): + """Reset the status of the snapshot.""" + body = {'os-reset_status': {'status': status}} + self._action(session, body) + + def reset(self, session, status): + warnings.warn( + "reset is a deprecated alias for reset_status and will be " + "removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + self.reset_status(session, status) + + @classmethod + def manage( + cls, + session, + volume_id, + ref, + name=None, + description=None, + metadata=None, + ): + """Manage a snapshot under block storage provisioning.""" + url = '/os-snapshot-manage' + body = { + 'snapshot': { + 'volume_id': volume_id, + 'ref': ref, + 'name': name, + 'description': description, + 'metadata': metadata, + } + } + resp = session.post(url, json=body) + exceptions.raise_from_response(resp) + snapshot = Snapshot() + snapshot._translate_response(resp) + return snapshot + + def unmanage(self, session): + """Unmanage a snapshot from block storage provisioning.""" + body = {'os-unmanage': None} + self._action(session, body) + + +SnapshotDetail = Snapshot diff --git a/openstack/block_storage/v2/stats.py b/openstack/block_storage/v2/stats.py new file mode 100644 index 0000000000..8f9a442244 --- /dev/null +++ b/openstack/block_storage/v2/stats.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Pools(resource.Resource): + resource_key = "" + resources_key = "pools" + base_path = "/scheduler-stats/get_pools?detail=True" + + # capabilities + allow_fetch = False + allow_create = False + allow_delete = False + allow_list = True + + # Properties + #: The Cinder name for the pool + name = resource.Body("name") + #: returns a dict with information about the pool + capabilities = resource.Body("capabilities", type=dict) diff --git a/openstack/block_storage/v2/transfer.py b/openstack/block_storage/v2/transfer.py new file mode 100644 index 0000000000..d017a6643f --- /dev/null +++ b/openstack/block_storage/v2/transfer.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Transfer(resource.Resource): + resource_key = "transfer" + resources_key = "transfers" + base_path = "/os-volume-transfer" + + # capabilities + allow_create = True + allow_delete = True + allow_fetch = True + allow_list = True + + # Properties + #: UUID of the transfer. + id = resource.Body("id") + #: The date and time when the resource was created. + created_at = resource.Body("created_at") + #: Name of the volume to transfer. + name = resource.Body("name") + #: ID of the volume to transfer. + volume_id = resource.Body("volume_id") + #: Auth key for the transfer. + auth_key = resource.Body("auth_key") + #: A list of links associated with this volume. *Type: list* + links = resource.Body("links") + + def accept(self, session, *, auth_key=None): + """Accept a volume transfer. + + :param session: The session to use for making this request. + :param auth_key: The authentication key for the volume transfer. + + :return: This :class:`Transfer` instance. + """ + body = {'accept': {'auth_key': auth_key}} + + url = utils.urljoin(self.base_path, self.id, 'accept') + resp = session.post(url, json=body) + exceptions.raise_from_response(resp) + + transfer = Transfer() + transfer._translate_response(resp) + return transfer diff --git a/openstack/block_storage/v2/type.py b/openstack/block_storage/v2/type.py new file mode 100644 index 0000000000..4b55d7fe7f --- /dev/null +++ b/openstack/block_storage/v2/type.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Type(resource.Resource): + resource_key = "volume_type" + resources_key = "volume_types" + base_path = "/types" + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters("is_public") + + # Properties + #: A dict of extra specifications. "capabilities" is a usual key. + extra_specs = resource.Body("extra_specs", type=dict) + #: a private volume-type. *Type: bool* + is_public = resource.Body('os-volume-type-access:is_public', type=bool) + + def get_private_access(self, session): + """List projects with private access to the volume type. + + :param session: The session to use for making this request. + :returns: The volume type access response. + """ + url = utils.urljoin(self.base_path, self.id, "os-volume-type-access") + resp = session.get(url) + + exceptions.raise_from_response(resp) + + return resp.json().get("volume_type_access", []) + + def add_private_access(self, session, project_id): + """Add project access from the volume type. + + :param session: The session to use for making this request. + :param project_id: The project to add access for. + """ + url = utils.urljoin(self.base_path, self.id, "action") + body = {"addProjectAccess": {"project": project_id}} + + resp = session.post(url, json=body) + + exceptions.raise_from_response(resp) + + def remove_private_access(self, session, project_id): + """Remove project access from the volume type. + + :param session: The session to use for making this request. + :param project_id: The project to remove access for. 
+ """ + url = utils.urljoin(self.base_path, self.id, "action") + body = {"removeProjectAccess": {"project": project_id}} + + resp = session.post(url, json=body) + + exceptions.raise_from_response(resp) diff --git a/openstack/block_storage/v2/volume.py b/openstack/block_storage/v2/volume.py new file mode 100644 index 0000000000..0eb8808f72 --- /dev/null +++ b/openstack/block_storage/v2/volume.py @@ -0,0 +1,219 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.common import metadata +from openstack import format +from openstack import resource +from openstack import utils + + +class Volume(resource.Resource, metadata.MetadataMixin): + resource_key = "volume" + resources_key = "volumes" + base_path = "/volumes" + + _query_mapping = resource.QueryParameters( + 'name', 'status', 'project_id', all_projects='all_tenants' + ) + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + + # Properties + #: TODO(briancurtin): This is currently undocumented in the API. + attachments = resource.Body("attachments") + #: The availability zone. + availability_zone = resource.Body("availability_zone") + #: ID of the consistency group. + consistency_group_id = resource.Body("consistencygroup_id") + #: The timestamp of this volume creation. + created_at = resource.Body("created_at") + #: The volume description. + description = resource.Body("description") + #: Extended replication status on this volume. 
+ extended_replication_status = resource.Body( + "os-volume-replication:extended_status" + ) + #: The volume's current back-end. + host = resource.Body("os-vol-host-attr:host") + #: The ID of the image from which you want to create the volume. + #: Required to create a bootable volume. + image_id = resource.Body("imageRef") + #: Enables or disables the bootable attribute. You can boot an + #: instance from a bootable volume. *Type: bool* + is_bootable = resource.Body("bootable", type=format.BoolStr) + #: ``True`` if this volume is encrypted, ``False`` if not. + #: *Type: bool* + is_encrypted = resource.Body("encrypted", type=format.BoolStr) + #: Whether volume will be sharable or not. + is_multiattach = resource.Body("multiattach", type=format.BoolStr) + #: The volume ID that this volume's name on the back-end is based on. + migration_id = resource.Body("os-vol-mig-status-attr:name_id") + #: The status of this volume's migration (None means that a migration + #: is not currently in progress). + migration_status = resource.Body("os-vol-mig-status-attr:migstat") + #: The project ID associated with current back-end. + project_id = resource.Body("os-vol-tenant-attr:tenant_id") + #: Data set by the replication driver + replication_driver_data = resource.Body( + "os-volume-replication:driver_data" + ) + #: Status of replication on this volume. + replication_status = resource.Body("replication_status") + #: Scheduler hints for the volume + scheduler_hints = resource.Body('OS-SCH-HNT:scheduler_hints', type=dict) + #: The size of the volume, in gibibytes (GiB). *Type: int* + size = resource.Body("size", type=int) + #: To create a volume from an existing snapshot, specify the ID of + #: the existing volume snapshot. If specified, the volume is created + #: in same availability zone and with same size of the snapshot. + snapshot_id = resource.Body("snapshot_id") + #: To create a volume from an existing volume, specify the ID of + #: the existing volume. 
If specified, the volume is created with + #: same size of the source volume. + source_volume_id = resource.Body("source_volid") + #: One of the following values: creating, available, attaching, in-use + #: deleting, error, error_deleting, backing-up, restoring-backup, + #: error_restoring. For details on these statuses, see the + #: Block Storage API documentation. + status = resource.Body("status") + #: The date and time when the resource was updated. + updated_at = resource.Body("updated_at") + #: The user ID associated with the volume + user_id = resource.Body("user_id") + #: One or more metadata key and value pairs about image + volume_image_metadata = resource.Body("volume_image_metadata") + #: The name of the associated volume type. + volume_type = resource.Body("volume_type") + + def _action(self, session, body): + """Preform volume actions given the message body.""" + # NOTE: This is using Volume.base_path instead of self.base_path + # as both Volume and VolumeDetail instances can be acted on, but + # the URL used is sans any additional /detail/ part. 
+ url = utils.urljoin(Volume.base_path, self.id, 'action') + return session.post(url, json=body) + + def extend(self, session, size): + """Extend a volume size.""" + body = {'os-extend': {'new_size': size}} + self._action(session, body) + + def set_bootable_status(self, session, bootable=True): + """Set volume bootable status flag""" + body = {'os-set_bootable': {'bootable': bootable}} + self._action(session, body) + + def set_readonly(self, session, readonly): + """Set volume readonly flag""" + body = {'os-update_readonly_flag': {'readonly': readonly}} + self._action(session, body) + + def set_image_metadata(self, session, metadata): + """Sets image metadata key-value pairs on the volume""" + body = {'os-set_image_metadata': {'metadata': metadata}} + self._action(session, body) + + def delete_image_metadata(self, session): + """Remove all image metadata from the volume""" + for key in self.metadata: + body = {'os-unset_image_metadata': key} + self._action(session, body) + + def delete_image_metadata_item(self, session, key): + """Remove a single image metadata from the volume""" + body = {'os-unset_image_metadata': key} + self._action(session, body) + + def reset_status( + self, session, status=None, attach_status=None, migration_status=None + ): + """Reset volume statuses (admin operation)""" + body: dict[str, dict[str, str]] = {'os-reset_status': {}} + if status: + body['os-reset_status']['status'] = status + if attach_status: + body['os-reset_status']['attach_status'] = attach_status + if migration_status: + body['os-reset_status']['migration_status'] = migration_status + self._action(session, body) + + def attach(self, session, mountpoint, instance): + """Attach volume to server""" + body = { + 'os-attach': {'mountpoint': mountpoint, 'instance_uuid': instance} + } + + self._action(session, body) + + def detach(self, session, attachment, force=False): + """Detach volume from server""" + if not force: + body = {'os-detach': {'attachment_id': attachment}} + if 
force: + body = {'os-force_detach': {'attachment_id': attachment}} + + self._action(session, body) + + def unmanage(self, session): + """Unmanage volume""" + body = {'os-unmanage': None} + + self._action(session, body) + + def retype(self, session, new_type, migration_policy=None): + """Change volume type""" + body = {'os-retype': {'new_type': new_type}} + if migration_policy: + body['os-retype']['migration_policy'] = migration_policy + + self._action(session, body) + + def migrate( + self, session, host=None, force_host_copy=False, lock_volume=False + ): + """Migrate volume""" + req = dict() + if host is not None: + req['host'] = host + if force_host_copy: + req['force_host_copy'] = force_host_copy + if lock_volume: + req['lock_volume'] = lock_volume + body = {'os-migrate_volume': req} + + self._action(session, body) + + def complete_migration(self, session, new_volume_id, error=False): + """Complete volume migration""" + body = { + 'os-migrate_volume_completion': { + 'new_volume': new_volume_id, + 'error': error, + } + } + + self._action(session, body) + + def force_delete(self, session): + """Force volume deletion""" + body = {'os-force_delete': None} + + self._action(session, body) + + +VolumeDetail = Volume diff --git a/openstack/telemetry/alarm/__init__.py b/openstack/block_storage/v3/__init__.py similarity index 100% rename from openstack/telemetry/alarm/__init__.py rename to openstack/block_storage/v3/__init__.py diff --git a/openstack/block_storage/v3/_proxy.py b/openstack/block_storage/v3/_proxy.py new file mode 100644 index 0000000000..f97e76fc3e --- /dev/null +++ b/openstack/block_storage/v3/_proxy.py @@ -0,0 +1,2630 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty +import warnings + +from openstack.block_storage.v3 import attachment as _attachment +from openstack.block_storage.v3 import availability_zone +from openstack.block_storage.v3 import backup as _backup +from openstack.block_storage.v3 import block_storage_summary as _summary +from openstack.block_storage.v3 import capabilities as _capabilities +from openstack.block_storage.v3 import default_type as _default_type +from openstack.block_storage.v3 import extension as _extension +from openstack.block_storage.v3 import group as _group +from openstack.block_storage.v3 import group_snapshot as _group_snapshot +from openstack.block_storage.v3 import group_type as _group_type +from openstack.block_storage.v3 import limits as _limits +from openstack.block_storage.v3 import quota_class_set as _quota_class_set +from openstack.block_storage.v3 import quota_set as _quota_set +from openstack.block_storage.v3 import resource_filter as _resource_filter +from openstack.block_storage.v3 import service as _service +from openstack.block_storage.v3 import snapshot as _snapshot +from openstack.block_storage.v3 import stats as _stats +from openstack.block_storage.v3 import transfer as _transfer +from openstack.block_storage.v3 import type as _type +from openstack.block_storage.v3 import volume as _volume +from openstack import exceptions +from openstack.identity.v3 import project as _project +from openstack import proxy +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Proxy(proxy.Proxy): + 
    api_version: ty.ClassVar[ty.Literal['3']] = '3'

    # Maps resource-type names to resource classes for generic lookup.
    _resource_registry = {
        "availability_zone": availability_zone.AvailabilityZone,
        "attachment": _attachment.Attachment,
        "backup": _backup.Backup,
        "capabilities": _capabilities.Capabilities,
        "extension": _extension.Extension,
        "group": _group.Group,
        "group_snapshot": _group_snapshot.GroupSnapshot,
        "group_type": _group_type.GroupType,
        "limits": _limits.Limits,
        "quota_set": _quota_set.QuotaSet,
        "resource_filter": _resource_filter.ResourceFilter,
        "snapshot": _snapshot.Snapshot,
        "stats_pools": _stats.Pools,
        "summary": _summary.BlockStorageSummary,
        "transfer": _transfer.Transfer,
        "type": _type.Type,
        "volume": _volume.Volume,
    }

    # ====== IMAGES ======
    # TODO(stephenfin): Convert to use resources/proxy rather than direct calls
    def create_image(
        self,
        name,
        volume,
        allow_duplicates,
        container_format,
        disk_format,
        wait,
        timeout,
    ):
        """Create an image from a volume via the os-volume_upload_image action.

        :param str name: The name of the new image.
        :param volume: A dict with an ``id`` key, or a value resolvable to a
            volume via :meth:`get_volume`.
        :param bool allow_duplicates: Sent as ``force`` to the API, allowing
            upload even if a similarly-named image exists.
        :param str container_format: Container format for the new image.
            Defaults to ``bare`` when falsy.
        :param str disk_format: Disk format for the new image. Defaults to the
            connection's configured ``image_format`` when falsy.
        :param wait: Unused in this method; presumably honoured by the caller
            that polls for image readiness — TODO confirm against callers.
        :param timeout: Unused in this method; see ``wait``.

        :returns: The image object for the uploaded volume.
        :raises: :class:`~openstack.exceptions.SDKException` if the volume
            cannot be found.
        """
        if not disk_format:
            disk_format = self._connection.config.config['image_format']
        if not container_format:
            # https://docs.openstack.org/image-guide/image-formats.html
            container_format = 'bare'

        if 'id' in volume:
            volume_id = volume['id']
        else:
            volume_obj = self.get_volume(volume)
            if not volume_obj:
                raise exceptions.SDKException(
                    f"Volume {volume} given to create_image could not be found"
                )
            volume_id = volume_obj['id']
        data = self.post(
            f'/volumes/{volume_id}/action',
            json={
                'os-volume_upload_image': {
                    'force': allow_duplicates,
                    'image_name': name,
                    'container_format': container_format,
                    'disk_format': disk_format,
                }
            },
        )
        response = self._connection._get_and_munchify(
            'os-volume_upload_image', data
        )
        return self._connection.image._existing_image(id=response['image_id'])

    # ====== SNAPSHOTS ======
    def get_snapshot(self, snapshot):
        """Get a single snapshot

        :param snapshot: The value can be the ID of a snapshot or a
            :class:`~openstack.block_storage.v3.snapshot.Snapshot`
            instance.

        :returns: One :class:`~openstack.block_storage.v3.snapshot.Snapshot`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(_snapshot.Snapshot, snapshot)

    def find_snapshot(
        self,
        name_or_id,
        ignore_missing=True,
        *,
        details=True,
        all_projects=False,
    ):
        """Find a single snapshot

        :param name_or_id: The name or ID of a snapshot.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the snapshot does not exist. When set to ``True``, None will
            be returned when attempting to find a nonexistent resource.
        :param bool details: When set to ``False``
            :class:`~openstack.block_storage.v3.snapshot.Snapshot` objects
            will be returned. The default, ``True``, will cause more
            attributes to be returned.
        :param bool all_projects: When set to ``True``, search for snapshot by
            name across all projects. Note that this will likely result in
            a higher chance of duplicates. Admin-only by default.

        :returns: One :class:`~openstack.block_storage.v3.snapshot.Snapshot`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple
            resources are found.
        """
        query = {}
        if all_projects:
            query['all_projects'] = True
        # The detail endpoint returns snapshots with extended attributes.
        list_base_path = '/snapshots/detail' if details else None
        return self._find(
            _snapshot.Snapshot,
            name_or_id,
            ignore_missing=ignore_missing,
            list_base_path=list_base_path,
            **query,
        )

    def snapshots(self, *, details=True, all_projects=False, **query):
        """Retrieve a generator of snapshots

        :param bool details: When set to ``False``
            :class:`~openstack.block_storage.v3.snapshot.Snapshot`
            objects will be returned. The default, ``True``, will cause
            more attributes to be returned.
        :param bool all_projects: When set to ``True``, list snapshots from all
            projects. Admin-only by default.
        :param kwargs query: Optional query parameters to be sent to limit
            the snapshots being returned. Available parameters include:

            * name: Name of the snapshot as a string.
            * project_id: Filter the snapshots by project.
            * volume_id: volume id of a snapshot.
            * status: Value of the status of the snapshot so that you can
              filter on "available" for example.

        :returns: A generator of snapshot objects.
        """
        if all_projects:
            query['all_projects'] = True
        base_path = '/snapshots/detail' if details else None
        return self._list(_snapshot.Snapshot, base_path=base_path, **query)

    def create_snapshot(self, **attrs):
        """Create a new snapshot from attributes

        :param dict attrs: Keyword arguments which will be used to create
            a :class:`~openstack.block_storage.v3.snapshot.Snapshot`,
            comprised of the properties on the Snapshot class.

        :returns: The results of snapshot creation
        :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot`
        """
        return self._create(_snapshot.Snapshot, **attrs)

    def update_snapshot(self, snapshot, **attrs):
        """Update a snapshot

        :param snapshot: Either the ID of a snapshot or a
            :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance.
        :param dict attrs: The attributes to update on the snapshot.

        :returns: The updated snapshot
        :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot`
        """
        return self._update(_snapshot.Snapshot, snapshot, **attrs)

    def delete_snapshot(self, snapshot, ignore_missing=True, force=False):
        """Delete a snapshot

        :param snapshot: The value can be either the ID of a snapshot or a
            :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be
            raised when the snapshot does not exist.
            When set to ``True``, no exception will be set when
            attempting to delete a nonexistent snapshot.
        :param bool force: Whether to try forcing snapshot deletion.

        :returns: ``None``
        """
        if not force:
            self._delete(
                _snapshot.Snapshot, snapshot, ignore_missing=ignore_missing
            )
        else:
            # Force deletion goes through the os-force_delete action, which
            # bypasses the normal state checks.
            snapshot = self._get_resource(_snapshot.Snapshot, snapshot)
            snapshot.force_delete(self)
+ :param bool force: Whether to try forcing snapshot deletion. + + :returns: ``None`` + """ + if not force: + self._delete( + _snapshot.Snapshot, snapshot, ignore_missing=ignore_missing + ) + else: + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + snapshot.force_delete(self) + + def get_snapshot_metadata(self, snapshot): + """Return a dictionary of metadata for a snapshot + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v3.snapshot.Snapshot`. + + :returns: A + :class:`~openstack.block_storage.v3.snapshot.Snapshot` with the + snapshot's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot` + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + return snapshot.fetch_metadata(self) + + def set_snapshot_metadata(self, snapshot, **metadata): + """Update metadata for a snapshot + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v3.snapshot.Snapshot`. + :param kwargs metadata: Key/value pairs to be updated in the snapshot's + metadata. No other metadata is modified by this call. All keys + and values are stored as Unicode. + + :returns: A + :class:`~openstack.block_storage.v3.snapshot.Snapshot` with the + snapshot's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot` + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + return snapshot.set_metadata(self, metadata=metadata) + + def delete_snapshot_metadata(self, snapshot, keys=None): + """Delete metadata for a snapshot + + :param snapshot: Either the ID of a snapshot or a + :class:`~openstack.block_storage.v3.snapshot.Snapshot`. + :param list keys: The keys to delete. If left empty complete + metadata will be removed. 
+ + :rtype: ``None`` + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + if keys is not None: + for key in keys: + snapshot.delete_metadata_item(self, key) + else: + snapshot.delete_metadata(self) + + # ====== SNAPSHOT ACTIONS ====== + def reset_snapshot_status(self, snapshot, status): + """Reset status of the snapshot + + :param snapshot: The value can be either the ID of a backup or a + :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. + :param str status: New snapshot status + + :returns: None + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + snapshot.reset_status(self, status) + + def reset_snapshot(self, snapshot, status): + warnings.warn( + "reset_snapshot is a deprecated alias for reset_snapshot_status " + "and will be removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.reset_snapshot_status(snapshot, status) + + def set_snapshot_status(self, snapshot, status, progress=None): + """Update fields related to the status of a snapshot. + + :param snapshot: The value can be either the ID of a backup or a + :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance. + :param str status: New snapshot status + :param str progress: A percentage value for snapshot build progress. + + :returns: None + """ + snapshot = self._get_resource(_snapshot.Snapshot, snapshot) + snapshot.set_status(self, status, progress) + + def manage_snapshot(self, **attrs): + """Creates a snapshot by using existing storage rather than + allocating new storage. + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.snapshot.Snapshot`, + comprised of the properties on the Snapshot class. + + :returns: The results of snapshot creation + :rtype: :class:`~openstack.block_storage.v3.snapshot.Snapshot` + """ + return _snapshot.Snapshot.manage(self, **attrs) + + def unmanage_snapshot(self, snapshot): + """Unmanage a snapshot from block storage provisioning. 
    # ====== TYPES ======
    def get_type(self, type):
        """Get a single type

        :param type: The value can be the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.

        :returns: One :class:`~openstack.block_storage.v3.type.Type`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(_type.Type, type)

    def find_type(self, name_or_id, ignore_missing=True):
        """Find a single volume type

        :param name_or_id: The name or ID of a volume type.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the volume type does not exist.

        :returns: One :class:`~openstack.block_storage.v3.type.Type`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple
            resources are found.
        """
        return self._find(
            _type.Type,
            name_or_id,
            ignore_missing=ignore_missing,
        )

    def types(self, **query):
        """Retrieve a generator of volume types

        :param kwargs query: Optional query parameters to be sent to limit
            the volume types being returned.

        :returns: A generator of volume type objects.
        """
        return self._list(_type.Type, **query)

    def create_type(self, **attrs):
        """Create a new type from attributes

        :param dict attrs: Keyword arguments which will be used to create
            a :class:`~openstack.block_storage.v3.type.Type`,
            comprised of the properties on the Type class.

        :returns: The results of type creation
        :rtype: :class:`~openstack.block_storage.v3.type.Type`
        """
        return self._create(_type.Type, **attrs)

    def delete_type(self, type, ignore_missing=True):
        """Delete a type

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be
            raised when the type does not exist.
            When set to ``True``, no exception will be set when
            attempting to delete a nonexistent type.

        :returns: ``None``
        """
        self._delete(_type.Type, type, ignore_missing=ignore_missing)

    def update_type(self, type, **attrs):
        """Update a type

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param dict attrs: The attributes to update on the type

        :returns: The updated type
        :rtype: :class:`~openstack.block_storage.v3.type.Type`
        """
        return self._update(_type.Type, type, **attrs)

    def update_type_extra_specs(self, type, **attrs):
        """Update the extra_specs for a type

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param dict attrs: The extra spec attributes to update on the type

        :returns: A dict containing updated extra_specs
        """
        res = self._get_resource(_type.Type, type)
        extra_specs = res.set_extra_specs(self, **attrs)
        # Wrap the updated specs back into a Type object for the caller.
        result = _type.Type.existing(id=res.id, extra_specs=extra_specs)
        return result

    def delete_type_extra_specs(self, type, keys):
        """Delete the extra_specs for a type

        Note: This method will do a HTTP DELETE request for every key in keys.

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param keys: The keys to delete

        :returns: ``None``
        """
        res = self._get_resource(_type.Type, type)
        return res.delete_extra_specs(self, keys)
    def get_type_access(self, type):
        """Lists project IDs that have access to private volume type.

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.

        :returns: List of dictionaries describing projects that have access to
            the specified type
        """
        res = self._get_resource(_type.Type, type)
        return res.get_private_access(self)

    def add_type_access(self, type, project_id):
        """Adds private volume type access to a project.

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param str project_id: The ID of the project. Volume Type access to
            be added to this project ID.

        :returns: ``None``
        """
        res = self._get_resource(_type.Type, type)
        return res.add_private_access(self, project_id)

    def remove_type_access(self, type, project_id):
        """Remove private volume type access from a project.

        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param str project_id: The ID of the project. Volume Type access to
            be removed from this project ID.

        :returns: ``None``
        """
        res = self._get_resource(_type.Type, type)
        return res.remove_private_access(self, project_id)

    def get_type_encryption(self, volume_type_id):
        """Get the encryption details of a volume type

        :param volume_type_id: The value can be the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type`
            instance.

        :returns: One :class:`~openstack.block_storage.v3.type.TypeEncryption`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        volume_type = self._get_resource(_type.Type, volume_type_id)

        # Encryption is a singleton sub-resource of the type, hence
        # requires_id=False.
        return self._get(
            _type.TypeEncryption,
            volume_type_id=volume_type.id,
            requires_id=False,
        )

    def create_type_encryption(self, volume_type, **attrs):
        """Create new type encryption from attributes

        :param volume_type: The value can be the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type`
            instance.

        :param dict attrs: Keyword arguments which will be used to create
            a :class:`~openstack.block_storage.v3.type.TypeEncryption`,
            comprised of the properties on the TypeEncryption class.

        :returns: The results of type encryption creation
        :rtype: :class:`~openstack.block_storage.v3.type.TypeEncryption`
        """
        volume_type = self._get_resource(_type.Type, volume_type)

        return self._create(
            _type.TypeEncryption, volume_type_id=volume_type.id, **attrs
        )

    def delete_type_encryption(
        self, encryption=None, volume_type=None, ignore_missing=True
    ):
        """Delete type encryption attributes

        :param encryption: The value can be None or a
            :class:`~openstack.block_storage.v3.type.TypeEncryption`
            instance. If encryption_id is None then
            volume_type_id must be specified.

        :param volume_type: The value can be the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type`
            instance. Required if encryption_id is None.

        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be
            raised when the type does not exist.
            When set to ``True``, no exception will be set when
            attempting to delete a nonexistent type.

        :returns: ``None``
        """

        # When a volume type is given it takes precedence: resolve the
        # encryption sub-resource from the type.
        if volume_type:
            volume_type = self._get_resource(_type.Type, volume_type)
            encryption = self._get(
                _type.TypeEncryption,
                volume_type_id=volume_type.id,
                requires_id=False,
            )

        self._delete(
            _type.TypeEncryption, encryption, ignore_missing=ignore_missing
        )
    def update_type_encryption(
        self,
        encryption=None,
        volume_type=None,
        **attrs,
    ):
        """Update a type

        :param encryption: The value can be None or a
            :class:`~openstack.block_storage.v3.type.TypeEncryption`
            instance. If this is ``None`` then ``volume_type_id`` must be
            specified.
        :param volume_type: The value can be the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
            Required if ``encryption_id`` is None.
        :param dict attrs: The attributes to update on the type encryption.

        :returns: The updated type encryption
        :rtype: :class:`~openstack.block_storage.v3.type.TypeEncryption`
        """

        # When a volume type is given it takes precedence: resolve the
        # encryption sub-resource from the type.
        if volume_type:
            volume_type = self._get_resource(_type.Type, volume_type)
            encryption = self._get(
                _type.TypeEncryption,
                volume_type_id=volume_type.id,
                requires_id=False,
            )

        return self._update(_type.TypeEncryption, encryption, **attrs)

    # ====== DEFAULT TYPES ======

    def default_types(self):
        """Lists default types.

        :returns: List of default types associated to projects.
        :raises: :class:`~openstack.exceptions.SDKException` when the service
            does not support microversion 3.67.
        """
        # This is required since previously default types did not accept
        # URL with project ID
        if not utils.supports_microversion(self, '3.67'):
            raise exceptions.SDKException(
                'List default types require at least microversion 3.67'
            )

        return self._list(_default_type.DefaultType)

    def show_default_type(self, project):
        """Show default type for a project.

        :param project: The value can be either the ID of a project or a
            :class:`~openstack.identity.v3.project.Project` instance.

        :returns: Default type associated to the project.
        :raises: :class:`~openstack.exceptions.SDKException` when the service
            does not support microversion 3.67.
        """
        # This is required since previously default types did not accept
        # URL with project ID
        if not utils.supports_microversion(self, '3.67'):
            raise exceptions.SDKException(
                'Show default type require at least microversion 3.67'
            )

        project_id = resource.Resource._get_id(project)
        return self._get(_default_type.DefaultType, project_id)

    def set_default_type(self, project, type):
        """Set default type for a project.

        :param project: The value can be either the ID of a project or a
            :class:`~openstack.identity.v3.project.Project` instance.
        :param type: The value can be either the ID of a type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.

        :returns: Dictionary of project ID and its associated default type.
        :raises: :class:`~openstack.exceptions.SDKException` when the service
            does not support microversion 3.67.
        """
        # This is required since previously default types did not accept
        # URL with project ID
        if not utils.supports_microversion(self, '3.67'):
            raise exceptions.SDKException(
                'Set default type require at least microversion 3.67'
            )

        type_id = resource.Resource._get_id(type)
        project_id = resource.Resource._get_id(project)
        return self._create(
            _default_type.DefaultType,
            id=project_id,
            volume_type_id=type_id,
        )

    def unset_default_type(self, project):
        """Unset default type for a project.

        :param project: The value can be either the ID of a project or a
            :class:`~openstack.identity.v3.project.Project` instance.

        :returns: ``None``
        :raises: :class:`~openstack.exceptions.SDKException` when the service
            does not support microversion 3.67.
        """
        # This is required since previously default types did not accept
        # URL with project ID
        if not utils.supports_microversion(self, '3.67'):
            raise exceptions.SDKException(
                'Unset default type require at least microversion 3.67'
            )

        project_id = resource.Resource._get_id(project)
        self._delete(_default_type.DefaultType, project_id)

    # ====== VOLUMES ======
    def get_volume(self, volume):
        """Get a single volume

        :param volume: The value can be the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.

        :returns: One :class:`~openstack.block_storage.v3.volume.Volume`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        """
        return self._get(_volume.Volume, volume)
    def find_volume(
        self,
        name_or_id,
        ignore_missing=True,
        *,
        details=True,
        all_projects=False,
    ):
        """Find a single volume

        :param name_or_id: The name or ID of a volume.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be raised
            when the volume does not exist.
        :param bool details: When set to ``False`` no extended attributes
            will be returned. The default, ``True``, will cause objects with
            additional attributes to be returned.
        :param bool all_projects: When set to ``True``, search for volume by
            name across all projects. Note that this will likely result in
            a higher chance of duplicates. Admin-only by default.

        :returns: One :class:`~openstack.block_storage.v3.volume.Volume`
        :raises: :class:`~openstack.exceptions.NotFoundException`
            when no resource can be found.
        :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple
            resources are found.
        """
        query = {}
        if all_projects:
            query['all_projects'] = True
        # The detail endpoint returns volumes with extended attributes.
        list_base_path = '/volumes/detail' if details else None
        return self._find(
            _volume.Volume,
            name_or_id,
            ignore_missing=ignore_missing,
            list_base_path=list_base_path,
            **query,
        )

    def volumes(self, *, details=True, all_projects=False, **query):
        """Retrieve a generator of volumes

        :param bool details: When set to ``False`` no extended attributes
            will be returned. The default, ``True``, will cause objects with
            additional attributes to be returned.
        :param bool all_projects: When set to ``True``, list volumes from all
            projects. Admin-only by default.
        :param kwargs query: Optional query parameters to be sent to limit
            the volumes being returned. Available parameters include:

            * name: Name of the volume as a string.
            * status: Value of the status of the volume so that you can filter
              on "available" for example.

        :returns: A generator of volume objects.
        """
        if all_projects:
            query['all_projects'] = True
        base_path = '/volumes/detail' if details else None
        return self._list(_volume.Volume, base_path=base_path, **query)

    def create_volume(self, **attrs):
        """Create a new volume from attributes

        :param dict attrs: Keyword arguments which will be used to create
            a :class:`~openstack.block_storage.v3.volume.Volume`,
            comprised of the properties on the Volume class.

        :returns: The results of volume creation
        :rtype: :class:`~openstack.block_storage.v3.volume.Volume`
        """
        return self._create(_volume.Volume, **attrs)

    def delete_volume(
        self, volume, ignore_missing=True, *, force=False, cascade=False
    ):
        """Delete a volume

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param bool ignore_missing: When set to ``False``
            :class:`~openstack.exceptions.NotFoundException` will be
            raised when the volume does not exist.
            When set to ``True``, no exception will be set when
            attempting to delete a nonexistent volume.
        :param bool force: Whether to try forcing volume deletion.
        :param bool cascade: Whether to remove any snapshots along with the
            volume.

        :returns: ``None``
        """
        volume = self._get_resource(_volume.Volume, volume)

        params = {'cascade': cascade}
        # Since microversion 3.23 the regular DELETE accepts a force flag;
        # older services need the separate os-force_delete action instead.
        if utils.supports_microversion(self, '3.23'):
            params['force'] = force

        try:
            if force and not utils.supports_microversion(self, '3.23'):
                volume.force_delete(self)
            else:
                volume.delete(self, params=params)
        except exceptions.NotFoundException:
            if ignore_missing:
                return None
            raise

    def update_volume(self, volume, **attrs):
        """Update a volume

        :param volume: Either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param dict attrs: The attributes to update on the volume.

        :returns: The updated volume
        :rtype: :class:`~openstack.block_storage.v3.volume.Volume`
        """
        return self._update(_volume.Volume, volume, **attrs)
    def get_volume_metadata(self, volume):
        """Return a dictionary of metadata for a volume

        :param volume: Either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume`.

        :returns: A :class:`~openstack.block_storage.v3.volume.Volume` with the
            volume's metadata. All keys and values are Unicode text.
        :rtype: :class:`~openstack.block_storage.v3.volume.Volume`
        """
        volume = self._get_resource(_volume.Volume, volume)
        return volume.fetch_metadata(self)

    def set_volume_metadata(self, volume, **metadata):
        """Update metadata for a volume

        :param volume: Either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume`.
        :param kwargs metadata: Key/value pairs to be updated in the volume's
            metadata. No other metadata is modified by this call. All keys
            and values are stored as Unicode.

        :returns: A :class:`~openstack.block_storage.v3.volume.Volume` with the
            volume's metadata. All keys and values are Unicode text.
        :rtype: :class:`~openstack.block_storage.v3.volume.Volume`
        """
        volume = self._get_resource(_volume.Volume, volume)
        return volume.set_metadata(self, metadata=metadata)

    def delete_volume_metadata(self, volume, keys=None):
        """Delete metadata for a volume

        :param volume: Either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume`.
        :param list keys: The keys to delete. If left empty complete
            metadata will be removed.

        :rtype: ``None``
        """
        volume = self._get_resource(_volume.Volume, volume)
        if keys is not None:
            # One DELETE request per key.
            for key in keys:
                volume.delete_metadata_item(self, key)
        else:
            volume.delete_metadata(self)

    def summary(self, all_projects, **kwargs):
        """Get Volumes Summary

        This method returns the volumes summary in the deployment.

        :param all_projects: Whether to return the summary of all projects
            or not.
        :param kwargs: Additional query parameters forwarded to the
            volume-summary request.

        :returns: One
            :class:`~openstack.block_storage.v3.block_storage_summary.BlockStorageSummary`
            instance.
        """
        res = self._get(_summary.BlockStorageSummary, requires_id=False)
        # The API spells this query parameter 'all_tenants'.
        return res.fetch(
            self,
            requires_id=False,
            resource_response_key='volume-summary',
            all_tenants=all_projects,
            **kwargs,
        )

    # ====== VOLUME ACTIONS ======
    def extend_volume(self, volume, size):
        """Extend a volume

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param size: New volume size

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.extend(self, size)

    def complete_volume_extend(self, volume, error=False):
        """Complete a volume extend operation.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param bool error: Used to indicate if an error has occurred that
            requires Cinder to roll back the extend operation.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.complete_extend(self, error)

    def set_volume_readonly(self, volume, readonly=True):
        """Set a volume's read-only flag.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param bool readonly: Whether the volume should be a read-only volume
            or not.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.set_readonly(self, readonly)
    def retype_volume(self, volume, new_type, migration_policy="never"):
        """Retype the volume.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param new_type: The new volume type that volume is changed with.
            The value can be either the ID of the volume type or a
            :class:`~openstack.block_storage.v3.type.Type` instance.
        :param str migration_policy: Specify if the volume should be migrated
            when it is re-typed. Possible values are on-demand or never.
            Default: never.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        type_id = resource.Resource._get_id(new_type)
        volume.retype(self, type_id, migration_policy)

    def set_volume_bootable_status(self, volume, bootable):
        """Set bootable status of the volume.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param bool bootable: Specifies whether the volume should be bootable
            or not.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.set_bootable_status(self, bootable)

    def set_volume_image_metadata(self, volume, **metadata):
        """Update image metadata for a volume

        :param volume: Either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume`.
        :param kwargs metadata: Key/value pairs to be updated in the volume's
            image metadata. No other metadata is modified by this call.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        return volume.set_image_metadata(self, metadata=metadata)

    def delete_volume_image_metadata(self, volume, keys=None):
        """Delete image metadata for a volume

        :param volume: Either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume`.
        :param list keys: The keys to delete. If left empty complete
            metadata will be removed.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        if keys is not None:
            # One request per key.
            for key in keys:
                volume.delete_image_metadata_item(self, key)
        else:
            volume.delete_image_metadata(self)

    def reset_volume_status(
        self, volume, status=None, attach_status=None, migration_status=None
    ):
        """Reset volume statuses.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param str status: The new volume status.
        :param str attach_status: The new volume attach status.
        :param str migration_status: The new volume migration status (admin
            only).

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.reset_status(self, status, attach_status, migration_status)

    def revert_volume_to_snapshot(self, volume, snapshot):
        """Revert a volume to its latest snapshot.

        This method only support reverting a detached volume, and the
        volume status must be available.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param snapshot: The value can be either the ID of a snapshot or a
            :class:`~openstack.block_storage.v3.snapshot.Snapshot` instance.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        snapshot = self._get_resource(_snapshot.Snapshot, snapshot)
        volume.revert_to_snapshot(self, snapshot.id)

    def attach_volume(self, volume, mountpoint, instance=None, host_name=None):
        """Attaches a volume to a server.

        :param volume: The value can be either the ID of a volume or a
            :class:`~openstack.block_storage.v3.volume.Volume` instance.
        :param str mountpoint: The attaching mount point.
        :param str instance: The UUID of the attaching instance.
        :param str host_name: The name of the attaching host.

        :returns: None
        """
        volume = self._get_resource(_volume.Volume, volume)
        volume.attach(self, mountpoint, instance, host_name)
+ + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.attach(self, mountpoint, instance, host_name) + + def detach_volume(self, volume, attachment, force=False, connector=None): + """Detaches a volume from a server. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param str attachment: The ID of the attachment. + :param bool force: Whether to force volume detach (Rolls back an + unsuccessful detach operation after you disconnect the volume.) + :param dict connector: The connector object. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.detach(self, attachment, force, connector) + + def manage_volume(self, **attrs): + """Creates a volume by using existing storage rather than + allocating new storage. + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.volume.Volume`, comprised of + the properties on the Volume class. + :returns: The results of volume creation + :rtype: :class:`~openstack.block_storage.v3.volume.Volume` + """ + return _volume.Volume.manage(self, **attrs) + + def unmanage_volume(self, volume): + """Removes a volume from Block Storage management without removing the + back-end storage object that is associated with it. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.unmanage(self) + + def migrate_volume( + self, + volume, + host=None, + force_host_copy=False, + lock_volume=False, + cluster=None, + ): + """Migrates a volume to the specified host. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param str host: The target host for the volume migration. Host + format is host@backend. 
+ :param bool force_host_copy: If false (the default), rely on the volume + backend driver to perform the migration, which might be optimized. + If true, or the volume driver fails to migrate the volume itself, + a generic host-based migration is performed. + :param bool lock_volume: If true, migrating an available volume will + change its status to maintenance preventing other operations from + being performed on the volume such as attach, detach, retype, etc. + :param str cluster: The target cluster for the volume migration. + Cluster format is cluster@backend. Starting with microversion + 3.16, either cluster or host must be specified. If host is + specified and is part of a cluster, the cluster is used as the + target for the migration. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.migrate(self, host, force_host_copy, lock_volume, cluster) + + def complete_volume_migration(self, volume, new_volume, error=False): + """Complete the migration of a volume. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param str new_volume: The UUID of the new volume. + :param bool error: Used to indicate if an error has occured elsewhere + that requires clean up. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.complete_migration(self, new_volume, error) + + def upload_volume_to_image( + self, + volume, + image_name, + force=False, + disk_format=None, + container_format=None, + visibility=None, + protected=None, + ): + """Uploads the specified volume to image service. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param str image name: The name for the new image. + :param bool force: Enables or disables upload of a volume that is + attached to an instance. + :param str disk_format: Disk format for the new image. 
+ :param str container_format: Container format for the new image. + :param str visibility: The visibility property of the new image. + :param str protected: Whether the new image is protected. + + :returns: dictionary describing the image. + """ + volume = self._get_resource(_volume.Volume, volume) + return volume.upload_to_image( + self, + image_name, + force=force, + disk_format=disk_format, + container_format=container_format, + visibility=visibility, + protected=protected, + ) + + def reserve_volume(self, volume): + """Mark volume as reserved. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + + :returns: None""" + volume = self._get_resource(_volume.Volume, volume) + volume.reserve(self) + + def unreserve_volume(self, volume): + """Unmark volume as reserved. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + + :returns: None""" + volume = self._get_resource(_volume.Volume, volume) + volume.unreserve(self) + + def begin_volume_detaching(self, volume): + """Update volume status to 'detaching'. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + + :returns: None""" + volume = self._get_resource(_volume.Volume, volume) + volume.begin_detaching(self) + + def abort_volume_detaching(self, volume): + """Update volume status to 'in-use'. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + + :returns: None""" + volume = self._get_resource(_volume.Volume, volume) + volume.abort_detaching(self) + + def init_volume_attachment(self, volume, connector): + """Initialize volume attachment. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. 
+ :param dict connector: The connector object. + + :returns: Dictionary containing the modified connector object""" + volume = self._get_resource(_volume.Volume, volume) + return volume.init_attachment(self, connector) + + def terminate_volume_attachment(self, volume, connector): + """Update volume status to 'in-use'. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param dict connector: The connector object. + + :returns: None + """ + volume = self._get_resource(_volume.Volume, volume) + volume.terminate_attachment(self, connector) + + # ====== ATTACHMENTS ====== + + def create_attachment(self, volume, **attrs): + """Create a new attachment + + This is an internal API and should only be called by services + consuming volume attachments like nova, glance, ironic etc. + + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.attachment.Attachment` + comprised of the properties on the Attachment class like + connector, instance_id, mode etc. + :returns: The results of attachment creation + :rtype: :class:`~openstack.block_storage.v3.attachment.Attachment` + """ + volume_id = resource.Resource._get_id(volume) + return self._create( + _attachment.Attachment, volume_id=volume_id, **attrs + ) + + def get_attachment(self, attachment): + """Get a single volume + + This is an internal API and should only be called by services + consuming volume attachments like nova, glance, ironic etc. + + :param attachment: The value can be the ID of an attachment or a + :class:`~attachment.Attachment` instance. + + :returns: One :class:`~attachment.Attachment` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_attachment.Attachment, attachment) + + def attachments(self, **query): + """Returns a generator of attachments. + + This is an internal API and should only be called by services + consuming volume attachments like nova, glance, ironic etc. + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of attachment objects. + """ + return self._list(_attachment.Attachment, **query) + + def delete_attachment(self, attachment, ignore_missing=True): + """Delete an attachment + + This is an internal API and should only be called by services + consuming volume attachments like nova, glance, ironic etc. + + :param type: The value can be either the ID of a attachment or a + :class:`~openstack.block_storage.v3.attachment.Attachment` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the attachment does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent attachment. + + :returns: ``None`` + """ + self._delete( + _attachment.Attachment, + attachment, + ignore_missing=ignore_missing, + ) + + def update_attachment(self, attachment, **attrs): + """Update an attachment + + This is an internal API and should only be called by services + consuming volume attachments like nova, glance, ironic etc. + + :param attachment: The value can be the ID of an attachment or a + :class:`~openstack.block_storage.v3.attachment.Attachment` + instance. 
+ :param dict attrs: Keyword arguments which will be used to update + a :class:`~openstack.block_storage.v3.attachment.Attachment` + comprised of the properties on the Attachment class + + :returns: The updated attachment + :rtype: :class:`~openstack.volume.v3.attachment.Attachment` + """ + return self._update(_attachment.Attachment, attachment, **attrs) + + def complete_attachment(self, attachment): + """Complete an attachment + + This is an internal API and should only be called by services + consuming volume attachments like nova, glance, ironic etc. + + :param attachment: The value can be the ID of an attachment or a + :class:`~openstack.block_storage.v3.attachment.Attachment` + instance. + + :returns: ``None`` + :rtype: :class:`~openstack.volume.v3.attachment.Attachment` + """ + attachment_obj = self._get_resource(_attachment.Attachment, attachment) + return attachment_obj.complete(self) + + # ====== BACKEND POOLS ====== + def backend_pools(self, **query): + """Returns a generator of cinder Back-end storage pools + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns A generator of cinder Back-end storage pools objects + """ + return self._list(_stats.Pools, **query) + + # ====== BACKUPS ====== + def backups(self, *, details=True, **query): + """Retrieve a generator of backups + + :param bool details: When set to ``False`` + no additional details will be returned. The default, ``True``, + will cause objects with additional attributes to be returned. + :param dict query: Optional query parameters to be sent to limit the + resources being returned: + + * offset: pagination marker + * limit: pagination limit + * sort_key: Sorts by an attribute. A valid value is + name, status, container_format, disk_format, size, id, + created_at, or updated_at. Default is created_at. + The API uses the natural sorting direction of the + sort_key attribute value. 
+ * sort_dir: Sorts by one or more sets of attribute and sort + direction combinations. If you omit the sort direction + in a set, default is desc. + * project_id: Project ID to query backups for. + + :returns: A generator of backup objects. + """ + base_path = '/backups/detail' if details else None + return self._list(_backup.Backup, base_path=base_path, **query) + + def get_backup(self, backup): + """Get a backup + + :param backup: The value can be the ID of a backup + or a :class:`~openstack.block_storage.v3.backup.Backup` + instance. + + :returns: Backup instance + :rtype: :class:`~openstack.block_storage.v3.backup.Backup` + """ + return self._get(_backup.Backup, backup) + + def find_backup(self, name_or_id, ignore_missing=True, *, details=True): + """Find a single backup + + :param snapshot: The name or ID a backup + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the backup does not exist. + :param bool details: When set to ``False`` no additional details will + be returned. The default, ``True``, will cause objects with + additional attributes to be returned. + + :returns: One :class:`~openstack.block_storage.v3.backup.Backup` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + list_base_path = '/backups/detail' if details else None + return self._find( + _backup.Backup, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + ) + + def create_backup(self, **attrs): + """Create a new Backup from attributes with native API + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.backup.Backup` + comprised of the properties on the Backup class. 
+ + :returns: The results of Backup creation + :rtype: :class:`~openstack.block_storage.v3.backup.Backup` + """ + return self._create(_backup.Backup, **attrs) + + def delete_backup(self, backup, ignore_missing=True, force=False): + """Delete a CloudBackup + + :param backup: The value can be the ID of a backup or a + :class:`~openstack.block_storage.v3.backup.Backup` instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent zone. + :param bool force: Whether to try forcing backup deletion + + :returns: ``None`` + """ + if not force: + self._delete(_backup.Backup, backup, ignore_missing=ignore_missing) + else: + backup = self._get_resource(_backup.Backup, backup) + backup.force_delete(self) + + def update_backup(self, backup, **attrs): + """Update a backup + + :param backup: Either the ID of a backup or a + :class:`~openstack.block_storage.v3.backup.Backup`. + :param dict attrs: The attributes to update on the volume. + + :returns: The updated backup + :rtype: :class:`~openstack.block_storage.v3.backup.Backup` + """ + return self._update(_backup.Backup, backup, **attrs) + + def get_backup_metadata(self, backup): + """Return a dictionary of metadata for a backup + + :param backup: Either the ID of a backup or a + :class:`~openstack.block_storage.v3.backup.Backup`. + + :returns: A :class:`~openstack.block_storage.v3.backup.Backup` with the + backup's metadata. + :rtype: :class:`~openstack.block_storage.v3.backup.Backup` + """ + backup = self._get_resource(_backup.Backup, backup) + return backup.fetch_metadata(self) + + def export_record(self, backup): + """Get a backup meatadata to export + + :param backup: The value can be the ID of a backup + or a :class:`~openstack.block_storage.v2.backup.Backup` + instance. 
+ + :returns: The backup export record fields + """ + backup = self._get_resource(_backup.Backup, backup) + return backup.export(self) + + def set_backup_metadata(self, backup, **metadata): + """Update metadata for a backup + + :param backup: Either the ID of a backup or a + :class:`~openstack.block_storage.v3.backup.Backup`. + :param metadata: Key/value pairs to be updated in the backup's + metadata. No other metadata is modified by this call. + + :returns: A :class:`~openstack.block_storage.v3.backup.Backup` with the + backup's metadata. + :rtype: :class:`~openstack.block_storage.v3.backup.Backup` + """ + backup = self._get_resource(_backup.Backup, backup) + return backup.set_metadata(self, metadata=metadata) + + def delete_backup_metadata(self, backup, keys=None): + """Delete metadata for a backup + + :param backup: Either the ID of a backup or a + :class:`~openstack.block_storage.v3.backup.Backup`. + :param list keys: The keys to delete. If left empty complete + metadata will be removed. + + :rtype: ``None`` + """ + backup = self._get_resource(_backup.Backup, backup) + if keys is not None: + for key in keys: + backup.delete_metadata_item(self, key) + else: + backup.delete_metadata(self) + + # ====== BACKUP ACTIONS ====== + def restore_backup(self, backup, volume_id=None, name=None): + """Restore a Backup to volume + + :param backup: The value can be the ID of a backup or a + :class:`~openstack.block_storage.v3.backup.Backup` instance + :param volume_id: The ID of the volume to restore the backup to. + :param name: The name for new volume creation to restore. 
+ + :returns: Updated backup instance + :rtype: :class:`~openstack.block_storage.v3.backup.Backup` + """ + backup = self._get_resource(_backup.Backup, backup) + return backup.restore(self, volume_id=volume_id, name=name) + + def reset_backup_status(self, backup, status): + """Reset status of the backup + + :param backup: The value can be either the ID of a backup or a + :class:`~openstack.block_storage.v3.backup.Backup` instance. + :param str status: New backup status + + :returns: None + """ + backup = self._get_resource(_backup.Backup, backup) + backup.reset_status(self, status) + + def reset_backup(self, backup, status): + warnings.warn( + "reset_backup is a deprecated alias for reset_backup_status " + "and will be removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.reset_backup_status(backup, status) + + # ====== LIMITS ====== + def get_limits(self, project=None): + """Retrieves limits + + :param project: A project to get limits for. The value can be either + the ID of a project or an + :class:`~openstack.identity.v3.project.Project` instance. + :returns: A Limits object, including both + :class:`~openstack.block_storage.v3.limits.AbsoluteLimit` and + :class:`~openstack.block_storage.v3.limits.RateLimit` + :rtype: :class:`~openstack.block_storage.v3.limits.Limits` + """ + project_id = None + if project: + project_id = resource.Resource._get_id(project) + + # we don't use Proxy._get since that doesn't allow passing arbitrary + # query string parameters + res = self._get_resource(_limits.Limits, None) + return res.fetch( + self, + requires_id=False, + project_id=project_id, + ) + + # ====== CAPABILITIES ====== + def get_capabilities(self, host): + """Get a backend's capabilites + + :param host: Specified backend to obtain volume stats and properties. + + :returns: One :class: + `~openstack.block_storage.v3.capabilites.Capabilities` instance. 
+ :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + return self._get(_capabilities.Capabilities, host) + + # ====== GROUPS ====== + def get_group(self, group_id, **attrs): + """Get a group + + :param group_id: The ID of the group to get. + :param dict attrs: Optional query parameters to be sent to limit the + resources being returned. + + :returns: A Group instance. + :rtype: :class:`~openstack.block_storage.v3.group` + """ + return self._get(_group.Group, group_id, **attrs) + + def find_group(self, name_or_id, ignore_missing=True, *, details=True): + """Find a single group + + :param name_or_id: The name or ID of a group. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the group snapshot does not exist. + :param bool details: When set to ``False``, no additional details will + be returned. The default, ``True``, will cause additional details + to be returned. + + :returns: One :class:`~openstack.block_storage.v3.group.Group` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + list_base_path = '/groups/detail' if details else None + return self._find( + _group.Group, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + ) + + def groups(self, *, details=True, **query): + """Retrieve a generator of groups + + :param bool details: When set to ``False``, no additional details will + be returned. The default, ``True``, will cause additional details + to be returned. + :param dict query: Optional query parameters to be sent to limit the + resources being returned: + + * all_tenants: Shows details for all project. + * sort: Comma-separated list of sort keys and optional sort + directions. + * limit: Returns a number of items up to the limit value. 
+ * offset: Used in conjunction with limit to return a slice of + items. Specifies where to start in the list. + * marker: The ID of the last-seen item. + * list_volume: Show volume ids in this group. + * detailed: If True, will list groups with details. + * search_opts: Search options. + + :returns: A generator of group objects. + """ + base_path = '/groups/detail' if details else '/groups' + return self._list(_group.Group, base_path=base_path, **query) + + def create_group(self, **attrs): + """Create a new group from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.group.Group` comprised of + the properties on the Group class. + + :returns: The results of group creation. + :rtype: :class:`~openstack.block_storage.v3.group.Group`. + """ + return self._create(_group.Group, **attrs) + + def create_group_from_source(self, **attrs): + """Creates a new group from source + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.group.Group` comprised of + the properties on the Group class. + + :returns: The results of group creation. + :rtype: :class:`~openstack.block_storage.v3.group.Group`. + """ + return _group.Group.create_from_source(self, **attrs) + + def delete_group(self, group, delete_volumes=False): + """Delete a group + + :param group: The :class:`~openstack.block_storage.v3.group.Group` to + delete. + :param bool delete_volumes: When set to ``True``, volumes in group + will be deleted. + + :returns: ``None``. + """ + res = self._get_resource(_group.Group, group) + res.delete(self, delete_volumes=delete_volumes) + + def update_group(self, group, **attrs): + """Update a group + + :param group: The value can be the ID of a group or a + :class:`~openstack.block_storage.v3.group.Group` instance. + :param dict attrs: The attributes to update on the group. 
+ + :returns: The updated group + :rtype: :class:`~openstack.volume.v3.group.Group` + """ + return self._update(_group.Group, group, **attrs) + + def reset_group_status(self, group, status): + """Reset group status + + :param group: The :class:`~openstack.block_storage.v3.group.Group` + to set the state. + :param status: The status for a group. + + :returns: ``None`` + """ + res = self._get_resource(_group.Group, group) + return res.reset_status(self, status) + + def reset_group_state(self, group, status): + warnings.warn( + "reset_group_state is a deprecated alias for reset_group_status " + "and will be removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.reset_group_status(group, status) + + def enable_group_replication(self, group): + """Enable replication for a group + + :param group: The :class:`~openstack.block_storage.v3.group.Group` + to enable replication for. + + :returns: ``None`` + """ + res = self._get_resource(_group.Group, group) + return res.enable_replication(self) + + def disable_group_replication(self, group): + """Disable replication for a group + + :param group: The :class:`~openstack.block_storage.v3.group.Group` + to disable replication for. + + :returns: ``None`` + """ + res = self._get_resource(_group.Group, group) + return res.disable_replication(self) + + def failover_group_replication( + self, + group, + *, + allowed_attached_volume=False, + secondary_backend_id=None, + ): + """Failover replication for a group + + :param group: The :class:`~openstack.block_storage.v3.group.Group` + to failover replication for. + :param allowed_attached_volume: Whether to allow attached volumes in + the group. + :param secondary_backend_id: The secondary backend ID. 
+ + :returns: ``None`` + """ + res = self._get_resource(_group.Group, group) + return res.failover_replication( + self, + allowed_attached_volume=allowed_attached_volume, + secondary_backend_id=secondary_backend_id, + ) + + # ====== AVAILABILITY ZONES ====== + def availability_zones(self): + """Return a generator of availability zones + + :returns: A generator of availability zone + :rtype: + :class:`~openstack.block_storage.v3.availability_zone.AvailabilityZone` + """ + + return self._list(availability_zone.AvailabilityZone) + + # ====== GROUP SNAPSHOT ====== + def get_group_snapshot(self, group_snapshot_id): + """Get a group snapshot + + :param group_snapshot_id: The ID of the group snapshot to get. + + :returns: A GroupSnapshot instance. + :rtype: :class:`~openstack.block_storage.v3.group_snapshot` + """ + return self._get(_group_snapshot.GroupSnapshot, group_snapshot_id) + + def find_group_snapshot( + self, + name_or_id, + ignore_missing=True, + *, + details=True, + ): + """Find a single group snapshot + + :param name_or_id: The name or ID of a group snapshot. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the group snapshot does not exist. + :param bool details: When set to ``False``, no additional details will + be returned. The default, ``True``, will cause additional details + to be returned. + + :returns: One :class:`~openstack.block_storage.v3.group_snapshot` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. 
+ """ + list_base_path = '/group_snapshots/detail' if details else None + return self._find( + _group_snapshot.GroupSnapshot, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + ) + + def group_snapshots(self, *, details=True, **query): + """Retrieve a generator of group snapshots + + :param bool details: When ``True``, returns + :class:`~openstack.block_storage.v3.group_snapshot.GroupSnapshot` + objects with additional attributes filled. + :param kwargs query: Optional query parameters to be sent to limit + the group snapshots being returned. + :returns: A generator of group snapshtos. + """ + base_path = '/group_snapshots/detail' if details else None + return self._list( + _group_snapshot.GroupSnapshot, + base_path=base_path, + **query, + ) + + def create_group_snapshot(self, **attrs): + """Create a group snapshot + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.block_storage.v3.group_snapshot.GroupSnapshot` + comprised of the properties on the GroupSnapshot class. + + :returns: The results of group snapshot creation. + :rtype: :class:`~openstack.block_storage.v3.group_snapshot`. + """ + return self._create(_group_snapshot.GroupSnapshot, **attrs) + + def reset_group_snapshot_status(self, group_snapshot, status): + """Reset group snapshot status + + :param group_snapshot: The + :class:`~openstack.block_storage.v3.group_snapshot.GroupSnapshot` + to set the state. + :param state: The status of the group snapshot to be set. 
+ + :returns: None + """ + resource = self._get_resource( + _group_snapshot.GroupSnapshot, group_snapshot + ) + resource.reset_state(self, status) + + def reset_group_snapshot_state(self, group_snapshot, state): + warnings.warn( + "reset_group_snapshot_state is a deprecated alias for " + "reset_group_snapshot_status and will be removed in a future " + "release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.reset_group_snapshot_status(group_snapshot, state) + + def delete_group_snapshot(self, group_snapshot, ignore_missing=True): + """Delete a group snapshot + + :param group_snapshot: The :class:`~openstack.block_storage.v3. + group_snapshot.GroupSnapshot` to delete. + + :returns: None + """ + self._delete( + _group_snapshot.GroupSnapshot, + group_snapshot, + ignore_missing=ignore_missing, + ) + + # ====== GROUP TYPE ====== + def get_group_type(self, group_type): + """Get a specific group type + + :param group_type: The value can be the ID of a group type + or a :class:`~openstack.block_storage.v3.group_type.GroupType` + instance. + + :returns: One :class: + `~openstack.block_storage.v3.group_type.GroupType` instance. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + return self._get(_group_type.GroupType, group_type) + + def find_group_type(self, name_or_id, ignore_missing=True): + """Find a single group type + + :param name_or_id: The name or ID of a group type. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the group type does not exist. + + :returns: One + :class:`~openstack.block_storage.v3.group_type.GroupType` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. 
+ """ + return self._find( + _group_type.GroupType, + name_or_id, + ignore_missing=ignore_missing, + ) + + def group_types(self, **query): + """Retrive a generator of group types + + :param dict query: Optional query parameters to be sent to limit the + resources being returned: + + * sort: Comma-separated list of sort keys and optional sort + directions in the form of [:]. A valid + direction is asc (ascending) or desc (descending). + * limit: Requests a page size of items. Returns a number of items + up to a limit value. Use the limit parameter to make an + initial limited request and use the ID of the last-seen item + from the response as the marker parameter value in a + subsequent limited request. + * offset: Used in conjunction with limit to return a slice of + items. Is where to start in the list. + * marker: The ID of the last-seen item. + + :returns: A generator of group type objects. + """ + return self._list(_group_type.GroupType, **query) + + def create_group_type(self, **attrs): + """Create a group type + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.group_type.GroupType` + comprised of the properties on the GroupType class. + + :returns: The results of group type creation. + :rtype: :class:`~openstack.block_storage.v3.group_type.GroupTye`. + """ + return self._create(_group_type.GroupType, **attrs) + + def delete_group_type(self, group_type, ignore_missing=True): + """Delete a group type + + :param group_type: The value can be the ID of a group type + or a :class:`~openstack.block_storage.v3.group_type.GroupType` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent zone. 
+ + :returns: None + """ + self._delete( + _group_type.GroupType, group_type, ignore_missing=ignore_missing + ) + + def update_group_type(self, group_type, **attrs): + """Update a group_type + + :param group_type: The value can be the ID of a group type or a + :class:`~openstack.block_storage.v3.group_type.GroupType` + instance. + :param dict attrs: The attributes to update on the group type. + + :returns: The updated group type. + :rtype: :class:`~openstack.block_storage.v3.group_type.GroupType` + """ + return self._update(_group_type.GroupType, group_type, **attrs) + + def fetch_group_type_group_specs(self, group_type): + """Lists group specs of a group type. + + :param group_type: Either the ID of a group type or a + :class:`~openstack.block_storage.v3.group_type.GroupType` instance. + + :returns: One :class:`~openstack.block_storage.v3.group_type.GroupType` + """ + group_type = self._get_resource(_group_type.GroupType, group_type) + return group_type.fetch_group_specs(self) + + def create_group_type_group_specs(self, group_type, group_specs): + """Create group specs for a group type. + + :param group_type: Either the ID of a group type or a + :class:`~openstack.block_storage.v3.group_type.GroupType` instance. + :param dict group_specs: dict of extra specs + + :returns: One :class:`~openstack.block_storage.v3.group_type.GroupType` + """ + group_type = self._get_resource(_group_type.GroupType, group_type) + return group_type.create_group_specs(self, specs=group_specs) + + def get_group_type_group_specs_property(self, group_type, prop): + """Retrieve a group spec property for a group type. + + :param group_type: Either the ID of a group type or a + :class:`~openstack.block_storage.v3.group_type.GroupType` instance. + :param str prop: Property name. + + :returns: String value of the requested property. 
+ """ + group_type = self._get_resource(_group_type.GroupType, group_type) + return group_type.get_group_specs_property(self, prop) + + def update_group_type_group_specs_property(self, group_type, prop, val): + """Update a group spec property for a group type. + + :param group_type: Either the ID of a group type or a + :class:`~openstack.block_storage.v3.group_type.GroupType` instance. + :param str prop: Property name. + :param str val: Property value. + + :returns: String value of the requested property. + """ + group_type = self._get_resource(_group_type.GroupType, group_type) + return group_type.update_group_specs_property(self, prop, val) + + def delete_group_type_group_specs_property(self, group_type, prop): + """Delete a group spec property from a group type. + + :param group_type: Either the ID of a group type or a + :class:`~openstack.block_storage.v3.group_type.GroupType` instance. + :param str prop: Property name. + + :returns: None + """ + group_type = self._get_resource(_group_type.GroupType, group_type) + return group_type.delete_group_specs_property(self, prop) + + # ====== QUOTA CLASS SETS ====== + + def get_quota_class_set(self, quota_class_set='default'): + """Get a single quota class set + + Only one quota class is permitted, ``default``. + + :param quota_class_set: The value can be the ID of a quota class set + (only ``default`` is supported) or a + :class:`~openstack.block_storage.v3.quota_class_set.QuotaClassSet` + instance. + + :returns: One + :class:`~openstack.block_storage.v3.quota_class_set.QuotaClassSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_quota_class_set.QuotaClassSet, quota_class_set) + + def update_quota_class_set(self, quota_class_set, **attrs): + """Update a QuotaClassSet. + + Only one quota class is permitted, ``default``. 
+ + :param quota_class_set: Either the ID of a quota class set (only + ``default`` is supported) or a quota class set instance. + :param attrs: The attributes to update on the QuotaClassSet represented + by ``quota_class_set``. + + :returns: The updated QuotaClassSet + :rtype: :class:`~openstack.block_storage.v3.quota_class_set.QuotaClassSet` + """ + return self._update( + _quota_class_set.QuotaClassSet, quota_class_set, **attrs + ) + + # ====== QUOTA SETS ====== + + def get_quota_set(self, project, usage=False, **query): + """Show QuotaSet information for the project + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be retrieved + :param bool usage: When set to ``True`` quota usage and reservations + would be filled. + :param dict query: Additional query parameters to use. + + :returns: One :class:`~openstack.block_storage.v3.quota_set.QuotaSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, None, project_id=project.id + ) + return res.fetch(self, usage=usage, **query) + + def get_quota_set_defaults(self, project): + """Show QuotaSet defaults for the project + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be retrieved + + :returns: One :class:`~openstack.block_storage.v3.quota_set.QuotaSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, None, project_id=project.id + ) + return res.fetch( + self, base_path=(f'/os-quota-sets/{project.id}/defaults') + ) + + def revert_quota_set(self, project, **query): + """Reset Quota for the project/user. 
+ + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be resetted. + :param dict query: Additional parameters to be used. + + :returns: ``None`` + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, None, project_id=project.id + ) + + return res.delete(self, **query) + + def update_quota_set(self, project, **attrs): + """Update a QuotaSet. + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be reset. + :param attrs: The attributes to update on the QuotaSet represented + by ``quota_set``. + + :returns: The updated QuotaSet + :rtype: :class:`~openstack.block_storage.v3.quota_set.QuotaSet` + """ + if 'project_id' in attrs or isinstance(project, _quota_set.QuotaSet): + warnings.warn( + "The signature of 'update_quota_set' has changed and it " + "now expects a Project as the first argument, in line " + "with the other quota set methods.", + os_warnings.RemovedInSDK50Warning, + ) + # cinder doesn't support any query parameters so we simply pop + # these + if 'query' in attrs: + warnings.warn( + "The query argument is no longer supported and should " + "be removed.", + os_warnings.RemovedInSDK50Warning, + ) + attrs.pop('query') + + res = self._get_resource(_quota_set.QuotaSet, project, **attrs) + return res.commit(self) + else: + project = self._get_resource(_project.Project, project) + attrs['project_id'] = project.id + return self._update(_quota_set.QuotaSet, None, **attrs) + + # ====== SERVICES ====== + @ty.overload + def find_service( + self, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + **query: ty.Any, + ) -> _service.Service | None: ... + + @ty.overload + def find_service( + self, + name_or_id: str, + ignore_missing: ty.Literal[False], + **query: ty.Any, + ) -> _service.Service: ... 
+ + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + def find_service( + self, + name_or_id: str, + ignore_missing: bool, + **query: ty.Any, + ) -> _service.Service | None: ... + + def find_service( + self, + name_or_id: str, + ignore_missing: bool = True, + **query: ty.Any, + ) -> _service.Service | None: + """Find a single service + + :param name_or_id: The name or ID of a service + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param dict query: Additional attributes like 'host' + + :returns: One: class:`~openstack.block_storage.v3.service.Service` or + None + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + _service.Service, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def services( + self, + **query: ty.Any, + ) -> ty.Generator[_service.Service, None, None]: + """Return a generator of service + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + :returns: A generator of Service objects + :rtype: class: `~openstack.block_storage.v3.service.Service` + """ + return self._list(_service.Service, **query) + + def enable_service( + self, + service: str | _service.Service, + ) -> _service.Service: + """Enable a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v3.service.Service` instance. 
+ + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v3.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.enable(self) + + def disable_service( + self, + service: str | _service.Service, + *, + reason: str | None = None, + ) -> _service.Service: + """Disable a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v3.service.Service` instance + :param str reason: The reason to disable a service + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v3.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.disable(self, reason=reason) + + def thaw_service( + self, + service: str | _service.Service, + ) -> _service.Service: + """Thaw a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v3.service.Service` instance + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v3.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.thaw(self) + + def freeze_service( + self, + service: str | _service.Service, + ) -> _service.Service: + """Freeze a service + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v3.service.Service` instance + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v3.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.freeze(self) + + def set_service_log_levels( + self, + *, + level: _service.Level, + binary: _service.Binary | None = None, + server: str | None = None, + prefix: str | None = None, + ) -> None: + """Set log level for services. + + :param level: The log level to set, case insensitive, accepted values + are ``INFO``, ``WARNING``, ``ERROR`` and ``DEBUG``. + :param binary: The binary name of the service. 
+ :param server: The name of the host. + :param prefix: The prefix for the log path we are querying, for example + ``cinder.`` or ``sqlalchemy.engine.`` When not present or the empty + string is passed all log levels will be retrieved. + :returns: None. + """ + return _service.Service.set_log_levels( + self, level=level, binary=binary, server=server, prefix=prefix + ) + + def get_service_log_levels( + self, + *, + binary: _service.Binary | None = None, + server: str | None = None, + prefix: str | None = None, + ) -> ty.Generator[_service.LogLevel, None, None]: + """Get log level for services. + + :param binary: The binary name of the service. + :param server: The name of the host. + :param prefix: The prefix for the log path we are querying, for example + ``cinder.`` or ``sqlalchemy.engine.`` When not present or the empty + string is passed all log levels will be retrieved. + :returns: A generator of + :class:`~openstack.block_storage.v3.log_level.LogLevel` objects. + """ + return _service.Service.get_log_levels( + self, binary=binary, server=server, prefix=prefix + ) + + def failover_service( + self, + service: str | _service.Service, + *, + cluster: str | None = None, + backend_id: str | None = None, + ) -> _service.Service: + """Failover a service + + Only applies to replicating cinder-volume services. + + :param service: Either the ID of a service or a + :class:`~openstack.block_storage.v3.service.Service` instance + + :returns: Updated service instance + :rtype: class: `~openstack.block_storage.v3.service.Service` + """ + service_obj = self._get_resource(_service.Service, service) + return service_obj.failover( + self, cluster=cluster, backend_id=backend_id + ) + + # ====== RESOURCE FILTERS ====== + def resource_filters(self, **query): + """Retrieve a generator of resource filters + + :returns: A generator of resource filters. 
+ """ + return self._list(_resource_filter.ResourceFilter, **query) + + # ====== EXTENSIONS ====== + def extensions(self): + """Return a generator of extensions + + :returns: A generator of extension + :rtype: :class:`~openstack.block_storage.v3.extension.Extension` + """ + return self._list(_extension.Extension) + + # ===== TRANSFERS ===== + + def create_transfer(self, **attrs): + """Create a new Transfer record + + :param volume_id: The value is ID of the volume. + :param name: The value is name of the transfer + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.block_storage.v3.transfer.Transfer` + comprised of the properties on the Transfer class. + :returns: The results of Transfer creation + :rtype: :class:`~openstack.block_storage.v3.transfer.Transfer` + """ + return self._create(_transfer.Transfer, **attrs) + + def delete_transfer(self, transfer, ignore_missing=True): + """Delete a volume transfer + + :param transfer: The value can be either the ID of a transfer or a + :class:`~openstack.block_storage.v3.transfer.Transfer` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the transfer does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent transfer. + + :returns: ``None`` + """ + self._delete( + _transfer.Transfer, + transfer, + ignore_missing=ignore_missing, + ) + + def find_transfer(self, name_or_id, ignore_missing=True): + """Find a single transfer + + :param name_or_id: The name or ID of a transfer + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the volume transfer does not exist. + + :returns: One :class:`~openstack.block_storage.v3.transfer.Transfer` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + _transfer.Transfer, + name_or_id, + ignore_missing=ignore_missing, + ) + + def get_transfer(self, transfer): + """Get a single transfer + + :param transfer: The value can be the ID of a transfer or a + :class:`~openstack.block_storage.v3.transfer.Transfer` + instance. + + :returns: One :class:`~openstack.block_storage.v3.transfer.Transfer` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_transfer.Transfer, transfer) + + def transfers(self, *, details=True, all_projects=False, **query): + """Retrieve a generator of transfers + + :param bool details: When set to ``False`` no extended attributes + will be returned. The default, ``True``, will cause objects with + additional attributes to be returned. + :param bool all_projects: When set to ``True``, list transfers from + all projects. Admin-only by default. + :param kwargs query: Optional query parameters to be sent to limit + the transfers being returned. + + :returns: A generator of transfer objects. + """ + if all_projects: + query['all_projects'] = True + base_path = '/volume-transfers' + if not utils.supports_microversion(self, '3.55'): + base_path = '/os-volume-transfer' + if details: + base_path = utils.urljoin(base_path, 'detail') + return self._list(_transfer.Transfer, base_path=base_path, **query) + + def accept_transfer(self, transfer_id, auth_key): + """Accept a Transfer + + :param transfer_id: The value can be the ID of a transfer or a + :class:`~openstack.block_storage.v3.transfer.Transfer` + instance. + :param auth_key: The key to authenticate volume transfer. 
+ + :returns: The results of Transfer creation + :rtype: :class:`~openstack.block_storage.v3.transfer.Transfer` + """ + transfer = self._get_resource(_transfer.Transfer, transfer_id) + return transfer.accept(self, auth_key=auth_key) + + # ====== UTILS ====== + def wait_for_status( + self, + res: resource.ResourceT, + status: str = 'available', + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + if failures is None: + failures = ['error'] + + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + def _get_cleanup_dependencies(self): + return {'block_storage': {'before': []}} + + def _service_cleanup( + self, + dry_run=True, + client_status_queue=None, + identified_resources=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + # It is not possible to delete backup if there are dependent backups. + # In order to be able to do cleanup those is required to have multiple + # iterations (first clean up backups with has no dependent backups, and + # in next iterations there should be no backups with dependencies + # remaining. Logically we can have also failures, therefore it is + # required to limit amount of iterations we do (currently pick 10). In + # dry_run all those iterations are doing not what we want, therefore + # only iterate in a real cleanup mode. 
+ if not self.should_skip_resource_cleanup("backup", skip_resources): + if dry_run: + # Just iterate and evaluate backups in dry_run mode + for obj in self.backups(details=False): + need_delete = self._service_cleanup_del_res( + self.delete_backup, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + else: + # Set initial iterations conditions + need_backup_iteration = True + max_iterations = 10 + while need_backup_iteration and max_iterations > 0: + # Reset iteration controls + need_backup_iteration = False + max_iterations -= 1 + backups = [] + # To increase success chance sort backups by age, dependent + # backups are logically younger. + for obj in self.backups( + details=True, sort_key='created_at', sort_dir='desc' + ): + if not obj.has_dependent_backups: + # If no dependent backups - go with it + need_delete = self._service_cleanup_del_res( + self.delete_backup, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + if not dry_run and need_delete: + backups.append(obj) + else: + # Otherwise we need another iteration + need_backup_iteration = True + + # Before proceeding need to wait for backups to be deleted + for obj in backups: + try: + self.wait_for_delete(obj) + except exceptions.SDKException: + # Well, did our best, still try further + pass + + if not self.should_skip_resource_cleanup("snapshot", skip_resources): + snapshots = [] + for obj in self.snapshots(details=False): + need_delete = self._service_cleanup_del_res( + self.delete_snapshot, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + if not dry_run and need_delete: + snapshots.append(obj) + + # 
Before deleting volumes need to wait for snapshots to be deleted + for obj in snapshots: + try: + self.wait_for_delete(obj) + except exceptions.SDKException: + # Well, did our best, still try further + pass + + if not self.should_skip_resource_cleanup("volume", skip_resources): + for obj in self.volumes(details=True): + self._service_cleanup_del_res( + self.delete_volume, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) diff --git a/openstack/block_storage/v3/attachment.py b/openstack/block_storage/v3/attachment.py new file mode 100644 index 0000000000..263bd2b9fb --- /dev/null +++ b/openstack/block_storage/v3/attachment.py @@ -0,0 +1,114 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Attachment(resource.Resource): + resource_key = "attachment" + resources_key = "attachments" + base_path = "/attachments" + + _query_mapping = resource.QueryParameters( + 'id', + 'status', + 'project_id', + 'volume_id', + 'instance_id', + all_projects='all_tenants', + ) + + # capabilities + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + allow_fetch = True + + _max_microversion = "3.71" + + # Properties + #: The ID of the attachment. + id = resource.Body("id") + #: The status of the attachment. 
+ status = resource.Body("status") + #: The UUID of the attaching instance. + instance = resource.Body("instance") + #: The UUID of the volume which the attachment belongs to. + volume_id = resource.Body("volume_id") + #: The time when attachment is attached. + attached_at = resource.Body("attach_time") + #: The time when attachment is detached. + detached_at = resource.Body("detach_time") + #: The attach mode of attachment, read-only ('ro') or read-and-write + # ('rw'), default is 'rw'. + attach_mode = resource.Body("mode") + #: The connection info used for server to connect the volume. + connection_info = resource.Body("connection_info") + #: The connector object. + connector = resource.Body("connector") + + def create( + self, + session, + prepend_key=True, + base_path=None, + *, + resource_request_key=None, + resource_response_key=None, + microversion=None, + **params, + ): + if utils.supports_microversion(session, '3.54'): + if not self.attach_mode: + self._body.clean(only={'mode'}) + return super().create( + session, + prepend_key=prepend_key, + base_path=base_path, + resource_request_key=resource_request_key, + resource_response_key=resource_response_key, + microversion=microversion, + **params, + ) + + def complete(self, session, *, microversion=None): + """Mark the attachment as completed.""" + body = {'os-complete': self.id} + if not microversion: + microversion = self._get_microversion(session) + url = os.path.join(Attachment.base_path, self.id, 'action') + response = session.post(url, json=body, microversion=microversion) + exceptions.raise_from_response(response) + + def _prepare_request_body( + self, + patch, + prepend_key, + *, + resource_request_key=None, + ): + body = self._body.dirty + if body.get('volume_id'): + body['volume_uuid'] = body.pop('volume_id') + if body.get('instance'): + body['instance_uuid'] = body.pop('instance') + if prepend_key and self.resource_key is not None: + body = {self.resource_key: body} + return body + + 
+AttachmentDetail = Attachment diff --git a/openstack/block_storage/v3/availability_zone.py b/openstack/block_storage/v3/availability_zone.py new file mode 100644 index 0000000000..ce842b4f1d --- /dev/null +++ b/openstack/block_storage/v3/availability_zone.py @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class AvailabilityZone(resource.Resource): + resource_key = "" + resources_key = "availabilityZoneInfo" + base_path = "/os-availability-zone" + + # capabilities + allow_list = True + + #: Properties + #: Name of availability zone + name = resource.Body("zoneName", type=str) + #: State of availability zone, "available" is usual key + state = resource.Body("zoneState", type=dict) diff --git a/openstack/block_storage/v3/backup.py b/openstack/block_storage/v3/backup.py new file mode 100644 index 0000000000..b3db77ba6a --- /dev/null +++ b/openstack/block_storage/v3/backup.py @@ -0,0 +1,242 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +from openstack.common import metadata +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Backup(resource.Resource, metadata.MetadataMixin): + """Volume Backup""" + + resource_key = "backup" + resources_key = "backups" + base_path = "/backups" + + # TODO(gtema): Starting from ~3.31(3.45) Cinder seems to support also fuzzy + # search (name~, status~, volume_id~). But this is not documented + # officially and seem to require microversion be set + _query_mapping = resource.QueryParameters( + "limit", + "marker", + "offset", + "project_id", + "name", + "status", + "volume_id", + "sort_key", + "sort_dir", + "sort", + all_projects="all_tenants", + ) + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + + #: Properties + #: backup availability zone + availability_zone = resource.Body("availability_zone") + #: The container backup in + container = resource.Body("container") + #: The date and time when the resource was created. + created_at = resource.Body("created_at") + #: data timestamp + #: The time when the data on the volume was first saved. + #: If it is a backup from volume, it will be the same as created_at + #: for a backup. If it is a backup from a snapshot, + #: it will be the same as created_at for the snapshot. + data_timestamp = resource.Body('data_timestamp') + #: backup description + description = resource.Body("description") + #: The UUID of the encryption key. Only included for encrypted volumes. 
+ encryption_key_id = resource.Body("encryption_key_id") + #: Backup fail reason + fail_reason = resource.Body("fail_reason") + #: Force backup + force = resource.Body("force", type=bool) + #: has_dependent_backups + #: If this value is true, there are other backups depending on this backup. + has_dependent_backups = resource.Body('has_dependent_backups', type=bool) + #: Indicates whether the backup mode is incremental. + #: If this value is true, the backup mode is incremental. + #: If this value is false, the backup mode is full. + is_incremental = resource.Body("is_incremental", type=bool) + #: A list of links associated with this volume. *Type: list* + links = resource.Body("links", type=list) + #: The backup metadata. New in version 3.43 + metadata = resource.Body('metadata', type=dict) + #: backup name + name = resource.Body("name") + #: backup object count + object_count = resource.Body("object_count", type=int) + #: The UUID of the owning project. + #: New in version 3.18 + project_id = resource.Body('os-backup-project-attr:project_id') + #: The size of the volume, in gibibytes (GiB). + size = resource.Body("size", type=int) + #: The UUID of the source volume snapshot. + snapshot_id = resource.Body("snapshot_id") + #: backup status + #: values: creating, available, deleting, error, restoring, error_restoring + status = resource.Body("status") + #: The date and time when the resource was updated. + updated_at = resource.Body("updated_at") + #: The UUID of the project owner. New in 3.56 + user_id = resource.Body('user_id') + #: The UUID of the volume. + volume_id = resource.Body("volume_id") + #: The name of the volume. + volume_name = resource.Body("volume_name") + + _max_microversion = "3.64" + + def create(self, session, prepend_key=True, base_path=None, **params): + """Create a remote resource based on this instance. + + :param session: The session to use for making this request. 
+ :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation + request. Default to True. + :param str base_path: Base part of the URI for creating resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :param dict params: Additional params to pass. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_create` is not set to ``True``. + """ + if not self.allow_create: + raise exceptions.MethodNotSupported(self, "create") + + session = self._get_session(session) + microversion = self._get_microversion(session) + requires_id = ( + self.create_requires_id + if self.create_requires_id is not None + else self.create_method == 'PUT' + ) + + if self.create_exclude_id_from_body: + self._body._dirty.discard("id") + + if self.create_method == 'POST': + request = self._prepare_request( + requires_id=requires_id, + prepend_key=prepend_key, + base_path=base_path, + ) + # NOTE(gtema) this is a funny example of when attribute + # is called "incremental" on create, "is_incremental" on get + # and use of "alias" or "aka" is not working for such conflict, + # since our preferred attr name is exactly "is_incremental" + body = request.body + if 'is_incremental' in body['backup']: + body['backup']['incremental'] = body['backup'].pop( + 'is_incremental' + ) + response = session.post( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + params=params, + ) + else: + # Just for safety of the implementation (since PUT removed) + raise exceptions.ResourceFailure( + f"Invalid create method: {self.create_method}" + ) + + has_body = ( + self.has_body + if self.create_returns_body is None + else self.create_returns_body + ) + self.microversion = microversion + self._translate_response(response, has_body=has_body) + # direct comparision to False 
since we need to rule out None + if self.has_body and self.create_returns_body is False: + # fetch the body if it's required but not returned by create + return self.fetch(session) + return self + + def _action(self, session, body, microversion=None): + """Preform backup actions given the message body.""" + url = utils.urljoin(self.base_path, self.id, 'action') + resp = session.post( + url, json=body, microversion=self._max_microversion + ) + exceptions.raise_from_response(resp) + return resp + + def export(self, session): + """Export the current backup + + :param session: openstack session + :return: The backup export record fields + """ + url = utils.urljoin(self.base_path, self.id, "export_record") + resp = session.get(url) + exceptions.raise_from_response(resp) + return resp + + def restore(self, session, volume_id=None, name=None): + """Restore current backup to volume + + :param session: openstack session + :param volume_id: The ID of the volume to restore the backup to. + :param name: The name for new volume creation to restore. + :return: Updated backup instance + """ + url = utils.urljoin(self.base_path, self.id, "restore") + body: dict[str, dict] = {'restore': {}} + if volume_id: + body['restore']['volume_id'] = volume_id + if name: + body['restore']['name'] = name + if not (volume_id or name): + raise exceptions.SDKException( + 'Either of `name` or `volume_id` must be specified.' 
+ ) + response = session.post(url, json=body) + self._translate_response(response, resource_response_key='restore') + return self + + def force_delete(self, session): + """Force backup deletion""" + body = {'os-force_delete': None} + self._action(session, body) + + def reset_status(self, session, status): + """Reset the status of the backup""" + body = {'os-reset_status': {'status': status}} + self._action(session, body) + + def reset(self, session, status): + warnings.warn( + "reset is a deprecated alias for reset_status and will be " + "removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + self.reset_status(session, status) + + +BackupDetail = Backup diff --git a/openstack/block_storage/v3/block_storage_summary.py b/openstack/block_storage/v3/block_storage_summary.py new file mode 100644 index 0000000000..410424fe4c --- /dev/null +++ b/openstack/block_storage/v3/block_storage_summary.py @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class BlockStorageSummary(resource.Resource): + base_path = "/volumes/summary" + + # capabilities + allow_fetch = True + + # Properties + #: Total size of all the volumes + total_size = resource.Body("total_size") + #: Total count of all the volumes + total_count = resource.Body("total_count") + #: Metadata of all the volumes + metadata = resource.Body("metadata") + + _max_microversion = "3.36" diff --git a/openstack/block_storage/v3/capabilities.py b/openstack/block_storage/v3/capabilities.py new file mode 100644 index 0000000000..03d958d8b5 --- /dev/null +++ b/openstack/block_storage/v3/capabilities.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Capabilities(resource.Resource): + base_path = "/capabilities" + + # Capabilities + allow_fetch = True + + #: Properties + #: The capabilities description + description = resource.Body("description") + #: The name of volume backend capabilities. + display_name = resource.Body("display_name") + #: The driver version. + driver_version = resource.Body("driver_version") + #: The storage namespace, such as OS::Storage::Capabilities::foo. + namespace = resource.Body("namespace") + #: The name of the storage pool. + pool_name = resource.Body("pool_name") + #: The backend volume capabilities list, which consists of cinder + #: standard capabilities and vendor unique properties. 
+ properties = resource.Body("properties", type=dict) + #: A list of volume backends used to replicate volumes on this backend. + replication_targets = resource.Body("replication_targets", type=list) + #: The storage backend for the backend volume. + storage_protocol = resource.Body("storage_protocol") + #: The name of the vendor. + vendor_name = resource.Body("vendor_name") + #: The volume type access. + visibility = resource.Body("visibility") + #: The name of the back-end volume. + volume_backend_name = resource.Body("volume_backend_name") diff --git a/openstack/block_storage/v3/default_type.py b/openstack/block_storage/v3/default_type.py new file mode 100644 index 0000000000..c2dc5a36f0 --- /dev/null +++ b/openstack/block_storage/v3/default_type.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class DefaultType(resource.Resource): + resource_key = "default_type" + resources_key = "default_types" + base_path = "/default-types" + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_list = True + + # Create and update use the same PUT API + create_requires_id = True + create_method = 'PUT' + + _max_microversion = "3.67" + + # Properties + #: The UUID of the project. + project_id = resource.Body("project_id") + #: The UUID for an existing volume type. 
+ volume_type_id = resource.Body("volume_type_id") + + def _prepare_request_body( + self, + patch, + prepend_key, + *, + resource_request_key=None, + ): + body = self._body.dirty + # Set operation expects volume_type instead of + # volume_type_id + if body.get('volume_type_id'): + body['volume_type'] = body.pop('volume_type_id') + # When setting a default type, we want the ID to be + # appended in URL but not in the request body + if body.get('id'): + body.pop('id') + body = {self.resource_key: body} + return body diff --git a/openstack/block_storage/v3/extension.py b/openstack/block_storage/v3/extension.py new file mode 100644 index 0000000000..7f95a5aa43 --- /dev/null +++ b/openstack/block_storage/v3/extension.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Extension(resource.Resource): + resources_key = "extensions" + base_path = "/extensions" + + # Capabilities + allow_list = True + + #: Properties + #: The alias for the extension. + alias = resource.Body('alias', type=str) + #: The extension description. + description = resource.Body('description', type=str) + #: Links pertaining to this extension. + links = resource.Body('links', type=list) + #: The name of this extension. + name = resource.Body('name') + #: The date and time when the resource was updated. + #: The date and time stamp format is ISO 8601. 
+ updated_at = resource.Body('updated', type=str) diff --git a/openstack/block_storage/v3/group.py b/openstack/block_storage/v3/group.py new file mode 100644 index 0000000000..f4973d78b6 --- /dev/null +++ b/openstack/block_storage/v3/group.py @@ -0,0 +1,167 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Group(resource.Resource): + resource_key = "group" + resources_key = "groups" + base_path = "/groups" + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + + _query_mapping = resource.QueryParameters( + "limit", + "marker", + "offset", + "sort_dir", + "sort_key", + "sort", + all_projects="all_tenants", + ) + + availability_zone = resource.Body("availability_zone") + created_at = resource.Body("created_at") + description = resource.Body("description") + group_snapshot_id = resource.Body("group_snapshot_id") + group_type = resource.Body("group_type") + project_id = resource.Body("project_id") + replication_targets = resource.Body("replication_targets", type=list) + replication_status = resource.Body("replication_status") + source_group_id = resource.Body("source_group_id") + status = resource.Body("status") + volumes = resource.Body("volumes", type=list) + volume_types = resource.Body("volume_types", type=list) + + 
 _max_microversion = "3.38" + + def _action(self, session, body): + """Perform group actions given the message body.""" + session = self._get_session(session) + microversion = self._get_microversion(session) + url = utils.urljoin(self.base_path, self.id, 'action') + response = session.post(url, json=body, microversion=microversion) + exceptions.raise_from_response(response) + return response + + def delete(self, session, *args, delete_volumes=False, **kwargs): + """Delete a group.""" + body = {'delete': {'delete-volumes': delete_volumes}} + self._action(session, body) + + def fetch_replication_targets(self, session): + """Fetch replication targets for the group. + + :param session: The session to use for making this request. + :return: This group with the ``replication_targets`` field populated. + """ + body = {'list_replication_targets': None} + response = self._action(session, body) + self._body.attributes.update( + {'replication_targets': response.json()['replication_targets']} + ) + return self + + def enable_replication(self, session): + """Enable replication for the group. + + :param session: The session to use for making this request. + """ + body = {'enable_replication': None} + self._action(session, body) + + def disable_replication(self, session): + """Disable replication for the group. + + :param session: The session to use for making this request. + """ + body = {'disable_replication': None} + self._action(session, body) + + def failover_replication( + self, + session, + *, + allowed_attached_volume=False, + secondary_backend_id=None, + ): + """Failover replication for the group. + + :param session: The session to use for making this request. + :param allowed_attached_volume: Whether to allow attached volumes in + the group. + :param secondary_backend_id: The secondary backend ID. 
+ :returns: None + """ + body = { + 'modify_body_for_action': { + 'allow_attached_volume': allowed_attached_volume, + 'secondary_backend_id': secondary_backend_id, + }, + } + self._action(session, body) + + def reset_status(self, session, status): + """Resets the status for a group. + + :param session: The session to use for making this request. + :param status: The status for the group. + """ + body = {'reset_status': {'status': status}} + self._action(session, body) + + def reset(self, session, status): + warnings.warn( + "reset is a deprecated alias for reset_status and will be " + "removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + self.reset_status(session, status) + + @classmethod + def create_from_source( + cls, + session, + group_snapshot_id, + source_group_id, + name=None, + description=None, + ): + """Creates a new group from source.""" + session = cls._get_session(session) + microversion = cls._get_microversion(session) + url = utils.urljoin(cls.base_path, 'action') + body = { + 'create-from-src': { + 'name': name, + 'description': description, + 'group_snapshot_id': group_snapshot_id, + 'source_group_id': source_group_id, + } + } + response = session.post(url, json=body, microversion=microversion) + exceptions.raise_from_response(response) + + group = Group() + group._translate_response(response) + return group diff --git a/openstack/block_storage/v3/group_snapshot.py b/openstack/block_storage/v3/group_snapshot.py new file mode 100644 index 0000000000..ab745287d8 --- /dev/null +++ b/openstack/block_storage/v3/group_snapshot.py @@ -0,0 +1,97 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class GroupSnapshot(resource.Resource): + resource_key = "group_snapshot" + resources_key = "group_snapshots" + base_path = "/group_snapshots" + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = False + allow_list = True + + _query_mapping = resource.QueryParameters( + "limit", + "marker", + "offset", + "sort_dir", + "sort_key", + "sort", + all_projects="all_tenants", + ) + + #: Properties + #: The date and time when the resource was created. + created_at = resource.Body("created_at") + #: The group snapshot description. + description = resource.Body("description") + #: The UUID of the source group. + group_id = resource.Body("group_id") + #: The group type ID. + group_type_id = resource.Body("group_type_id") + #: The ID of the group snapshot. + id = resource.Body("id") + #: The group snapshot name. + name = resource.Body("name") + #: The UUID of the volume group snapshot project. + project_id = resource.Body("project_id") + #: The status of the generic group snapshot. 
+ status = resource.Body("status") + + # Pagination support was added in microversion 3.29 + _max_microversion = '3.29' + + def _action(self, session, body, microversion=None): + """Perform group snapshot actions given the message body.""" + url = utils.urljoin(self.base_path, self.id, 'action') + headers = {'Accept': ''} + # TODO(stephenfin): This logic belongs in openstack.resource I suspect + if microversion is None: + if session.default_microversion: + microversion = session.default_microversion + else: + microversion = utils.maximum_supported_microversion( + session, + self._max_microversion, + ) + response = session.post( + url, + json=body, + headers=headers, + microversion=microversion, + ) + exceptions.raise_from_response(response) + return response + + def reset_status(self, session, state): + """Resets the status for a group snapshot.""" + body = {'reset_status': {'status': state}} + return self._action(session, body) + + def reset_state(self, session, status): + warnings.warn( + "reset_state is a deprecated alias for reset_status and will be " + "removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + self.reset_status(session, status) diff --git a/openstack/block_storage/v3/group_type.py b/openstack/block_storage/v3/group_type.py new file mode 100644 index 0000000000..eb09cf02aa --- /dev/null +++ b/openstack/block_storage/v3/group_type.py @@ -0,0 +1,124 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class GroupType(resource.Resource): + resource_key = "group_type" + resources_key = "group_types" + base_path = "/group_types" + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + + _max_microversion = "3.11" + + #: Properties + #: The group type description. + description = resource.Body("description") + #: Contains the specifications for a group type. + group_specs = resource.Body("group_specs", type=dict, default={}) + #: Whether the group type is publicly visible. + is_public = resource.Body("is_public", type=bool) + + def fetch_group_specs(self, session): + """Fetch group_specs of the group type. + + These are returned by default if the user has suitable permissions + (i.e. you're an admin) but by default you also need the same + permissions to access this API. That means this function is kind of + useless. However, that is how the API was designed and it is + theoretically possible that people will have modified their policy to + allow this but not the other so we provide this anyway. + + :param session: The session to use for making this request. + :returns: An updated version of this object. + """ + url = utils.urljoin(GroupType.base_path, self.id, 'group_specs') + microversion = self._get_microversion(session) + response = session.get(url, microversion=microversion) + exceptions.raise_from_response(response) + specs = response.json().get('group_specs', {}) + self._update(group_specs=specs) + return self + + def create_group_specs(self, session, specs): + """Creates group specs for the group type. + + This will override whatever specs are already present on the group + type. + + :param session: The session to use for making this request. + :param specs: A dict of group specs to set on the group type. + :returns: An updated version of this object. 
+ """ + url = utils.urljoin(GroupType.base_path, self.id, 'group_specs') + microversion = self._get_microversion(session) + response = session.post( + url, + json={'group_specs': specs}, + microversion=microversion, + ) + exceptions.raise_from_response(response) + specs = response.json().get('group_specs', {}) + self._update(group_specs=specs) + return self + + def get_group_specs_property(self, session, prop): + """Retrieve a group spec property of the group type. + + :param session: The session to use for making this request. + :param prop: The name of the group spec property to update. + :returns: The value of the group spec property. + """ + url = utils.urljoin(GroupType.base_path, self.id, 'group_specs', prop) + microversion = self._get_microversion(session) + response = session.get(url, microversion=microversion) + exceptions.raise_from_response(response) + val = response.json().get(prop) + return val + + def update_group_specs_property(self, session, prop, val): + """Update a group spec property of the group type. + + :param session: The session to use for making this request. + :param prop: The name of the group spec property to update. + :param val: The value to set for the group spec property. + :returns: The updated value of the group spec property. + """ + url = utils.urljoin(GroupType.base_path, self.id, 'group_specs', prop) + microversion = self._get_microversion(session) + response = session.put( + url, json={prop: val}, microversion=microversion + ) + exceptions.raise_from_response(response) + val = response.json().get(prop) + return val + + def delete_group_specs_property(self, session, prop): + """Delete a group spec property from the group type. + + :param session: The session to use for making this request. + :param prop: The name of the group spec property to delete. 
+ :returns: None + """ + url = utils.urljoin(GroupType.base_path, self.id, 'group_specs', prop) + microversion = self._get_microversion(session) + response = session.delete(url, microversion=microversion) + exceptions.raise_from_response(response) diff --git a/openstack/block_storage/v3/limits.py b/openstack/block_storage/v3/limits.py new file mode 100644 index 0000000000..71717b4f3f --- /dev/null +++ b/openstack/block_storage/v3/limits.py @@ -0,0 +1,92 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class AbsoluteLimit(resource.Resource): + # Properties + #: The maximum total amount of backups, in gibibytes (GiB). + max_total_backup_gigabytes = resource.Body( + "maxTotalBackupGigabytes", type=int + ) + #: The maximum number of backups. + max_total_backups = resource.Body("maxTotalBackups", type=int) + #: The maximum number of snapshots. + max_total_snapshots = resource.Body("maxTotalSnapshots", type=int) + #: The maximum total amount of volumes, in gibibytes (GiB). + max_total_volume_gigabytes = resource.Body( + "maxTotalVolumeGigabytes", type=int + ) + #: The maximum number of volumes. + max_total_volumes = resource.Body("maxTotalVolumes", type=int) + #: The total number of backups gibibytes (GiB) used. + total_backup_gigabytes_used = resource.Body( + "totalBackupGigabytesUsed", type=int + ) + #: The total number of backups used. 
+ total_backups_used = resource.Body("totalBackupsUsed", type=int) + #: The total number of gibibytes (GiB) used. + total_gigabytes_used = resource.Body("totalGigabytesUsed", type=int) + #: The total number of snapshots used. + total_snapshots_used = resource.Body("totalSnapshotsUsed", type=int) + #: The total number of volumes used. + total_volumes_used = resource.Body("totalVolumesUsed", type=int) + + +class RateLimit(resource.Resource): + # Properties + #: Rate limits next available time. + next_available = resource.Body("next-available") + #: Integer for rate limits remaining. + remaining = resource.Body("remaining", type=int) + #: Unit of measurement for the value parameter. + unit = resource.Body("unit") + #: Integer number of requests which can be made. + value = resource.Body("value", type=int) + #: An HTTP verb (POST, PUT, etc.). + verb = resource.Body("verb") + + +class RateLimits(resource.Resource): + # Properties + #: A list of the specific limits that apply to the ``regex`` and ``uri``. + limits = resource.Body("limit", type=list, list_type=RateLimit) + #: A regex representing which routes this rate limit applies to. + regex = resource.Body("regex") + #: A URI representing which routes this rate limit applies to. + uri = resource.Body("uri") + + +class Limits(resource.Resource): + resource_key = "limits" + base_path = "/limits" + + _max_microversion = "3.39" + + _query_mapping = resource.QueryParameters( + "project_id", + ) + + # capabilities + allow_fetch = True + + # Properties + #: An absolute limits object. + absolute = resource.Body("absolute", type=AbsoluteLimit) + #: Rate-limit volume copy bandwidth, used to mitigate + #: slow down of data access from the instances. 
+ rate = resource.Body("rate", type=list, list_type=RateLimits) + + +# Legacy alias +Limit = Limits diff --git a/openstack/block_storage/v3/quota_class_set.py b/openstack/block_storage/v3/quota_class_set.py new file mode 100644 index 0000000000..440fa2bd79 --- /dev/null +++ b/openstack/block_storage/v3/quota_class_set.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class QuotaClassSet(resource.Resource): + resource_key = 'quota_class_set' + base_path = '/os-quota-class-sets' + + # Capabilities + allow_fetch = True + allow_commit = True + + # Properties + #: The size (GiB) of backups that are allowed for each project. + backup_gigabytes = resource.Body('backup_gigabytes', type=int) + #: The number of backups that are allowed for each project. + backups = resource.Body('backups', type=int) + #: The size (GiB) of volumes and snapshots that are allowed for each + #: project. + gigabytes = resource.Body('gigabytes', type=int) + #: The number of groups that are allowed for each project. + groups = resource.Body('groups', type=int) + #: The size (GB) of volumes in request that are allowed for each volume. + per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) + #: The number of snapshots that are allowed for each project. + snapshots = resource.Body('snapshots', type=int) + #: The number of volumes that are allowed for each project. 
+ volumes = resource.Body('volumes', type=int) diff --git a/openstack/block_storage/v3/quota_set.py b/openstack/block_storage/v3/quota_set.py new file mode 100644 index 0000000000..568e58c406 --- /dev/null +++ b/openstack/block_storage/v3/quota_set.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack.common import quota_set +from openstack import resource + + +class QuotaSet(quota_set.QuotaSet): + #: Properties + #: The size (GB) of backups that are allowed for each project. + backup_gigabytes = resource.Body('backup_gigabytes', type=int) + #: The number of backups that are allowed for each project. + backups = resource.Body('backups', type=int) + #: The size (GB) of volumes and snapshots that are allowed for each + #: project. + gigabytes = resource.Body('gigabytes', type=int) + #: The number of groups that are allowed for each project. + groups = resource.Body('groups', type=int) + #: The size (GB) of volumes in request that are allowed for each volume. + per_volume_gigabytes = resource.Body('per_volume_gigabytes', type=int) + #: The number of snapshots that are allowed for each project. + snapshots = resource.Body('snapshots', type=int) + #: The number of volumes that are allowed for each project. 
+ volumes = resource.Body('volumes', type=int) diff --git a/openstack/block_storage/v3/resource_filter.py b/openstack/block_storage/v3/resource_filter.py new file mode 100644 index 0000000000..06342ba02e --- /dev/null +++ b/openstack/block_storage/v3/resource_filter.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ResourceFilter(resource.Resource): + """Resource Filter""" + + resources_key = "resource_filters" + base_path = "/resource_filters" + + _query_mapping = resource.QueryParameters( + 'resource', + include_pagination_defaults=False, + ) + + # Capabilities + allow_list = True + + # resource_filters introduced in 3.33 + _max_microversion = '3.33' + + #: Properties + #: The list of filters that are applicable to the specified resource. + filters = resource.Body('filters', type=list) + #: The resource that the filters will be applied to. + resource = resource.Body('resource', type=str) diff --git a/openstack/block_storage/v3/service.py b/openstack/block_storage/v3/service.py new file mode 100644 index 0000000000..6141d3c062 --- /dev/null +++ b/openstack/block_storage/v3/service.py @@ -0,0 +1,331 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import enum +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Level(enum.Enum): + ERROR = 'ERROR' + WARNING = 'WARNING' + INFO = 'INFO' + DEBUG = 'DEBUG' + + +class Binary(enum.Enum): + ANY = '*' + API = 'cinder-api' + VOLUME = 'cinder-volume' + SCHEDULER = 'cinder-scheduler' + BACKUP = 'cinder-backup' + + +class LogLevel(resource.Resource): + # Properties + #: The binary name of the service. + binary = resource.Body('binary') + # TODO(stephenfin): Do we need these? They are request-only. + # #: The name of the host. + # server = resource.Body('server') + # #: he prefix for the log path we are querying, for example ``cinder.`` or + # #: ``sqlalchemy.engine.`` When not present or the empty string is passed + # #: all log levels will be retrieved. + # prefix = resource.Body('prefix') + #: The name of the host. + host = resource.Body('host') + #: The current log level that queried. 
+ levels = resource.Body('levels', type=dict) + + +class Service(resource.Resource): + resources_key = 'services' + base_path = '/os-services' + + # capabilities + allow_list = True + + _query_mapping = resource.QueryParameters( + 'binary', + 'host', + ) + + # Properties + #: The ID of active storage backend (cinder-volume services only) + active_backend_id = resource.Body('active_backend_id') + #: The availability zone of service + availability_zone = resource.Body('zone') + #: The state of storage backend (cinder-volume services only) (since 3.49) + backend_state = resource.Body('backend_state') + #: Binary name of service + binary = resource.Body('binary') + #: The cluster name (since 3.7) + cluster = resource.Body('cluster') + #: Disabled reason of service + disabled_reason = resource.Body('disabled_reason') + #: The name of the host where service runs + host = resource.Body('host') + # Whether the host is frozen or not (cinder-volume services only) + is_frozen = resource.Body('frozen') + #: Service name + name = resource.Body('name', alias='binary') + #: The volume service replication status (cinder-volume services only) + replication_status = resource.Body('replication_status') + #: State of service + state = resource.Body('state') + #: Status of service + status = resource.Body('status') + #: The date and time when the resource was updated + updated_at = resource.Body('updated_at') + + # 3.32 introduced the 'set-log' action + _max_microversion = '3.32' + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... 
+ + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... + + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: + # No direct request possible, thus go directly to list + if list_base_path: + params['base_path'] = list_base_path + + # all_projects is a special case that is used by multiple services. We + # handle it here since it doesn't make sense to pass it to the .fetch + # call above + if all_projects is not None: + params['all_projects'] = all_projects + + data = cls.list(session, **params) + + result = None + for maybe_result in data: + # Since ID might be both int and str force cast + id_value = str(cls._get_id(maybe_result)) + name_value = maybe_result.name + + if str(name_or_id) in (id_value, name_value): + if 'host' in params and maybe_result['host'] != params['host']: + continue + # Only allow one resource to be found. If we already + # found a match, raise an exception to show it. + if result is None: + result = maybe_result + else: + msg = "More than one %s exists with the name '%s'." 
+ msg = msg % (cls.__name__, name_or_id) + raise exceptions.DuplicateResource(msg) + + if result is not None: + return result + + if ignore_missing: + return None + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) + + def commit(self, session, prepend_key=False, *args, **kwargs): + # we need to set prepend_key to false + return super().commit( + session, + prepend_key, + *args, + **kwargs, + ) + + def _action(self, session, action, body, microversion=None): + if not microversion: + microversion = session.default_microversion + url = utils.urljoin(Service.base_path, action) + response = session.put(url, json=body, microversion=microversion) + self._translate_response(response) + return self + + def enable(self, session): + """Enable service.""" + body = {'binary': self.binary, 'host': self.host} + return self._action(session, 'enable', body) + + def disable(self, session, *, reason=None): + """Disable service.""" + body = {'binary': self.binary, 'host': self.host} + + if not reason: + action = 'disable' + else: + action = 'disable-log-reason' + body['disabled_reason'] = reason + + return self._action(session, action, body) + + def thaw(self, session): + body = {'host': self.host} + return self._action(session, 'thaw', body) + + def freeze(self, session): + body = {'host': self.host} + return self._action(session, 'freeze', body) + + @classmethod + def set_log_levels( + cls, + session: adapter.Adapter, + *, + level: Level, + binary: Binary | None = None, + server: str | None = None, + prefix: str | None = None, + ) -> None: + """Set log level for services. + + :param session: The session to use for making this request. + :param level: The log level to set, case insensitive, accepted values + are ``INFO``, ``WARNING``, ``ERROR`` and ``DEBUG``. + :param binary: The binary name of the service. + :param server: The name of the host. 
+ :param prefix: The prefix for the log path we are querying, for example + ``cinder.`` or ``sqlalchemy.engine.`` When not present or the empty + string is passed all log levels will be retrieved. + :returns: None. + """ + microversion = cls._assert_microversion_for( + session, '3.32', error_message="Cannot use set-log action" + ) + body = { + 'level': level, + 'binary': binary or '', # cinder insists on strings + 'server': server, + 'prefix': prefix, + } + url = utils.urljoin(cls.base_path, 'set-log') + response = session.put(url, json=body, microversion=microversion) + exceptions.raise_from_response(response) + + @classmethod + def get_log_levels( + cls, + session: adapter.Adapter, + *, + binary: Binary | None = None, + server: str | None = None, + prefix: str | None = None, + ) -> ty.Generator[LogLevel, None, None]: + """Get log level for services. + + :param session: The session to use for making this request. + :param binary: The binary name of the service. + :param server: The name of the host. + :param prefix: The prefix for the log path we are querying, for example + ``cinder.`` or ``sqlalchemy.engine.`` When not present or the empty + string is passed all log levels will be retrieved. + :returns: A generator of + :class:`~openstack.block_storage.v3.service.LogLevel` objects. + """ + microversion = cls._assert_microversion_for( + session, '3.32', error_message="Cannot use get-log action" + ) + body = { + 'binary': binary or '', # cinder insists on strings + 'server': server, + 'prefix': prefix, + } + url = utils.urljoin(cls.base_path, 'get-log') + response = session.put(url, json=body, microversion=microversion) + exceptions.raise_from_response(response) + + for entry in response.json()['log_levels']: + yield LogLevel( + binary=entry['binary'], + host=entry['host'], + levels=entry['levels'], + ) + + def failover( + self, + session, + *, + cluster=None, + backend_id=None, + ): + """Failover a service + + Only applies to replicating cinder-volume services. 
+ """ + body = {'host': self.host} + if cluster: + body['cluster'] = cluster + if backend_id: + body['backend_id'] = backend_id + + action = 'failover_host' + if utils.supports_microversion(session, '3.26'): + action = 'failover' + + return self._action(session, action, body) diff --git a/openstack/block_storage/v3/snapshot.py b/openstack/block_storage/v3/snapshot.py new file mode 100644 index 0000000000..00a6b38c13 --- /dev/null +++ b/openstack/block_storage/v3/snapshot.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +from openstack.common import metadata +from openstack import exceptions +from openstack import format +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Snapshot(resource.Resource, metadata.MetadataMixin): + resource_key = "snapshot" + resources_key = "snapshots" + base_path = "/snapshots" + + _query_mapping = resource.QueryParameters( + "name", + "status", + "volume_id", + "project_id", + "limit", + "marker", + "offset", + "sort_dir", + "sort_key", + "sort", + all_projects="all_tenants", + ) + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + + # Properties + #: Whether this resource consumes quota or not. Resources that not + #: counted for quota usage are usually temporary internal resources + #: created to perform an operation. 
+ #: This is included from microversion 3.65 + consumes_quota = resource.Body("consumes_quota") + #: The timestamp of this snapshot creation. + created_at = resource.Body("created_at") + #: Description of snapshot. Default is None. + description = resource.Body("description") + #: The ID of the group snapshot. + #: This is included from microversion 3.14 + group_snapshot_id = resource.Body("group_snapshot_id") + #: Indicate whether to create snapshot, even if the volume is attached. + #: Default is ``False``. *Type: bool* + is_forced = resource.Body("force", type=format.BoolStr) + #: The percentage of completeness the snapshot is currently at. + progress = resource.Body("os-extended-snapshot-attributes:progress") + #: The project ID this snapshot is associated with. + project_id = resource.Body("os-extended-snapshot-attributes:project_id") + #: The size of the volume, in gibibytes (GiB). + size = resource.Body("size", type=int) + #: The current status of this snapshot. Potential values are creating, + #: available, deleting, error, and error_deleting. + status = resource.Body("status") + #: The date and time when the resource was updated. + updated_at = resource.Body("updated_at") + #: The UUID of the user. + #: This is included from microversion 3.41 + user_id = resource.Body("user_id") + #: The ID of the volume this snapshot was taken of. 
+ volume_id = resource.Body("volume_id") + + _max_microversion = '3.65' + + def _action(self, session, body, microversion=None): + """Perform snapshot actions given the message body.""" + url = utils.urljoin(self.base_path, self.id, 'action') + resp = session.post( + url, json=body, microversion=self._max_microversion + ) + exceptions.raise_from_response(resp) + return resp + + def force_delete(self, session): + """Force snapshot deletion.""" + body = {'os-force_delete': None} + self._action(session, body) + + def reset_status(self, session, status): + """Reset the status of the snapshot.""" + body = {'os-reset_status': {'status': status}} + self._action(session, body) + + def reset(self, session, status): + warnings.warn( + "reset is a deprecated alias for reset_status and will be " + "removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + self.reset_status(session, status) + + def set_status(self, session, status, progress=None): + """Update fields related to the status of a snapshot.""" + body = {'os-update_snapshot_status': {'status': status}} + if progress is not None: + body['os-update_snapshot_status']['progress'] = progress + self._action(session, body) + + @classmethod + def manage( + cls, + session, + volume_id, + ref, + name=None, + description=None, + metadata=None, + ): + """Manage a snapshot under block storage provisioning.""" + url = '/manageable_snapshots' + if not utils.supports_microversion(session, '3.8'): + url = '/os-snapshot-manage' + body = { + 'snapshot': { + 'volume_id': volume_id, + 'ref': ref, + 'name': name, + 'description': description, + 'metadata': metadata, + } + } + resp = session.post(url, json=body, microversion=cls._max_microversion) + exceptions.raise_from_response(resp) + snapshot = Snapshot() + snapshot._translate_response(resp) + return snapshot + + def unmanage(self, session): + """Unmanage a snapshot from block storage provisioning.""" + body = {'os-unmanage': None} + self._action(session, body) + + 
+SnapshotDetail = Snapshot diff --git a/openstack/block_storage/v3/stats.py b/openstack/block_storage/v3/stats.py new file mode 100644 index 0000000000..8f9a442244 --- /dev/null +++ b/openstack/block_storage/v3/stats.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Pools(resource.Resource): + resource_key = "" + resources_key = "pools" + base_path = "/scheduler-stats/get_pools?detail=True" + + # capabilities + allow_fetch = False + allow_create = False + allow_delete = False + allow_list = True + + # Properties + #: The Cinder name for the pool + name = resource.Body("name") + #: returns a dict with information about the pool + capabilities = resource.Body("capabilities", type=dict) diff --git a/openstack/block_storage/v3/transfer.py b/openstack/block_storage/v3/transfer.py new file mode 100644 index 0000000000..660581f275 --- /dev/null +++ b/openstack/block_storage/v3/transfer.py @@ -0,0 +1,202 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Transfer(resource.Resource): + resource_key = "transfer" + resources_key = "transfers" + base_path = "/volume-transfers" + + # capabilities + allow_create = True + allow_delete = True + allow_fetch = True + allow_list = True + + # Properties + #: UUID of the transfer. + id = resource.Body("id") + #: The date and time when the resource was created. + created_at = resource.Body("created_at") + #: Name of the volume to transfer. + name = resource.Body("name") + #: ID of the volume to transfer. + volume_id = resource.Body("volume_id") + #: Auth key for the transfer. + auth_key = resource.Body("auth_key") + #: A list of links associated with this volume. *Type: list* + links = resource.Body("links") + #: Whether to transfer snapshots or not + no_snapshots = resource.Body("no_snapshots") + + _max_microversion = "3.55" + + def create( + self, + session, + prepend_key=True, + base_path=None, + *, + resource_request_key=None, + resource_response_key=None, + microversion=None, + **params, + ): + """Create a volume transfer. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation request. Default to + True. + :param str base_path: Base part of the URI for creating resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str resource_request_key: Overrides the usage of + self.resource_key when prepending a key to the request body. + Ignored if `prepend_key` is false. + :param str resource_response_key: Overrides the usage of + self.resource_key when processing response bodies. + Ignored if `prepend_key` is false. 
+ :param str microversion: API version to override the negotiated one. + :param dict params: Additional params to pass. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_create` is not set to ``True``. + """ + + # With MV 3.55 we introduced new API for volume transfer + # (/volume-transfers). Prior to that (MV < 3.55), we use + # the old API (/os-volume-transfer) + if not utils.supports_microversion(session, '3.55'): + base_path = '/os-volume-transfer' + # With MV 3.55, we also introduce the ability to transfer + # snapshot along with the volume. If MV < 3.55, we should + # not send 'no_snapshots' parameter in the request. + if 'no_snapshots' in params: + params.pop('no_snapshots') + + return super().create( + session, + prepend_key=prepend_key, + base_path=base_path, + resource_request_key=resource_request_key, + resource_response_key=resource_response_key, + microversion=microversion, + **params, + ) + + def fetch( + self, + session, + requires_id=True, + base_path=None, + error_message=None, + skip_cache=False, + *, + resource_response_key=None, + microversion=None, + **params, + ): + """Get volume transfer. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param boolean requires_id: A boolean indicating whether resource ID + should be part of the requested URI. + :param str base_path: Base part of the URI for fetching resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str error_message: An Error message to be returned if + requested object does not exist. + :param bool skip_cache: A boolean indicating whether optional API + cache should be skipped for this invocation. + :param str resource_response_key: Overrides the usage of + self.resource_key when processing the response body. + :param str microversion: API version to override the negotiated one. 
+ :param dict params: Additional parameters that can be consumed. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_fetch` is not set to ``True``. + :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. + """ + + if not utils.supports_microversion(session, '3.55'): + base_path = '/os-volume-transfer' + + return super().fetch( + session, + requires_id=requires_id, + base_path=base_path, + error_message=error_message, + skip_cache=skip_cache, + resource_response_key=resource_response_key, + microversion=microversion, + **params, + ) + + def delete( + self, session, error_message=None, *, microversion=None, **kwargs + ): + """Delete a volume transfer. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param str microversion: API version to override the negotiated one. + :param dict kwargs: Parameters that will be passed to + _prepare_request() + + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_commit` is not set to ``True``. + :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. + """ + + if not utils.supports_microversion(session, '3.55'): + kwargs['base_path'] = '/os-volume-transfer' + return super().delete( + session, + error_message=error_message, + microversion=microversion, + **kwargs, + ) + + def accept(self, session, *, auth_key=None): + """Accept a volume transfer. + + :param session: The session to use for making this request. + :param auth_key: The authentication key for the volume transfer. + + :return: This :class:`Transfer` instance. 
+ """ + body = {'accept': {'auth_key': auth_key}} + + path = self.base_path + if not utils.supports_microversion(session, '3.55'): + path = '/os-volume-transfer' + + url = utils.urljoin(path, self.id, 'accept') + microversion = self._get_microversion(session) + resp = session.post( + url, + json=body, + microversion=microversion, + ) + exceptions.raise_from_response(resp) + + transfer = Transfer() + transfer._translate_response(resp) + return transfer diff --git a/openstack/block_storage/v3/type.py b/openstack/block_storage/v3/type.py new file mode 100644 index 0000000000..488fd4a97a --- /dev/null +++ b/openstack/block_storage/v3/type.py @@ -0,0 +1,176 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Type(resource.Resource): + resource_key = "volume_type" + resources_key = "volume_types" + base_path = "/types" + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_list = True + allow_commit = True + + _query_mapping = resource.QueryParameters( + "is_public", + "limit", + "marker", + "offset", + "sort_dir", + "sort_key", + "sort", + all_projects="all_tenants", + ) + + # Properties + #: Description of the type. + description = resource.Body("description") + #: A dict of extra specifications. "capabilities" is a usual key. + extra_specs = resource.Body("extra_specs", type=dict) + #: a private volume-type. 
*Type: bool* + is_public = resource.Body('os-volume-type-access:is_public', type=bool) + + def _extra_specs(self, method, key=None, delete=False, extra_specs=None): + extra_specs = extra_specs or {} + for k, v in extra_specs.items(): + if not isinstance(v, str): + raise ValueError( + f"The value for {k} ({v}) must be a text string" + ) + + if key is not None: + url = utils.urljoin(self.base_path, self.id, "extra_specs", key) + else: + url = utils.urljoin(self.base_path, self.id, "extra_specs") + + kwargs = {} + if extra_specs: + kwargs["json"] = {"extra_specs": extra_specs} + + response = method(url, headers={}, **kwargs) + + # ensure Cinder API has not returned us an error + exceptions.raise_from_response(response) + # DELETE doesn't return a JSON body while everything else does. + return response.json() if not delete else None + + def set_extra_specs(self, session, **extra_specs): + """Update extra specs. + + This call will replace only the extra_specs with the same keys + given here. Other keys will not be modified. + + :param session: The session to use for making this request. + :param kwargs extra_specs: Key/value extra_specs pairs to be update on + this volume type. All keys and values. + :returns: The updated extra specs. + """ + if not extra_specs: + return dict() + + result = self._extra_specs(session.post, extra_specs=extra_specs) + return result["extra_specs"] + + def delete_extra_specs(self, session, keys): + """Delete extra specs. + + .. note:: + + This method will do a HTTP DELETE request for every key in keys. + + :param session: The session to use for this request. + :param list keys: The keys to delete. + :returns: ``None`` + """ + for key in keys: + self._extra_specs(session.delete, key=key, delete=True) + + def get_private_access(self, session): + """List projects with private access to the volume type. + + :param session: The session to use for making this request. + :returns: The volume type access response. 
+ """ + url = utils.urljoin(self.base_path, self.id, "os-volume-type-access") + resp = session.get(url) + + exceptions.raise_from_response(resp) + + return resp.json().get("volume_type_access", []) + + def add_private_access(self, session, project_id): + """Add project access from the volume type. + + :param session: The session to use for making this request. + :param project_id: The project to add access for. + """ + url = utils.urljoin(self.base_path, self.id, "action") + body = {"addProjectAccess": {"project": project_id}} + + resp = session.post(url, json=body) + + exceptions.raise_from_response(resp) + + def remove_private_access(self, session, project_id): + """Remove project access from the volume type. + + :param session: The session to use for making this request. + :param project_id: The project to remove access for. + """ + url = utils.urljoin(self.base_path, self.id, "action") + body = {"removeProjectAccess": {"project": project_id}} + + resp = session.post(url, json=body) + + exceptions.raise_from_response(resp) + + +class TypeEncryption(resource.Resource): + resource_key = "encryption" + resources_key = "encryption" + base_path = "/types/%(volume_type_id)s/encryption" + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_list = False + allow_commit = True + + # Properties + #: The encryption algorithm or mode. + cipher = resource.Body("cipher") + #: Notional service where encryption is performed. + control_location = resource.Body("control_location") + #: The date and time when the resource was created. + created_at = resource.Body("created_at") + #: The resource is deleted or not. + deleted = resource.Body("deleted") + #: The date and time when the resource was deleted. + deleted_at = resource.Body("deleted_at") + #: A ID representing this type. + encryption_id = resource.Body("encryption_id", alternate_id=True) + #: The Size of encryption key. 
+ key_size = resource.Body("key_size") + #: The class that provides encryption support. + provider = resource.Body("provider") + #: The date and time when the resource was updated. + updated_at = resource.Body("updated_at") + #: The ID of the Volume Type. + volume_type_id = resource.URI("volume_type_id") diff --git a/openstack/block_storage/v3/volume.py b/openstack/block_storage/v3/volume.py new file mode 100644 index 0000000000..8fe389d8e2 --- /dev/null +++ b/openstack/block_storage/v3/volume.py @@ -0,0 +1,412 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.common import metadata +from openstack import exceptions +from openstack import format +from openstack import resource +from openstack import utils + + +class Volume(resource.Resource, metadata.MetadataMixin): + resource_key = "volume" + resources_key = "volumes" + base_path = "/volumes" + + _query_mapping = resource.QueryParameters( + 'name', + 'status', + 'user_id', + 'project_id', + 'created_at', + 'updated_at', + properties='metadata', + all_projects='all_tenants', + ) + + # capabilities + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + allow_list = True + + # Properties + #: Instance attachment information. If this volume is attached to a server + #: instance, the attachments list includes the UUID of the attached server, + #: an attachment UUID, the name of the attached host, if any, the volume + #: UUID, the device, and the device UUID. 
Otherwise, this list is empty. + attachments = resource.Body("attachments", type=list) + #: The availability zone. + availability_zone = resource.Body("availability_zone") + #: To create a volume from an existing backup, specify the ID of + #: the existing volume backup. (since 3.47) + backup_id = resource.Body("backup_id") + #: ID of the consistency group. + consistency_group_id = resource.Body("consistencygroup_id") + #: Whether this resource consumes quota or not. Resources that not counted + #: for quota usage are usually temporary internal resources created to + #: perform an operation. (since 3.65) + consumes_quota = resource.Body("consumes_quota") + #: The cluster name of volume backend. (since 3.61) + cluster_name = resource.Body("cluster_name") + #: The timestamp of this volume creation. + created_at = resource.Body("created_at") + #: The volume description. + description = resource.Body("description") + #: The UUID of the encryption key. Only included for encrypted volumes. + #: (since 3.64) + encryption_key_id = resource.Body("encryption_key_id") + #: Extended replication status on this volume. + extended_replication_status = resource.Body( + "os-volume-replication:extended_status" + ) + #: The ID of the group that the volume belongs to. (since 3.13) + group_id = resource.Body("group_id") + #: The volume's current back-end. + host = resource.Body("os-vol-host-attr:host") + #: The ID of the image from which you want to create the volume. + #: Required to create a bootable volume. + image_id = resource.Body("imageRef") + #: Enables or disables the bootable attribute. You can boot an + #: instance from a bootable volume. *Type: bool* + is_bootable = resource.Body("bootable", type=format.BoolStr) + #: ``True`` if this volume is encrypted, ``False`` if not. + #: *Type: bool* + is_encrypted = resource.Body("encrypted", type=format.BoolStr) + #: Whether volume will be sharable or not. 
+ is_multiattach = resource.Body("multiattach", type=bool) + #: The volume ID that this volume's name on the back-end is based on. + migration_id = resource.Body("os-vol-mig-status-attr:name_id") + #: The status of this volume's migration (None means that a migration + #: is not currently in progress). + migration_status = resource.Body("os-vol-mig-status-attr:migstat") + #: The project ID associated with current back-end. + project_id = resource.Body("os-vol-tenant-attr:tenant_id") + #: Data set by the replication driver + replication_driver_data = resource.Body( + "os-volume-replication:driver_data" + ) + #: The provider ID for the volume. (since 3.21) + provider_id = resource.Body("provider_id") + #: Status of replication on this volume. + replication_status = resource.Body("replication_status") + #: Scheduler hints for the volume + scheduler_hints = resource.Body('OS-SCH-HNT:scheduler_hints', type=dict) + #: A unique identifier that's used to indicate what node the volume-service + #: for a particular volume is being serviced by. (since 3.48) + service_uuid = resource.Body("service_uuid") + #: An indicator whether the host connecting the volume should lock for the + #: whole attach/detach process or not. true means only is iSCSI initiator + #: running on host doesn't support manual scans, false means never use + #: locks, and null means to always use locks. Look at os-brick's + #: guard_connection context manager. Default=True. (since 3.48) + shared_targets = resource.Body("shared_targets", type=bool) + #: The size of the volume, in gibibytes (GiB). + size = resource.Body("size", type=int) + #: To create a volume from an existing snapshot, specify the ID of + #: the existing volume snapshot. If specified, the volume is created + #: in same availability zone and with same size of the snapshot. + snapshot_id = resource.Body("snapshot_id") + #: To create a volume from an existing volume, specify the ID of + #: the existing volume. 
If specified, the volume is created with + #: same size of the source volume. + source_volume_id = resource.Body("source_volid") + #: One of the following values: creating, available, attaching, in-use, + #: deleting, error, error_deleting, backing-up, restoring-backup, + #: error_restoring. For details on these statuses, see the + #: Block Storage API documentation. + status = resource.Body("status") + #: The date and time when the resource was updated. + updated_at = resource.Body("updated_at") + #: The user ID associated with the volume + user_id = resource.Body("user_id") + #: One or more metadata key and value pairs about image + volume_image_metadata = resource.Body("volume_image_metadata") + #: The name of the associated volume type. + volume_type = resource.Body("volume_type") + #: The associated volume type ID for the volume. (since 3.61) + volume_type_id = resource.Body("volume_type_id") + + _max_microversion = "3.71" + + def _action(self, session, body, microversion=None): + """Perform volume actions given the message body.""" + # NOTE: This is using Volume.base_path instead of self.base_path + # as both Volume and VolumeDetail instances can be acted on, but + # the URL used is sans any additional /detail/ part. 
+ url = utils.urljoin(Volume.base_path, self.id, 'action') + if microversion is None: + microversion = self._get_microversion(session) + resp = session.post(url, json=body, microversion=microversion) + exceptions.raise_from_response(resp) + return resp + + def extend(self, session, size): + """Extend a volume size.""" + body = {'os-extend': {'new_size': size}} + self._action(session, body) + + def complete_extend(self, session, error=False): + """Complete volume extend operation""" + body = {'os-extend_volume_completion': {'error': error}} + self._action(session, body) + + def set_bootable_status(self, session, bootable=True): + """Set volume bootable status flag""" + body = {'os-set_bootable': {'bootable': bootable}} + self._action(session, body) + + def set_readonly(self, session, readonly): + """Set volume readonly flag""" + body = {'os-update_readonly_flag': {'readonly': readonly}} + self._action(session, body) + + def set_image_metadata(self, session, metadata): + """Sets image metadata key-value pairs on the volume""" + body = {'os-set_image_metadata': {'metadata': metadata}} + self._action(session, body) + + def delete_image_metadata(self, session): + """Remove all image metadata from the volume""" + for key in self.metadata: + body = {'os-unset_image_metadata': key} + self._action(session, body) + + def delete_image_metadata_item(self, session, key): + """Remove a single image metadata from the volume""" + body = {'os-unset_image_metadata': key} + self._action(session, body) + + def reset_status( + self, session, status=None, attach_status=None, migration_status=None + ): + """Reset volume statuses (admin operation)""" + body: dict[str, dict[str, str]] = {'os-reset_status': {}} + if status: + body['os-reset_status']['status'] = status + if attach_status: + body['os-reset_status']['attach_status'] = attach_status + if migration_status: + body['os-reset_status']['migration_status'] = migration_status + self._action(session, body) + + def 
revert_to_snapshot(self, session, snapshot_id): + """Revert volume to its snapshot""" + utils.require_microversion(session, "3.40") + body = {'revert': {'snapshot_id': snapshot_id}} + self._action(session, body) + + def attach(self, session, mountpoint, instance=None, host_name=None): + """Attach volume to server""" + body = {'os-attach': {'mountpoint': mountpoint}} + + if instance is not None: + body['os-attach']['instance_uuid'] = instance + elif host_name is not None: + body['os-attach']['host_name'] = host_name + else: + raise ValueError( + 'Either instance_uuid or host_name must be specified' + ) + + self._action(session, body) + + def detach(self, session, attachment, force=False, connector=None): + """Detach volume from server""" + if not force: + body = {'os-detach': {'attachment_id': attachment}} + if force: + body = {'os-force_detach': {'attachment_id': attachment}} + if connector: + body['os-force_detach']['connector'] = connector + + self._action(session, body) + + @classmethod + def manage( + cls, + session, + host, + ref, + name=None, + description=None, + volume_type=None, + availability_zone=None, + metadata=None, + bootable=False, + cluster=None, + ): + """Manage an existing volume.""" + url = '/manageable_volumes' + if not utils.supports_microversion(session, '3.8'): + url = '/os-volume-manage' + body = { + 'volume': { + 'host': host, + 'ref': ref, + 'name': name, + 'description': description, + 'volume_type': volume_type, + 'availability_zone': availability_zone, + 'metadata': metadata, + 'bootable': bootable, + } + } + if cluster is not None: + body['volume']['cluster'] = cluster + resp = session.post(url, json=body, microversion=cls._max_microversion) + exceptions.raise_from_response(resp) + volume = Volume() + volume._translate_response(resp) + return volume + + def unmanage(self, session): + """Unmanage volume""" + body = {'os-unmanage': None} + + self._action(session, body) + + def retype(self, session, new_type, migration_policy=None): + 
"""Change volume type""" + body = {'os-retype': {'new_type': new_type}} + if migration_policy: + body['os-retype']['migration_policy'] = migration_policy + + self._action(session, body) + + def migrate( + self, + session, + host=None, + force_host_copy=False, + lock_volume=False, + cluster=None, + ): + """Migrate volume""" + req = dict() + if host is not None: + req['host'] = host + if force_host_copy: + req['force_host_copy'] = force_host_copy + if lock_volume: + req['lock_volume'] = lock_volume + if cluster is not None: + req['cluster'] = cluster + utils.require_microversion(session, "3.16") + body = {'os-migrate_volume': req} + + self._action(session, body) + + def complete_migration(self, session, new_volume_id, error=False): + """Complete volume migration""" + body = { + 'os-migrate_volume_completion': { + 'new_volume': new_volume_id, + 'error': error, + } + } + + self._action(session, body) + + def force_delete(self, session): + """Force volume deletion""" + body = {'os-force_delete': None} + + self._action(session, body) + + def upload_to_image( + self, + session, + image_name, + force=False, + disk_format=None, + container_format=None, + visibility=None, + protected=None, + ): + """Upload the volume to image service""" + req = dict(image_name=image_name, force=force) + if disk_format is not None: + req['disk_format'] = disk_format + if container_format is not None: + req['container_format'] = container_format + if visibility is not None: + req['visibility'] = visibility + if protected is not None: + req['protected'] = protected + + if visibility is not None or protected is not None: + utils.require_microversion(session, "3.1") + + body = {'os-volume_upload_image': req} + + resp = self._action(session, body).json() + return resp['os-volume_upload_image'] + + def reserve(self, session): + """Reserve volume""" + body = {'os-reserve': None} + + self._action(session, body) + + def unreserve(self, session): + """Unreserve volume""" + body = {'os-unreserve': None} 
+ + self._action(session, body) + + def begin_detaching(self, session): + """Update volume status to 'detaching'""" + body = {'os-begin_detaching': None} + + self._action(session, body) + + def abort_detaching(self, session): + """Roll back volume status to 'in-use'""" + body = {'os-roll_detaching': None} + + self._action(session, body) + + def init_attachment(self, session, connector): + """Initialize volume attachment""" + body = {'os-initialize_connection': {'connector': connector}} + + resp = self._action(session, body).json() + return resp['connection_info'] + + def terminate_attachment(self, session, connector): + """Terminate volume attachment""" + body = {'os-terminate_connection': {'connector': connector}} + + self._action(session, body) + + def _prepare_request_body( + self, patch, prepend_key, *, resource_request_key=None + ): + body = self._body.dirty + # Scheduler hints is external to the standard volume request + # so pass it separately and not under the volume JSON object. + scheduler_hints = None + if 'OS-SCH-HNT:scheduler_hints' in body.keys(): + scheduler_hints = body.pop('OS-SCH-HNT:scheduler_hints') + if prepend_key and self.resource_key is not None: + body = {self.resource_key: body} + # If scheduler hints was passed in the request but the value is + # None, it doesn't make a difference to include it. + if scheduler_hints: + body['OS-SCH-HNT:scheduler_hints'] = scheduler_hints + return body + + +VolumeDetail = Volume diff --git a/openstack/block_store/block_store_service.py b/openstack/block_store/block_store_service.py deleted file mode 100644 index fa133a6a78..0000000000 --- a/openstack/block_store/block_store_service.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import service_filter - - -class BlockStoreService(service_filter.ServiceFilter): - """The block store service.""" - - valid_versions = [service_filter.ValidVersion('v2')] - - def __init__(self, version=None): - """Create a block store service.""" - super(BlockStoreService, self).__init__(service_type='volume', - version=version, - requires_project_id=True) diff --git a/openstack/block_store/v2/_proxy.py b/openstack/block_store/v2/_proxy.py deleted file mode 100644 index 1d82eae3ab..0000000000 --- a/openstack/block_store/v2/_proxy.py +++ /dev/null @@ -1,189 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.block_store.v2 import snapshot as _snapshot -from openstack.block_store.v2 import type as _type -from openstack.block_store.v2 import volume as _volume -from openstack import proxy2 - - -class Proxy(proxy2.BaseProxy): - - def get_snapshot(self, snapshot): - """Get a single snapshot - - :param snapshot: The value can be the ID of a snapshot or a - :class:`~openstack.volume.v2.snapshot.Snapshot` - instance. 
- - :returns: One :class:`~openstack.volume.v2.snapshot.Snapshot` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. - """ - return self._get(_snapshot.Snapshot, snapshot) - - def snapshots(self, details=True, **query): - """Retrieve a generator of snapshots - - :param bool details: When set to ``False`` - :class:`~openstack.block_store.v2.snapshot.Snapshot` - objects will be returned. The default, ``True``, will cause - :class:`~openstack.block_store.v2.snapshot.SnapshotDetail` - objects to be returned. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the snapshots being returned. Available parameters include: - - * name: Name of the snapshot as a string. - * all_tenants: Whether return the snapshots of all tenants. - * volume_id: volume id of a snapshot. - * status: Value of the status of the snapshot so that you can - filter on "available" for example. - - :returns: A generator of snapshot objects. - """ - snapshot = _snapshot.SnapshotDetail if details else _snapshot.Snapshot - return self._list(snapshot, paginated=True, **query) - - def create_snapshot(self, **attrs): - """Create a new snapshot from attributes - - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.volume.v2.snapshot.Snapshot`, - comprised of the properties on the Snapshot class. - - :returns: The results of snapshot creation - :rtype: :class:`~openstack.volume.v2.snapshot.Snapshot` - """ - return self._create(_snapshot.Snapshot, **attrs) - - def delete_snapshot(self, snapshot, ignore_missing=True): - """Delete a snapshot - - :param snapshot: The value can be either the ID of a snapshot or a - :class:`~openstack.volume.v2.snapshot.Snapshot` - instance. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the snapshot does not exist. 
- When set to ``True``, no exception will be set when - attempting to delete a nonexistent snapshot. - - :returns: ``None`` - """ - self._delete(_snapshot.Snapshot, snapshot, - ignore_missing=ignore_missing) - - def get_type(self, type): - """Get a single type - - :param type: The value can be the ID of a type or a - :class:`~openstack.volume.v2.type.Type` instance. - - :returns: One :class:`~openstack.volume.v2.type.Type` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. - """ - return self._get(_type.Type, type) - - def types(self): - """Retrieve a generator of volume types - - :returns: A generator of volume type objects. - """ - return self._list(_type.Type, paginated=False) - - def create_type(self, **attrs): - """Create a new type from attributes - - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.volume.v2.type.Type`, - comprised of the properties on the Type class. - - :returns: The results of type creation - :rtype: :class:`~openstack.volume.v2.type.Type` - """ - return self._create(_type.Type, **attrs) - - def delete_type(self, type, ignore_missing=True): - """Delete a type - - :param type: The value can be either the ID of a type or a - :class:`~openstack.volume.v2.type.Type` instance. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the type does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent type. - - :returns: ``None`` - """ - self._delete(_type.Type, type, ignore_missing=ignore_missing) - - def get_volume(self, volume): - """Get a single volume - - :param volume: The value can be the ID of a volume or a - :class:`~openstack.volume.v2.volume.Volume` instance. - - :returns: One :class:`~openstack.volume.v2.volume.Volume` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. 
- """ - return self._get(_volume.Volume, volume) - - def volumes(self, details=True, **query): - """Retrieve a generator of volumes - - :param bool details: When set to ``False`` - :class:`~openstack.block_store.v2.volume.Volume` objects - will be returned. The default, ``True``, will cause - :class:`~openstack.block_store.v2.volume.VolumeDetail` - objects to be returned. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the volumes being returned. Available parameters include: - - * name: Name of the volume as a string. - * all_tenants: Whether return the volumes of all tenants - * status: Value of the status of the volume so that you can filter - on "available" for example. - - :returns: A generator of volume objects. - """ - volume = _volume.VolumeDetail if details else _volume.Volume - return self._list(volume, paginated=True, **query) - - def create_volume(self, **attrs): - """Create a new volume from attributes - - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.volume.v2.volume.Volume`, - comprised of the properties on the Volume class. - - :returns: The results of volume creation - :rtype: :class:`~openstack.volume.v2.volume.Volume` - """ - return self._create(_volume.Volume, **attrs) - - def delete_volume(self, volume, ignore_missing=True): - """Delete a volume - - :param volume: The value can be either the ID of a volume or a - :class:`~openstack.volume.v2.volume.Volume` instance. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the volume does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent volume. 
- - :returns: ``None`` - """ - self._delete(_volume.Volume, volume, ignore_missing=ignore_missing) diff --git a/openstack/block_store/v2/snapshot.py b/openstack/block_store/v2/snapshot.py deleted file mode 100644 index ae2755c262..0000000000 --- a/openstack/block_store/v2/snapshot.py +++ /dev/null @@ -1,65 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.block_store import block_store_service -from openstack import format -from openstack import resource2 - - -class Snapshot(resource2.Resource): - resource_key = "snapshot" - resources_key = "snapshots" - base_path = "/snapshots" - service = block_store_service.BlockStoreService() - - _query_mapping = resource2.QueryParameters('all_tenants', 'name', 'status', - 'volume_id') - - # capabilities - allow_get = True - allow_create = True - allow_delete = True - allow_update = True - allow_list = True - - # Properties - #: A ID representing this snapshot. - id = resource2.Body("id") - #: Name of the snapshot. Default is None. - name = resource2.Body("name") - - #: The current status of this snapshot. Potential values are creating, - #: available, deleting, error, and error_deleting. - status = resource2.Body("status") - #: Description of snapshot. Default is None. - description = resource2.Body("description") - #: The timestamp of this snapshot creation. - created_at = resource2.Body("created_at") - #: Metadata associated with this snapshot. 
- metadata = resource2.Body("metadata", type=dict) - #: The ID of the volume this snapshot was taken of. - volume_id = resource2.Body("volume_id") - #: The size of the volume, in GBs. - size = resource2.Body("size", type=int) - #: Indicate whether to create snapshot, even if the volume is attached. - #: Default is ``False``. *Type: bool* - is_forced = resource2.Body("force", type=format.BoolStr) - - -class SnapshotDetail(Snapshot): - - base_path = "/snapshots/detail" - - #: The percentage of completeness the snapshot is currently at. - progress = resource2.Body("os-extended-snapshot-attributes:progress") - #: The project ID this snapshot is associated with. - project_id = resource2.Body("os-extended-snapshot-attributes:project_id") diff --git a/openstack/block_store/v2/type.py b/openstack/block_store/v2/type.py deleted file mode 100644 index a39466a5a3..0000000000 --- a/openstack/block_store/v2/type.py +++ /dev/null @@ -1,35 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.block_store import block_store_service -from openstack import resource2 - - -class Type(resource2.Resource): - resource_key = "volume_type" - resources_key = "volume_types" - base_path = "/types" - service = block_store_service.BlockStoreService() - - # capabilities - allow_get = True - allow_create = True - allow_delete = True - allow_list = True - - # Properties - #: A ID representing this type. - id = resource2.Body("id") - #: Name of the type. 
- name = resource2.Body("name") - #: A dict of extra specifications. "capabilities" is a usual key. - extra_specs = resource2.Body("extra_specs", type=dict) diff --git a/openstack/block_store/v2/volume.py b/openstack/block_store/v2/volume.py deleted file mode 100644 index a20ddd621a..0000000000 --- a/openstack/block_store/v2/volume.py +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.block_store import block_store_service -from openstack import format -from openstack import resource2 - - -class Volume(resource2.Resource): - resource_key = "volume" - resources_key = "volumes" - base_path = "/volumes" - service = block_store_service.BlockStoreService() - - _query_mapping = resource2.QueryParameters('all_tenants', 'name', - 'status', 'project_id') - - # capabilities - allow_get = True - allow_create = True - allow_delete = True - allow_update = True - allow_list = True - - # Properties - #: A ID representing this volume. - id = resource2.Body("id") - #: The name of this volume. - name = resource2.Body("name") - #: A list of links associated with this volume. *Type: list* - links = resource2.Body("links", type=list) - - #: The availability zone. - availability_zone = resource2.Body("availability_zone") - #: To create a volume from an existing volume, specify the ID of - #: the existing volume. If specified, the volume is created with - #: same size of the source volume. 
- source_volume_id = resource2.Body("source_volid") - #: The volume description. - description = resource2.Body("description") - #: To create a volume from an existing snapshot, specify the ID of - #: the existing volume snapshot. If specified, the volume is created - #: in same availability zone and with same size of the snapshot. - snapshot_id = resource2.Body("snapshot_id") - #: The size of the volume, in GBs. *Type: int* - size = resource2.Body("size", type=int) - #: The ID of the image from which you want to create the volume. - #: Required to create a bootable volume. - image_id = resource2.Body("imageRef") - #: The name of the associated volume type. - volume_type = resource2.Body("volume_type") - #: Enables or disables the bootable attribute. You can boot an - #: instance from a bootable volume. *Type: bool* - is_bootable = resource2.Body("bootable", type=format.BoolStr) - #: One or more metadata key and value pairs to associate with the volume. - metadata = resource2.Body("metadata") - - #: One of the following values: creating, available, attaching, in-use - #: deleting, error, error_deleting, backing-up, restoring-backup, - #: error_restoring. For details on these statuses, see the - #: Block Storage API documentation. - status = resource2.Body("status") - #: TODO(briancurtin): This is currently undocumented in the API. - attachments = resource2.Body("attachments") - #: The timestamp of this volume creation. - created_at = resource2.Body("created_at") - - -class VolumeDetail(Volume): - - base_path = "/volumes/detail" - - #: The volume's current back-end. - host = resource2.Body("os-vol-host-attr:host") - #: The project ID associated with current back-end. - project_id = resource2.Body("os-vol-tenant-attr:tenant_id") - #: The status of this volume's migration (None means that a migration - #: is not currently in progress). 
- migration_status = resource2.Body("os-vol-mig-status-attr:migstat") - #: The volume ID that this volume's name on the back-end is based on. - migration_id = resource2.Body("os-vol-mig-status-attr:name_id") - #: Status of replication on this volume. - replication_status = resource2.Body("replication_status") - #: Extended replication status on this volume. - extended_replication_status = resource2.Body( - "os-volume-replication:extended_status") - #: ID of the consistency group. - consistency_group_id = resource2.Body("consistencygroup_id") - #: Data set by the replication driver - replication_driver_data = resource2.Body( - "os-volume-replication:driver_data") - #: ``True`` if this volume is encrypted, ``False`` if not. - #: *Type: bool* - is_encrypted = resource2.Body("encrypted", type=format.BoolStr) diff --git a/openstack/cloud/__init__.py b/openstack/cloud/__init__.py new file mode 100644 index 0000000000..b8472ea906 --- /dev/null +++ b/openstack/cloud/__init__.py @@ -0,0 +1,16 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + + +from openstack.cloud.exc import * # noqa diff --git a/openstack/cloud/_accelerator.py b/openstack/cloud/_accelerator.py new file mode 100644 index 0000000000..9944147d2f --- /dev/null +++ b/openstack/cloud/_accelerator.py @@ -0,0 +1,155 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.cloud import openstackcloud + + +class AcceleratorCloudMixin(openstackcloud._OpenStackCloudMixin): + def list_deployables(self, filters=None): + """List all available deployables. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of accelerator ``Deployable`` objects. + """ + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.accelerator.deployables(**filters)) + + def list_devices(self, filters=None): + """List all devices. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of accelerator ``Device`` objects. + """ + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.accelerator.devices(**filters)) + + def list_device_profiles(self, filters=None): + """List all device_profiles. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of accelerator ``DeviceProfile`` objects. + """ + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.accelerator.device_profiles(**filters)) + + def create_device_profile(self, attrs): + """Create a device_profile. + + :param attrs: The info of device_profile to be created. + :returns: An accelerator ``DeviceProfile`` objects. + """ + return self.accelerator.create_device_profile(**attrs) + + def delete_device_profile(self, name_or_id, filters): + """Delete a device_profile. 
+ + :param name_or_id: The name or uuid of the device profile to be + deleted. + :param filters: dict of filter conditions to push down + :returns: True if delete succeeded, False otherwise. + """ + device_profile = self.accelerator.get_device_profile( + name_or_id, + filters, + ) + if device_profile is None: + self.log.debug( + "device_profile %s not found for deleting", + name_or_id, + ) + return False + + self.accelerator.delete_device_profile(device_profile=device_profile) + + return True + + def list_accelerator_requests(self, filters=None): + """List all accelerator_requests. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of accelerator ``AcceleratorRequest`` objects. + """ + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.accelerator.accelerator_requests(**filters)) + + def delete_accelerator_request(self, name_or_id, filters): + """Delete a accelerator_request. + + :param name_or_id: The name or UUID of the accelerator request to + be deleted. + :param filters: dict of filter conditions to push down + :returns: True if delete succeeded, False otherwise. + """ + accelerator_request = self.accelerator.get_accelerator_request( + name_or_id, + filters, + ) + if accelerator_request is None: + self.log.debug( + "accelerator_request %s not found for deleting", + name_or_id, + ) + return False + + self.accelerator.delete_accelerator_request( + accelerator_request=accelerator_request, + ) + + return True + + def create_accelerator_request(self, attrs): + """Create an accelerator_request. + + :param attrs: The info of accelerator_request to be created. + :returns: An accelerator ``AcceleratorRequest`` object. + """ + return self.accelerator.create_accelerator_request(**attrs) + + def bind_accelerator_request(self, uuid, properties): + """Bind an accelerator to VM. + + :param uuid: The uuid of the accelerator_request to be binded. 
+ :param properties: The info of VM that will bind the accelerator. + :returns: True if bind succeeded, False otherwise. + """ + accelerator_request = self.accelerator.get_accelerator_request(uuid) + if accelerator_request is None: + self.log.debug( + "accelerator_request %s not found for unbinding", uuid + ) + return False + + return self.accelerator.update_accelerator_request(uuid, properties) + + def unbind_accelerator_request(self, uuid, properties): + """Unbind an accelerator from VM. + + :param uuid: The uuid of the accelerator_request to be unbinded. + :param properties: The info of VM that will unbind the accelerator. + :returns: True if unbind succeeded, False otherwise. + """ + accelerator_request = self.accelerator.get_accelerator_request(uuid) + if accelerator_request is None: + self.log.debug( + "accelerator_request %s not found for unbinding", uuid + ) + return False + + return self.accelerator.update_accelerator_request(uuid, properties) diff --git a/openstack/cloud/_baremetal.py b/openstack/cloud/_baremetal.py new file mode 100644 index 0000000000..bdbf295bb6 --- /dev/null +++ b/openstack/cloud/_baremetal.py @@ -0,0 +1,638 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import contextlib +import sys +import warnings + +import jsonpatch + +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack import warnings as os_warnings + + +def _normalize_port_list(nics): + ports = [] + for row in nics: + if isinstance(row, str): + address = row + row = {} + elif 'mac' in row: + address = row.pop('mac') + else: + try: + address = row.pop('address') + except KeyError: + raise TypeError( + "Either 'address' or 'mac' must be provided " + f"for port {row}" + ) + ports.append(dict(row, address=address)) + return ports + + +class BaremetalCloudMixin(openstackcloud._OpenStackCloudMixin): + def list_nics(self): + """Return a list of all bare metal ports.""" + return list(self.baremetal.ports(details=True)) + + def list_nics_for_machine(self, uuid): + """Returns a list of ports present on the machine node. + + :param uuid: String representing machine UUID value in order to + identify the machine. + :returns: A list of ports. + """ + # TODO(dtantsur): support node names here. + return list(self.baremetal.ports(details=True, node_id=uuid)) + + def get_nic_by_mac(self, mac): + """Get bare metal NIC by its hardware address (usually MAC).""" + results = list(self.baremetal.ports(address=mac, details=True)) + try: + return results[0] + except IndexError: + return None + + def list_machines(self): + """List Machines. + + :returns: list of :class:`~openstack.baremetal.v1.node.Node`. + """ + return list(self.baremetal.nodes()) + + def get_machine(self, name_or_id): + """Get Machine by name or uuid + + Search the baremetal host out by utilizing the supplied id value + which can consist of a name or UUID. + + :param name_or_id: A node name or UUID that will be looked up. + + :rtype: :class:`~openstack.baremetal.v1.node.Node`. + :returns: The node found or None if no nodes are found. 
+ """ + return self.baremetal.find_node(name_or_id, ignore_missing=True) + + def get_machine_by_mac(self, mac): + """Get machine by port MAC address + + :param mac: Port MAC address to query in order to return a node. + + :rtype: :class:`~openstack.baremetal.v1.node.Node`. + :returns: The node found or None if no nodes are found. + """ + nic = self.get_nic_by_mac(mac) + if nic is None: + return None + else: + return self.get_machine(nic['node_uuid']) + + def inspect_machine(self, name_or_id, wait=False, timeout=3600): + """Inspect a Barmetal machine + + Engages the Ironic node inspection behavior in order to collect + metadata about the baremetal machine. + + :param name_or_id: String representing machine name or UUID value in + order to identify the machine. + + :param wait: Boolean value controlling if the method is to wait for + the desired state to be reached or a failure to occur. + + :param timeout: Integer value, defautling to 3600 seconds, for the + wait state to reach completion. + + :rtype: :class:`~openstack.baremetal.v1.node.Node`. + :returns: Current state of the node. + """ + + return_to_available = False + + node = self.baremetal.get_node(name_or_id) + + # NOTE(TheJulia): If in available state, we can do this. However, + # we need to move the machine back to manageable first. + if node.provision_state == 'available': + if node.instance_id: + raise exceptions.SDKException( + f"Refusing to inspect available machine {node.id} " + "which is associated with an instance " + f"(instance_uuid {node.instance_id})" + ) + + return_to_available = True + # NOTE(TheJulia): Changing available machine to managedable state + # and due to state transitions we need to until that transition has + # completed. 
+ node = self.baremetal.set_node_provision_state( + node, 'manage', wait=True, timeout=timeout + ) + + if node.provision_state not in ('manageable', 'inspect failed'): + raise exceptions.SDKException( + f"Machine {node.id} must be in 'manageable', 'inspect failed' " + "or 'available' provision state to start inspection, the " + f"current state is {node.provision_state}" + ) + + node = self.baremetal.set_node_provision_state( + node, 'inspect', wait=True, timeout=timeout + ) + + if return_to_available: + node = self.baremetal.set_node_provision_state( + node, 'provide', wait=True, timeout=timeout + ) + + return node + + @contextlib.contextmanager + def _delete_node_on_error(self, node): + try: + yield + except Exception as exc: + self.log.debug( + "cleaning up node %s because of an error: %s", node.id, exc + ) + tb = sys.exc_info()[2] + try: + self.baremetal.delete_node(node) + except Exception: + self.log.debug( + "could not remove node %s", node.id, exc_info=True + ) + raise exc.with_traceback(tb) + + def register_machine( + self, + nics, + wait=False, + timeout=3600, + lock_timeout=600, + provision_state='available', + **kwargs, + ): + """Register Baremetal with Ironic + + Allows for the registration of Baremetal nodes with Ironic + and population of pertinant node information or configuration + to be passed to the Ironic API for the node. + + This method also creates ports for a list of MAC addresses passed + in to be utilized for boot and potentially network configuration. + + If a failure is detected creating the network ports, any ports + created are deleted, and the node is removed from Ironic. + + :param nics: + An array of ports that represent the network interfaces for the + node to be created. The ports are created after the node is + enrolled but before it goes through cleaning. + + Example:: + + [ + {'address': 'aa:bb:cc:dd:ee:01'}, + {'address': 'aa:bb:cc:dd:ee:02'}, + ] + + Alternatively, you can provide an array of MAC addresses. 
+ :param wait: Boolean value, defaulting to false, to wait for the node + to reach the available state where the node can be provisioned. It + must be noted, when set to false, the method will still wait for + locks to clear before sending the next required command. + :param timeout: Integer value, defaulting to 3600 seconds, for the wait + state to reach completion. + :param lock_timeout: Integer value, defaulting to 600 seconds, for + locks to clear. + :param provision_state: The expected provision state, one of "enroll", + "manageable" or "available". Using "available" results in automated + cleaning. + :param kwargs: Key value pairs to be passed to the Ironic API, + including uuid, name, chassis_uuid, driver_info, properties. + + :returns: Current state of the node. + :rtype: :class:`~openstack.baremetal.v1.node.Node`. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if provision_state not in ('enroll', 'manageable', 'available'): + raise ValueError( + 'Initial provision state must be enroll, ' + f'manageable or available, got {provision_state}' + ) + + # Available is tricky: it cannot be directly requested on newer API + # versions, we need to go through cleaning. But we cannot go through + # cleaning until we create ports. 
+ if provision_state != 'available': + kwargs['provision_state'] = 'enroll' + machine = self.baremetal.create_node(**kwargs) + + with self._delete_node_on_error(machine): + # Making a node at least manageable + if ( + machine.provision_state == 'enroll' + and provision_state != 'enroll' + ): + machine = self.baremetal.set_node_provision_state( + machine, 'manage', wait=True, timeout=timeout + ) + machine = self.baremetal.wait_for_node_reservation( + machine, timeout=lock_timeout + ) + + # Create NICs before trying to run cleaning + created_nics = [] + try: + for port in _normalize_port_list(nics): + nic = self.baremetal.create_port( + node_id=machine.id, **port + ) + created_nics.append(nic.id) + + except Exception: + for uuid in created_nics: + try: + self.baremetal.delete_port(uuid) + except Exception: # noqa: S110 + # the port might not have been actually created, so a + # failure to delete isn't necessarily an issue + pass + raise + + if ( + machine.provision_state != 'available' + and provision_state == 'available' + ): + machine = self.baremetal.set_node_provision_state( + machine, 'provide', wait=wait, timeout=timeout + ) + + return machine + + def unregister_machine(self, nics, uuid, wait=None, timeout=600): + """Unregister Baremetal from Ironic + + Removes entries for Network Interfaces and baremetal nodes + from an Ironic API + + :param nics: An array of strings that consist of MAC addresses + to be removed. + :param string uuid: The UUID of the node to be deleted. + :param wait: DEPRECATED, do not use. + :param timeout: Integer value, representing seconds with a default + value of 600, which controls the maximum amount of time to block + until a lock is released on machine. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + failure. 
+ """ + if wait is not None: + warnings.warn( + "wait argument is deprecated and has no effect", + os_warnings.RemovedInSDK50Warning, + ) + + machine = self.get_machine(uuid) + invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed'] + if machine['provision_state'] in invalid_states: + raise exceptions.SDKException( + "Error unregistering node '{}' due to current provision " + "state '{}'".format(uuid, machine['provision_state']) + ) + + # NOTE(TheJulia) There is a high possibility of a lock being present + # if the machine was just moved through the state machine. This was + # previously concealed by exception retry logic that detected the + # failure, and resubitted the request in python-ironicclient. + try: + self.baremetal.wait_for_node_reservation(machine, timeout) + except exceptions.SDKException as e: + raise exceptions.SDKException( + "Error unregistering node '{}': Exception occured while " + "waiting to be able to proceed: {}".format(machine['uuid'], e) + ) + + for nic in _normalize_port_list(nics): + try: + port = next(self.baremetal.ports(address=nic['address'])) + except StopIteration: + continue + self.baremetal.delete_port(port.id) + + self.baremetal.delete_node(uuid) + + def patch_machine(self, name_or_id, patch): + """Patch Machine Information + + This method allows for an interface to manipulate node entries + within Ironic. + + :param string name_or_id: A machine name or UUID to be updated. + :param patch: + The JSON Patch document is a list of dictonary objects that comply + with RFC 6902 which can be found at + https://tools.ietf.org/html/rfc6902. + + Example patch construction:: + + patch = [] + patch.append({'op': 'remove', 'path': '/instance_info'}) + patch.append( + {'op': 'replace', 'path': '/name', 'value': 'newname'} + ) + patch.append( + { + 'op': 'add', + 'path': '/driver_info/username', + 'value': 'administrator', + } + ) + + :returns: Current state of the node. + :rtype: :class:`~openstack.baremetal.v1.node.Node`. 
+ :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + return self.baremetal.patch_node(name_or_id, patch) + + def update_machine(self, name_or_id, **attrs): + """Update a machine with new configuration information + + A user-friendly method to perform updates of a machine, in whole or + part. + + :param string name_or_id: A machine name or UUID to be updated. + :param attrs: Attributes to updated on the machine. + + :returns: Dictionary containing a machine sub-dictonary consisting + of the updated data returned from the API update operation, and a + list named changes which contains all of the API paths that + received updates. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + machine = self.get_machine(name_or_id) + if not machine: + raise exceptions.SDKException( + f"Machine update failed to find Machine: {name_or_id}. " + ) + + new_config = dict(machine._to_munch(), **attrs) + + try: + patch = jsonpatch.JsonPatch.from_diff( + machine._to_munch(), new_config + ) + except Exception as e: + raise exceptions.SDKException( + "Machine update failed - Error generating JSON patch object " + f"for submission to the API. Machine: {name_or_id} Error: {e}" + ) + + if not patch: + return dict(node=machine, changes=None) + + change_list = [change['path'] for change in patch] + node = self.baremetal.update_node(machine, **attrs) + return dict(node=node, changes=change_list) + + def attach_port_to_machine(self, name_or_id, port_name_or_id): + """Attach a virtual port to the bare metal machine. + + :param string name_or_id: A machine name or UUID. + :param string port_name_or_id: A port name or UUID. + Note that this is a Network service port, not a bare metal NIC. + :return: Nothing. 
+ """ + machine = self.get_machine(name_or_id) + port = self.network.find_port(port_name_or_id, ignore_missing=False) + self.baremetal.attach_vif_to_node(machine, port['id']) + + def detach_port_from_machine(self, name_or_id, port_name_or_id): + """Detach a virtual port from the bare metal machine. + + :param string name_or_id: A machine name or UUID. + :param string port_name_or_id: A port name or UUID. + Note that this is a Network service port, not a bare metal NIC. + :return: Nothing. + """ + machine = self.get_machine(name_or_id) + port = self.network.find_port(port_name_or_id, ignore_missing=False) + self.baremetal.detach_vif_from_node(machine, port['id']) + + def list_ports_attached_to_machine(self, name_or_id): + """List virtual ports attached to the bare metal machine. + + :param string name_or_id: A machine name or UUID. + :returns: List of ``openstack.Resource`` objects representing + the ports. + """ + machine = self.get_machine(name_or_id) + vif_ids = self.baremetal.list_node_vifs(machine) + return [ + self.network.find_port(vif, ignore_missing=False) + for vif in vif_ids + ] + + def validate_machine(self, name_or_id, for_deploy=True): + """Validate parameters of the machine. + + :param string name_or_id: The Name or UUID value representing the + baremetal node. + :param bool for_deploy: If ``True``, validate readiness for deployment, + otherwise validate only the power management properties. 
+ :raises: :exc:`~openstack.exceptions.ValidationException` + """ + if for_deploy: + ifaces = ['boot', 'deploy', 'management', 'power'] + else: + ifaces = ['power'] + self.baremetal.validate_node(name_or_id, required=ifaces) + + def validate_node(self, uuid): + warnings.warn( + 'validate_node is deprecated, please use validate_machine instead', + os_warnings.RemovedInSDK50Warning, + ) + self.baremetal.validate_node(uuid) + + def node_set_provision_state( + self, name_or_id, state, configdrive=None, wait=False, timeout=3600 + ): + """Set Node Provision State + + Enables a user to provision a Machine and optionally define a + config drive to be utilized. + + :param string name_or_id: The Name or UUID value representing the + baremetal node. + :param string state: The desired provision state for the baremetal + node. + :param string configdrive: An optional URL or file or path + representing the configdrive. In the case of a directory, the + client API will create a properly formatted configuration drive + file and post the file contents to the API for deployment. + :param boolean wait: A boolean value, defaulted to false, to control + if the method will wait for the desire end state to be reached + before returning. + :param integer timeout: Integer value, defaulting to 3600 seconds, + representing the amount of time to wait for the desire end state to + be reached. + + :returns: Current state of the machine upon exit of the method. + :rtype: :class:`~openstack.baremetal.v1.node.Node`. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + node = self.baremetal.set_node_provision_state( + name_or_id, + target=state, + config_drive=configdrive, + wait=wait, + timeout=timeout, + ) + return node + + def set_machine_maintenance_state( + self, name_or_id, state=True, reason=None + ): + """Set Baremetal Machine Maintenance State + + Sets Baremetal maintenance state and maintenance reason. 
+ + :param string name_or_id: The Name or UUID value representing the + baremetal node. + :param boolean state: The desired state of the node. True being in + maintenance whereas False means the machine is not in maintenance + mode. This value defaults to True if not explicitly set. + :param string reason: An optional freeform string that is supplied to + the baremetal API to allow for notation as to why the node is in + maintenance state. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if state: + self.baremetal.set_node_maintenance(name_or_id, reason) + else: + self.baremetal.unset_node_maintenance(name_or_id) + + def remove_machine_from_maintenance(self, name_or_id): + """Remove Baremetal Machine from Maintenance State + + Similarly to set_machine_maintenance_state, this method removes a + machine from maintenance state. It must be noted that this method + simply calls set_machine_maintenance_state for the name_or_id requested + and sets the state to False. + + :param string name_or_id: The Name or UUID value representing the + baremetal node. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + self.baremetal.unset_node_maintenance(name_or_id) + + def set_machine_power_on(self, name_or_id): + """Activate baremetal machine power + + This is a method that sets the node power state to "on". + + :param string name_or_id: A string representing the baremetal + node to have power turned to an "on" state. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + self.baremetal.set_node_power_state(name_or_id, 'power on') + + def set_machine_power_off(self, name_or_id): + """De-activate baremetal machine power + + This is a method that sets the node power state to "off". + + :param string name_or_id: A string representing the baremetal + node to have power turned to an "off" state. 
+ + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + self.baremetal.set_node_power_state(name_or_id, 'power off') + + def set_machine_power_reboot(self, name_or_id): + """Reboot baremetal machine power + + This is a method that sets the node power state to "reboot", which + in essence changes the machine power state to "off", and that back + to "on". + + :param string name_or_id: A string representing the baremetal + node to be rebooted. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + self.baremetal.set_node_power_state(name_or_id, 'rebooting') + + def activate_node(self, uuid, configdrive=None, wait=False, timeout=1200): + self.node_set_provision_state( + uuid, 'active', configdrive, wait=wait, timeout=timeout + ) + + def deactivate_node(self, uuid, wait=False, timeout=1200): + self.node_set_provision_state( + uuid, 'deleted', wait=wait, timeout=timeout + ) + + def set_node_instance_info(self, uuid, patch): + warnings.warn( + "The set_node_instance_info call is deprecated, " + "use patch_machine or update_machine instead", + os_warnings.RemovedInSDK50Warning, + ) + return self.patch_machine(uuid, patch) + + def purge_node_instance_info(self, uuid): + warnings.warn( + "The purge_node_instance_info call is deprecated, " + "use patch_machine or update_machine instead", + os_warnings.RemovedInSDK50Warning, + ) + return self.patch_machine( + uuid, dict(path='/instance_info', op='remove') + ) + + def wait_for_baremetal_node_lock(self, node, timeout=30): + """Wait for a baremetal node to have no lock. + + DEPRECATED, use ``wait_for_node_reservation`` on the `baremetal` proxy. + + :raises: :class:`~openstack.exceptions.SDKException` upon client + failure. 
+ :returns: None + """ + warnings.warn( + "The wait_for_baremetal_node_lock call is deprecated " + "in favor of wait_for_node_reservation on the baremetal " + "proxy", + os_warnings.RemovedInSDK50Warning, + ) + self.baremetal.wait_for_node_reservation(node, timeout) diff --git a/openstack/cloud/_block_storage.py b/openstack/cloud/_block_storage.py new file mode 100644 index 0000000000..687934fb9f --- /dev/null +++ b/openstack/cloud/_block_storage.py @@ -0,0 +1,936 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from openstack.cloud import _utils +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class BlockStorageCloudMixin(openstackcloud._OpenStackCloudMixin): + def list_volumes(self, cache=None): + """List all available volumes. + + :param cache: **DEPRECATED** This parameter no longer does anything. + :returns: A list of volume ``Volume`` objects. + """ + if cache is not None: + warnings.warn( + "the 'cache' argument is deprecated and no longer does " + "anything; consider removing it from calls", + os_warnings.RemovedInSDK50Warning, + ) + return list(self.block_storage.volumes()) + + def list_volume_types(self, get_extra=None): + """List all available volume types. + + :param get_extra: **DEPRECATED** This parameter no longer does + anything. + :returns: A list of volume ``Type`` objects. 
+ """ + if get_extra is not None: + warnings.warn( + "the 'get_extra' argument is deprecated and no longer does " + "anything; consider removing it from calls", + os_warnings.RemovedInSDK50Warning, + ) + return list(self.block_storage.types()) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_volume(self, name_or_id, filters=None): + """Get a volume by name or ID. + + :param name_or_id: Name or unique ID of the volume. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A volume ``Volume`` object if found, else None. + """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use " + "'search_volumes' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_volumes(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return self.block_storage.find_volume(name_or_id) + + def get_volume_by_id(self, id): + """Get a volume by ID + + :param id: ID of the volume. + :returns: A volume ``Volume`` object if found, else None. + """ + return self.block_storage.get_volume(id) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_volume_type(self, name_or_id, filters=None): + """Get a volume type by name or ID. + + :param name_or_id: Name or unique ID of the volume type. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. 
Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A volume ``Type`` object if found, else None. + """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use " + "'search_volume_types' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_volume_types(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return self.block_storage.find_type(name_or_id) + + def create_volume( + self, + size, + wait=True, + timeout=None, + image=None, + bootable=None, + **kwargs, + ): + """Create a volume. + + :param size: Size, in GB of the volume to create. + :param wait: If true, waits for volume to be created. + :param timeout: Seconds to wait for volume creation. None is forever. + :param image: (optional) Image name, ID or object from which to create + the volume + :param bootable: (optional) Make this volume bootable. If set, wait + will also be set to true. + :param kwargs: Keyword arguments as expected for cinder client. + + :returns: The created volume ``Volume`` object. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if bootable is not None: + wait = True + + if image: + if isinstance(image, dict): + if not isinstance(image, resource.Resource): + warnings.warn( + "Support for passing image as a raw dict has " + "been deprecated for removal. 
Consider passing a " + "string name or ID or an Image object instead.", + os_warnings.RemovedInSDK60Warning, + ) + kwargs['imageRef'] = image['id'] + else: # object + image_obj = self.image.find_image(image, ignore_missing=False) + kwargs['imageRef'] = image_obj['id'] + kwargs = self._get_volume_kwargs(kwargs) + kwargs['size'] = size + + volume = self.block_storage.create_volume(**kwargs) + + if volume['status'] == 'error': + raise exceptions.SDKException("Error in creating volume") + + if wait: + self.block_storage.wait_for_status(volume, wait=timeout) + if bootable: + self.block_storage.set_volume_bootable_status(volume, True) + + return volume + + def update_volume(self, name_or_id, **kwargs): + """Update a volume. + + :param name_or_id: Name or unique ID of the volume. + :param kwargs: Volume attributes to be updated. + :returns: The updated volume ``Volume`` object. + """ + kwargs = self._get_volume_kwargs(kwargs) + + volume = self.get_volume(name_or_id) + if not volume: + raise exceptions.SDKException(f"Volume {name_or_id} not found.") + + block_storage = utils.ensure_service_version(self.block_storage, '3') + volume = block_storage.update_volume(volume, **kwargs) + + return volume + + def set_volume_bootable(self, name_or_id, bootable=True): + """Set a volume's bootable flag. + + :param name_or_id: Name or unique ID of the volume. + :param bool bootable: Whether the volume should be bootable. + (Defaults to True) + + :returns: None + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + + volume = self.get_volume(name_or_id) + + if not volume: + raise exceptions.SDKException( + f"Volume {name_or_id} does not exist" + ) + + self.block_storage.set_volume_bootable_status(volume, bootable) + + def delete_volume( + self, + name_or_id=None, + wait=True, + timeout=None, + force=False, + ): + """Delete a volume. 
+ + :param name_or_id: Name or unique ID of the volume. + :param wait: If true, waits for volume to be deleted. + :param timeout: Seconds to wait for volume deletion. None is forever. + :param force: Force delete volume even if the volume is in deleting + or error_deleting state. + + :returns: True if deletion was successful, else False. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + volume = self.block_storage.find_volume( + name_or_id, ignore_missing=True + ) + if not volume: + self.log.debug( + "Volume %(name_or_id)s does not exist", + {'name_or_id': name_or_id}, + exc_info=True, + ) + return False + try: + self.block_storage.delete_volume(volume, force=force) + except exceptions.SDKException: + self.log.exception("error in deleting volume") + raise + + if wait: + self.block_storage.wait_for_delete(volume, wait=timeout) + + return True + + def get_volumes(self, server, cache=None): + """Get volumes for a server. + + :param server: The server to fetch volumes for. + :param cache: **DEPRECATED** This parameter no longer does anything. + :returns: A list of volume ``Volume`` objects. + """ + if cache is not None: + warnings.warn( + "the 'cache' argument is deprecated and no longer does " + "anything; consider removing it from calls", + os_warnings.RemovedInSDK50Warning, + ) + # avoid spamming warnings + cache = None + + volumes = [] + for volume in self.list_volumes(cache=cache): + for attach in volume['attachments']: + if attach['server_id'] == server['id']: + volumes.append(volume) + return volumes + + def get_volume_limits(self, name_or_id=None): + """Get volume limits for the current project + + :param name_or_id: (optional) Project name or ID to get limits for + if different from the current project + :returns: The volume ``Limits`` object if found, else None. 
+ """ + params = {} + if name_or_id: + identity = utils.ensure_service_version(self.identity, '3') + project = identity.find_project(name_or_id, ignore_missing=False) + params['project'] = project + return self.block_storage.get_limits(**params) + + def get_volume_id(self, name_or_id): + """Get ID of a volume. + + :param name_or_id: Name or unique ID of the volume. + :returns: The ID of the volume if found, else None. + """ + volume = self.get_volume(name_or_id) + if volume: + return volume['id'] + return None + + def volume_exists(self, name_or_id): + """Check if a volume exists. + + :param name_or_id: Name or unique ID of the volume. + :returns: True if the volume exists, else False. + """ + return self.get_volume(name_or_id) is not None + + def get_volume_attach_device(self, volume, server_id): + """Return the device name a volume is attached to for a server. + + This can also be used to verify if a volume is attached to + a particular server. + + :param volume: The volume to fetch the device name from. + :param server_id: ID of server to check. + :returns: Device name if attached, None if volume is not attached. + """ + for attach in volume['attachments']: + if server_id == attach['server_id']: + return attach['device'] + return None + + def detach_volume(self, server, volume, wait=True, timeout=None): + """Detach a volume from a server. + + :param server: The server dict to detach from. + :param volume: The volume dict to detach. + :param wait: If true, waits for volume to be detached. + :param timeout: Seconds to wait for volume detachment. None is forever. + + :returns: None + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + self.compute.delete_volume_attachment( + server=server['id'], + volume=volume['id'], + ignore_missing=False, + ) + if wait: + vol = self.get_volume(volume['id']) + self.block_storage.wait_for_status(vol) + + def attach_volume( + self, + server, + volume, + device=None, + wait=True, + timeout=None, + ): + """Attach a volume to a server. + + This will attach a volume, described by the passed in volume + dict (as returned by get_volume()), to the server described by + the passed in server dict (as returned by get_server()) on the + named device on the server. + + If the volume is already attached to the server, or generally not + available, then an exception is raised. To re-attach to a server, + but under a different device, the user must detach it first. + + :param server: The server dict to attach to. + :param volume: The volume dict to attach. + :param device: The device name where the volume will attach. + :param wait: If true, waits for volume to be attached. + :param timeout: Seconds to wait for volume attachment. None is forever. + + :returns: a volume attachment object. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + dev = self.get_volume_attach_device(volume, server['id']) + if dev: + raise exceptions.SDKException( + "Volume {} already attached to server {} on device {}".format( + volume['id'], server['id'], dev + ) + ) + + if volume['status'] != 'available': + raise exceptions.SDKException( + "Volume {} is not available. Status is '{}'".format( + volume['id'], volume['status'] + ) + ) + + payload = {} + if device: + payload['device'] = device + attachment = self.compute.create_volume_attachment( + server=server['id'], + volume=volume['id'], + **payload, + ) + + if wait: + if not hasattr(volume, 'fetch'): + # If we got volume as dict we need to re-fetch it to be able to + # use wait_for_status. 
+ volume = self.block_storage.get_volume(volume['id']) + self.block_storage.wait_for_status(volume, 'in-use', wait=timeout) + return attachment + + def _get_volume_kwargs(self, kwargs): + name = kwargs.pop('name', kwargs.pop('display_name', None)) + description = kwargs.pop( + 'description', kwargs.pop('display_description', None) + ) + if name: + kwargs['name'] = name + if description: + kwargs['description'] = description + return kwargs + + @_utils.valid_kwargs( + 'name', 'display_name', 'description', 'display_description' + ) + def create_volume_snapshot( + self, + volume_id, + force=False, + wait=True, + timeout=None, + **kwargs, + ): + """Create a snapshot. + + :param volume_id: the ID of the volume to snapshot. + :param force: If set to True the snapshot will be created even if the + volume is attached to an instance, if False it will not + :param name: name of the snapshot, one will be generated if one is + not provided + :param description: description of the snapshot, one will be generated + if one is not provided + :param wait: If true, waits for volume snapshot to be created. + :param timeout: Seconds to wait for volume snapshot creation. None is + forever. + + :returns: The created volume ``Snapshot`` object. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + kwargs = self._get_volume_kwargs(kwargs) + payload = {'volume_id': volume_id, 'force': force} + payload.update(kwargs) + snapshot = self.block_storage.create_snapshot(**payload) + if wait: + snapshot = self.block_storage.wait_for_status( + snapshot, wait=timeout + ) + + return snapshot + + def get_volume_snapshot_by_id(self, snapshot_id): + """Takes a snapshot_id and gets a dict of the snapshot + that maches that ID. + + Note: This is more efficient than get_volume_snapshot. + + param: snapshot_id: ID of the volume snapshot. 
+ :returns: A volume ``Snapshot`` object if found, else None. + """ + return self.block_storage.get_snapshot(snapshot_id) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_volume_snapshot(self, name_or_id, filters=None): + """Get a volume by name or ID. + + :param name_or_id: Name or unique ID of the volume snapshot. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A volume ``Snapshot`` object if found, else None. + """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use " + "'search_volume_snapshots' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_volume_snapshots(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return self.block_storage.find_snapshot(name_or_id) + + def create_volume_backup( + self, + volume_id, + name=None, + description=None, + force=False, + wait=True, + timeout=None, + incremental=False, + snapshot_id=None, + ): + """Create a volume backup. + + :param volume_id: the ID of the volume to backup. + :param name: name of the backup, one will be generated if one is + not provided + :param description: description of the backup, one will be generated + if one is not provided + :param force: If set to True the backup will be created even if the + volume is attached to an instance, if False it will not + :param wait: If true, waits for volume backup to be created. + :param timeout: Seconds to wait for volume backup creation. None is + forever. 
+ :param incremental: If set to true, the backup will be incremental. + :param snapshot_id: The UUID of the source snapshot to back up. + + :returns: The created volume ``Backup`` object. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + payload = { + 'name': name, + 'volume_id': volume_id, + 'description': description, + 'force': force, + 'is_incremental': incremental, + 'snapshot_id': snapshot_id, + } + + backup = self.block_storage.create_backup(**payload) + + if wait: + backup = self.block_storage.wait_for_status(backup, wait=timeout) + + return backup + + def export_volume_backup(self, backup_id): + """Export a volume backup. + + :param backup_id: the ID of the backup. + + :returns: The backup export record fields + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + payload = {'backup': backup_id} + + return self.block_storage.export_record(**payload) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_volume_backup(self, name_or_id, filters=None): + """Get a volume backup by name or ID. + + :param name_or_id: Name or unique ID of the volume backup. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A volume ``Backup`` object if found, else None. 
+ """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use " + "'search_volume_backups' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_volume_backups(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return self.block_storage.find_backup(name_or_id) + + def list_volume_snapshots(self, detailed=True, filters=None): + """List all volume snapshots. + + :param detailed: Whether or not to add detailed additional information. + :param filters: A dictionary of meta data to use for further filtering. + Example:: + + { + 'name': 'my-volume-snapshot', + 'volume_id': 'e126044c-7b4c-43be-a32a-c9cbbc9ddb56', + 'all_tenants': 1, + } + + :returns: A list of volume ``Snapshot`` objects. + """ + if not filters: + filters = {} + return list(self.block_storage.snapshots(details=detailed, **filters)) + + def list_volume_backups(self, detailed=True, filters=None): + """List all volume backups. + + :param detailed: Whether or not to add detailed additional information. + :param filters: A dictionary of meta data to use for further filtering. + Example:: + + { + 'name': 'my-volume-backup', + 'status': 'available', + 'volume_id': 'e126044c-7b4c-43be-a32a-c9cbbc9ddb56', + 'all_tenants': 1, + } + + :returns: A list of volume ``Backup`` objects. + """ + if not filters: + filters = {} + + return list(self.block_storage.backups(details=detailed, **filters)) + + def delete_volume_backup( + self, name_or_id=None, force=False, wait=False, timeout=None + ): + """Delete a volume backup. + + :param name_or_id: Name or unique ID of the volume backup. + :param force: Allow delete in state other than error or available. + :param wait: If true, waits for volume backup to be deleted. + :param timeout: Seconds to wait for volume backup deletion. None is + forever. 
+ + :returns: True if deletion was successful, else False. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + volume_backup = self.get_volume_backup(name_or_id) + + if not volume_backup: + return False + + self.block_storage.delete_backup( + volume_backup, ignore_missing=False, force=force + ) + if wait: + self.block_storage.wait_for_delete(volume_backup, wait=timeout) + + return True + + def delete_volume_snapshot( + self, + name_or_id=None, + wait=False, + timeout=None, + ): + """Delete a volume snapshot. + + :param name_or_id: Name or unique ID of the volume snapshot. + :param wait: If true, waits for volume snapshot to be deleted. + :param timeout: Seconds to wait for volume snapshot deletion. None is + forever. + + :returns: True if deletion was successful, else False. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if wait time + exceeded. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + volumesnapshot = self.get_volume_snapshot(name_or_id) + + if not volumesnapshot: + return False + + self.block_storage.delete_snapshot( + volumesnapshot, ignore_missing=False + ) + + if wait: + self.block_storage.wait_for_delete(volumesnapshot, wait=timeout) + + return True + + def search_volumes(self, name_or_id=None, filters=None): + """Search for one or more volumes. + + :param name_or_id: Name or unique ID of volume(s). + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of volume ``Volume`` objects, if any are found. 
+ """ + volumes = self.list_volumes() + return _utils._filter_list(volumes, name_or_id, filters) + + def search_volume_snapshots(self, name_or_id=None, filters=None): + """Search for one or more volume snapshots. + + :param name_or_id: Name or unique ID of volume snapshot(s). + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of volume ``Snapshot`` objects, if any are found. + """ + volumesnapshots = self.list_volume_snapshots() + return _utils._filter_list(volumesnapshots, name_or_id, filters) + + def search_volume_backups(self, name_or_id=None, filters=None): + """Search for one or more volume backups. + + :param name_or_id: Name or unique ID of volume backup(s). + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of volume ``Backup`` objects, if any are found. + """ + volume_backups = self.list_volume_backups() + return _utils._filter_list(volume_backups, name_or_id, filters) + + def search_volume_types( + self, + name_or_id=None, + filters=None, + get_extra=None, + ): + """Search for one or more volume types. + + :param name_or_id: Name or unique ID of volume type(s). + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. 
Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of volume ``Type`` objects, if any are found. + """ + if get_extra is not None: + warnings.warn( + "the 'get_extra' argument is deprecated and no longer does " + "anything; consider removing it from calls", + os_warnings.RemovedInSDK50Warning, + ) + volume_types = self.list_volume_types() + return _utils._filter_list(volume_types, name_or_id, filters) + + def get_volume_type_access(self, name_or_id): + """Return a list of volume_type_access. + + :param name_or_id: Name or unique ID of the volume type. + :returns: A volume ``Type`` object if found, else None. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + volume_type = self.get_volume_type(name_or_id) + if not volume_type: + raise exceptions.SDKException( + f"VolumeType not found: {name_or_id}" + ) + + return self.block_storage.get_type_access(volume_type) + + def add_volume_type_access(self, name_or_id, project_id): + """Grant access on a volume_type to a project. + + NOTE: the call works even if the project does not exist. + + :param name_or_id: ID or name of a volume_type + :param project_id: A project id + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + volume_type = self.get_volume_type(name_or_id) + if not volume_type: + raise exceptions.SDKException( + f"VolumeType not found: {name_or_id}" + ) + + self.block_storage.add_type_access(volume_type, project_id) + + def remove_volume_type_access(self, name_or_id, project_id): + """Revoke access on a volume_type to a project. + + :param name_or_id: ID or name of a volume_type + :param project_id: A project id + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + volume_type = self.get_volume_type(name_or_id) + if not volume_type: + raise exceptions.SDKException( + f"VolumeType not found: {name_or_id}" + ) + self.block_storage.remove_type_access(volume_type, project_id) + + def set_volume_quotas(self, name_or_id, **kwargs): + """Set a volume quota in a project + + :param name_or_id: project name or id + :param kwargs: key/value pairs of quota name and quota value + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` if the resource to + set the quota does not exist. + """ + identity = utils.ensure_service_version(self.identity, '3') + project = identity.find_project(name_or_id, ignore_missing=False) + + self.block_storage.update_quota_set(project=project, **kwargs) + + def get_volume_quotas(self, name_or_id): + """Get volume quotas for a project + + :param name_or_id: project name or id + + :returns: A volume ``QuotaSet`` object with the quotas + :raises: :class:`~openstack.exceptions.SDKException` if it's not a + valid project + """ + identity = utils.ensure_service_version(self.identity, '3') + proj = identity.find_project(name_or_id, ignore_missing=False) + + return self.block_storage.get_quota_set(proj) + + def delete_volume_quotas(self, name_or_id): + """Delete volume quotas for a project + + :param name_or_id: project name or id + + :returns: The deleted volume ``QuotaSet`` object. + :raises: :class:`~openstack.exceptions.SDKException` if it's not a + valid project or the call failed + """ + identity = utils.ensure_service_version(self.identity, '3') + proj = identity.find_project(name_or_id, ignore_missing=False) + + return self.block_storage.revert_quota_set(proj) diff --git a/openstack/cloud/_coe.py b/openstack/cloud/_coe.py new file mode 100644 index 0000000000..8783aea6cd --- /dev/null +++ b/openstack/cloud/_coe.py @@ -0,0 +1,304 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.cloud import _utils +from openstack.cloud import openstackcloud +from openstack import exceptions + + +class CoeCloudMixin(openstackcloud._OpenStackCloudMixin): + def list_coe_clusters(self): + """List COE (Container Orchestration Engine) cluster. + + :returns: A list of container infrastructure management ``Cluster`` + objects. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + return list(self.container_infrastructure_management.clusters()) + + def search_coe_clusters(self, name_or_id=None, filters=None): + """Search COE cluster. + + :param name_or_id: cluster name or ID. + :param filters: a dict containing additional filters to use. + :param detail: a boolean to control if we need summarized or + detailed output. + + :returns: A list of container infrastructure management ``Cluster`` + objects. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + coe_clusters = self.list_coe_clusters() + return _utils._filter_list(coe_clusters, name_or_id, filters) + + def get_coe_cluster(self, name_or_id, filters=None): + """Get a COE cluster by name or ID. + + :param name_or_id: Name or ID of the cluster. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. 
+ Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A container infrastructure management ``Cluster`` object if + found, else None. + """ + return _utils._get_entity(self, 'coe_cluster', name_or_id, filters) + + def create_coe_cluster( + self, + name, + cluster_template_id, + **kwargs, + ): + """Create a COE cluster based on given cluster template. + + :param string name: Name of the cluster. + :param string cluster_template_id: ID of the cluster template to use. + :param dict kwargs: Any other arguments to pass in. + + :returns: The created container infrastructure management ``Cluster`` + object. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + cluster = self.container_infrastructure_management.create_cluster( + name=name, + cluster_template_id=cluster_template_id, + **kwargs, + ) + + return cluster + + def delete_coe_cluster(self, name_or_id): + """Delete a COE cluster. + + :param name_or_id: Name or unique ID of the cluster. + + :returns: True if the delete succeeded, False if the + cluster was not found. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + + cluster = self.get_coe_cluster(name_or_id) + + if not cluster: + self.log.debug( + "COE Cluster %(name_or_id)s does not exist", + {'name_or_id': name_or_id}, + exc_info=True, + ) + return False + + self.container_infrastructure_management.delete_cluster(cluster) + return True + + def update_coe_cluster(self, name_or_id, **kwargs): + """Update a COE cluster. + + :param name_or_id: Name or ID of the COE cluster being updated. + :param kwargs: Cluster attributes to be updated. + + :returns: The updated cluster ``Cluster`` object. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + cluster = self.get_coe_cluster(name_or_id) + if not cluster: + raise exceptions.SDKException( + f"COE cluster {name_or_id} not found." 
+ )
+
+ cluster = self.container_infrastructure_management.update_cluster(
+ cluster, **kwargs
+ )
+
+ return cluster
+
+ def get_coe_cluster_certificate(self, cluster_id):
+ """Get details about the CA certificate for a cluster by name or ID.
+
+ :param cluster_id: ID of the cluster.
+
+ :returns: Details about the CA certificate for the given cluster.
+ """
+ return (
+ self.container_infrastructure_management.get_cluster_certificate(
+ cluster_id
+ )
+ )
+
+ def sign_coe_cluster_certificate(self, cluster_id, csr):
+ """Sign client key and generate the CA certificate for a cluster
+
+ :param cluster_id: UUID of the cluster.
+ :param csr: Certificate Signing Request (CSR) for authenticating
+ client key. The CSR will be used by Magnum to generate a signed
+ certificate that client will use to communicate with the cluster.
+
+ :returns: a dict representing the signed certs.
+ :raises: :class:`~openstack.exceptions.SDKException` on operation
+ error.
+ """
+ return self.container_infrastructure_management.create_cluster_certificate( # noqa: E501
+ cluster_uuid=cluster_id, csr=csr
+ )
+
+ def list_cluster_templates(self, detail=False):
+ """List cluster templates.
+
+ :param bool detail: Ignored. Included for backwards compat.
+ ClusterTemplates are always returned with full details.
+
+ :returns: a list of dicts containing the cluster template details.
+ :raises: :class:`~openstack.exceptions.SDKException` if something goes
+ wrong during the OpenStack API call.
+ """
+ return list(
+ self.container_infrastructure_management.cluster_templates()
+ )
+
+ def search_cluster_templates(
+ self, name_or_id=None, filters=None, detail=False
+ ):
+ """Search cluster templates.
+
+ :param name_or_id: cluster template name or ID.
+ :param filters: a dict containing additional filters to use.
+ :param detail: a boolean to control if we need summarized or
+ detailed output.
+ + :returns: a list of dict containing the cluster templates + :raises: :class:`~openstack.exceptions.SDKException`: if something goes + wrong during the OpenStack API call. + """ + cluster_templates = self.list_cluster_templates(detail=detail) + return _utils._filter_list(cluster_templates, name_or_id, filters) + + def get_cluster_template(self, name_or_id, filters=None, detail=False): + """Get a cluster template by name or ID. + + :param name_or_id: Name or ID of the cluster template. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A cluster template dict or None if no matching + cluster template is found. + """ + return _utils._get_entity( + self, + 'cluster_template', + name_or_id, + filters=filters, + detail=detail, + ) + + def create_cluster_template( + self, name, image_id=None, keypair_id=None, coe=None, **kwargs + ): + """Create a cluster template. + + :param string name: Name of the cluster template. + :param string image_id: Name or ID of the image to use. + :param string keypair_id: Name or ID of the keypair to use. + :param string coe: Name of the coe for the cluster template. + Other arguments will be passed in kwargs. + + :returns: a dict containing the cluster template description + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + cluster_template = ( + self.container_infrastructure_management.create_cluster_template( + name=name, + image_id=image_id, + keypair_id=keypair_id, + coe=coe, + **kwargs, + ) + ) + + return cluster_template + + def delete_cluster_template(self, name_or_id): + """Delete a cluster template. 
+ + :param name_or_id: Name or unique ID of the cluster template. + + :returns: True if the delete succeeded, False if the + cluster template was not found. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + + cluster_template = self.get_cluster_template(name_or_id) + + if not cluster_template: + self.log.debug( + "Cluster template %(name_or_id)s does not exist", + {'name_or_id': name_or_id}, + exc_info=True, + ) + return False + + self.container_infrastructure_management.delete_cluster_template( + cluster_template + ) + return True + + def update_cluster_template(self, name_or_id, **kwargs): + """Update a cluster template. + + :param name_or_id: Name or ID of the cluster template being updated. + + :returns: an update cluster template. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + cluster_template = self.get_cluster_template(name_or_id) + if not cluster_template: + raise exceptions.SDKException( + f"Cluster template {name_or_id} not found." + ) + + cluster_template = ( + self.container_infrastructure_management.update_cluster_template( + cluster_template, **kwargs + ) + ) + + return cluster_template + + def list_magnum_services(self): + """List all Magnum services. + + :returns: a list of dicts containing the service details. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + return list(self.container_infrastructure_management.services()) diff --git a/openstack/cloud/_compute.py b/openstack/cloud/_compute.py new file mode 100644 index 0000000000..1093d61d54 --- /dev/null +++ b/openstack/cloud/_compute.py @@ -0,0 +1,2107 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import base64 +import operator +import time +import warnings + +import iso8601 + +from openstack.cloud import _network_common +from openstack.cloud import _utils +from openstack.cloud import exc +from openstack.cloud import meta +from openstack.compute.v2 import server as _server +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +_SERVER_FIELDS = ( + 'accessIPv4', + 'accessIPv6', + 'addresses', + 'adminPass', + 'created', + 'description', + 'key_name', + 'metadata', + 'networks', + 'personality', + 'private_v4', + 'public_v4', + 'public_v6', + 'server_groups', + 'status', + 'updated', + 'user_id', + 'tags', +) + + +def _to_bool(value): + if isinstance(value, str): + if not value: + return False + prospective = value.lower().capitalize() + return prospective == 'True' + return bool(value) + + +def _pop_int(resource, key): + return int(resource.pop(key, 0) or 0) + + +def _pop_or_get(resource, key, default, strict): + if strict: + return resource.pop(key, default) + else: + return resource.get(key, default) + + +class ComputeCloudMixin(_network_common.NetworkCommonCloudMixin): + @property + def _compute_region(self): + # This is only used in exception messages. Can we get rid of it? + return self.config.get_region_name('compute') + + def get_flavor_name(self, flavor_id): + """Get the name of a flavor. + + :param flavor_id: ID of the flavor. + + :returns: The name of the flavor if a match if found, else None. 
+ """ + flavor = self.get_flavor(flavor_id, get_extra=False) + if flavor: + return flavor['name'] + return None + + def get_flavor_by_ram(self, ram, include=None, get_extra=True): + """Get a flavor based on amount of RAM available. + + Finds the flavor with the least amount of RAM that is at least + as much as the specified amount. If `include` is given, further + filter based on matching flavor name. + + :param int ram: Minimum amount of RAM. + :param string include: If given, will return a flavor whose name + contains this string as a substring. + :param get_extra: Whether to fetch extra specs. + + :returns: A compute ``Flavor`` object. + :raises: :class:`~openstack.exceptions.SDKException` if no + matching flavour could be found. + """ + flavors = self.list_flavors(get_extra=get_extra) + for flavor in sorted(flavors, key=operator.itemgetter('ram')): + if flavor['ram'] >= ram and ( + not include or include in flavor['name'] + ): + return flavor + raise exceptions.SDKException( + f"Could not find a flavor with {ram} and '{include}'" + ) + + def search_keypairs(self, name_or_id=None, filters=None): + """Search keypairs. + + :param name_or_id: Name or unique ID of the keypair(s). + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Invalid filters will be ignored. + + :returns: A list of compute ``Keypair`` objects matching the search + criteria. + """ + keypairs = self.list_keypairs( + filters=filters if isinstance(filters, dict) else None + ) + return _utils._filter_list(keypairs, name_or_id, filters) + + def search_flavors(self, name_or_id=None, filters=None, get_extra=True): + """Search flavors. + + :param name_or_id: Name or unique ID of the flavor(s). + :param filters: + :param get_extra: Whether to fetch extra specs. 
+ + :returns: A list of compute ``Flavor`` objects matching the search + criteria. + """ + flavors = self.list_flavors(get_extra=get_extra) + return _utils._filter_list(flavors, name_or_id, filters) + + def search_servers( + self, + name_or_id=None, + filters=None, + detailed=False, + all_projects=False, + bare=False, + ): + """Search servers. + + :param name_or_id: Name or unique ID of the server(s). + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Invalid filters will be ignored. + :param detailed: + :param all_projects: + :param bare: + + :returns: A list of compute ``Server`` objects matching the search + criteria. + """ + servers = self.list_servers( + detailed=detailed, all_projects=all_projects, bare=bare + ) + return _utils._filter_list(servers, name_or_id, filters) + + def search_server_groups(self, name_or_id=None, filters=None): + """Search server groups. + + :param name_or_id: Name or unique ID of the server group(s). + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Invalid filters will be ignored. + + :returns: A list of compute ``ServerGroup`` objects matching the search + criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + server_groups = self.list_server_groups() + return _utils._filter_list(server_groups, name_or_id, filters) + + def list_keypairs(self, filters=None): + """List all available keypairs. + + :param filters: + :returns: A list of compute ``Keypair`` objects. 
+ """ + if not filters: + filters = {} + return list(self.compute.keypairs(**filters)) + + def list_availability_zone_names(self, unavailable=False): + """List names of availability zones. + + :param bool unavailable: Whether or not to include unavailable zones + in the output. Defaults to False. + :returns: A list of availability zone names, or an empty list if the + list could not be fetched. + """ + try: + zones = self.compute.availability_zones() + ret = [] + for zone in zones: + if zone.state['available'] or unavailable: + ret.append(zone.name) + return ret + except exceptions.SDKException: + self.log.debug( + "Availability zone list could not be fetched", exc_info=True + ) + return [] + + def list_flavors(self, get_extra=False): + """List all available flavors. + + :param get_extra: Whether or not to fetch extra specs for each flavor. + Defaults to True. Default behavior value can be overridden in + clouds.yaml by setting openstack.cloud.get_extra_specs to False. + :returns: A list of compute ``Flavor`` objects. + """ + return list( + self.compute.flavors(details=True, get_extra_specs=get_extra) + ) + + def list_server_security_groups(self, server): + """List all security groups associated with the given server. + + :returns: A list of security group dictionary objects. 
+ """ + + # Don't even try if we're a cloud that doesn't have them + if not self._has_secgroups(): + return [] + + server = self.compute.get_server(server) + + server.fetch_security_groups(self.compute) + + return server.security_groups + + def _get_server_security_groups(self, server, security_groups): + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + if not isinstance(server, dict): + server = self.get_server(server, bare=True) + + if server is None: + self.log.debug('Server %s not found', server) + return None, None + + if not isinstance(security_groups, list | tuple): + security_groups = [security_groups] + + sec_group_objs = [] + + for sg in security_groups: + if not isinstance(sg, dict): + sg = self.get_security_group(sg) + + if sg is None: + self.log.debug( + 'Security group %s not found for adding', sg + ) + + return None, None + + sec_group_objs.append(sg) + + return server, sec_group_objs + + def add_server_security_groups(self, server, security_groups): + """Add security groups to a server. + + Add existing security groups to an existing server. If the security + groups are already present on the server this will continue unaffected. + + :param server: The server to remove security groups from. + :param security_groups: A list of security groups to remove. + :returns: False if server or security groups are undefined, True + otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + server, security_groups = self._get_server_security_groups( + server, security_groups + ) + + if not (server and security_groups): + return False + + for sg in security_groups: + self.compute.add_security_group_to_server(server, sg) + + return True + + def remove_server_security_groups(self, server, security_groups): + """Remove security groups from a server + + Remove existing security groups from an existing server. 
If the + security groups are not present on the server this will continue + unaffected. + + :param server: The server to remove security groups from. + :param security_groups: A list of security groups to remove. + :returns: False if server or security groups are undefined, True + otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + server, security_groups = self._get_server_security_groups( + server, security_groups + ) + + if not (server and security_groups): + return False + + ret = True + + for sg in security_groups: + try: + self.compute.remove_security_group_from_server(server, sg) + + except exceptions.NotFoundException: + # NOTE(jamielennox): Is this ok? If we remove something that + # isn't present should we just conclude job done or is that an + # error? Nova returns ok if you try to add a group twice. + self.log.debug( + "The security group %s was not present on server %s so " + "no action was performed", + sg.name, + server.name, + ) + ret = False + + return ret + + def list_servers( + self, + detailed=False, + all_projects=False, + bare=False, + filters=None, + ): + """List all available servers. + + :param detailed: Whether or not to add detailed additional information. + Defaults to False. + :param all_projects: Whether to list servers from all projects or just + the current auth scoped project. + :param bare: Whether to skip adding any additional information to the + server record. Defaults to False, meaning the addresses dict will + be populated as needed from neutron. Setting to True implies + detailed = False. + :param filters: Additional query parameters passed to the API server. + :returns: A list of compute ``Server`` objects. + """ + if not filters: + filters = {} + + return [ + self._expand_server(server, detailed, bare) + for server in self.compute.servers( + all_projects=all_projects, + **filters, + ) + ] + + def list_server_groups(self): + """List all available server groups. 
+ + :returns: A list of compute ``ServerGroup`` objects. + """ + return list(self.compute.server_groups()) + + def get_compute_limits(self, name_or_id=None): + """Get absolute compute limits for a project + + :param name_or_id: (optional) project name or ID to get limits for + if different from the current project + + :returns: A compute + :class:`~openstack.compute.v2.limits.Limits.AbsoluteLimits` object. + :raises: :class:`~openstack.exceptions.SDKException` if it's not a + valid project + """ + params = {} + if name_or_id: + identity = utils.ensure_service_version(self.identity, '3') + project = identity.find_project(name_or_id, ignore_missing=False) + params['tenant_id'] = project.id + return self.compute.get_limits(**params).absolute + + def get_keypair(self, name_or_id, filters=None, *, user_id=None): + """Get a keypair by name or ID. + + :param name_or_id: Name or ID of the keypair. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param user_id: User to retrieve keypair from. + + :returns: A compute ``Keypair`` object if found, else None. + """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use 'search_keypairs' " + "instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_keypairs(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return self.compute.find_keypair(name_or_id, user_id=user_id) + + def get_flavor(self, name_or_id, filters=None, get_extra=True): + """Get a flavor by name or ID. + + :param name_or_id: Name or ID of the flavor. 
+ :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :param get_extra: Whether or not the list_flavors call should get the + extra flavor specs. + :returns: A compute ``Flavor`` object if found, else None. + """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use 'search_flavors' " + "instead", + os_warnings.RemovedInSDK60Warning, + ) + + if not filters: + filters = {} + return self.compute.find_flavor( + name_or_id, + get_extra_specs=get_extra, + ignore_missing=True, + **filters, + ) + + def get_flavor_by_id(self, id, get_extra=False): + """Get a flavor by ID + + :param id: ID of the flavor. + :param get_extra: Whether or not the list_flavors call should get the + extra flavor specs. + :returns: A compute ``Flavor`` object if found, else None. + """ + return self.compute.get_flavor(id, get_extra_specs=get_extra) + + def get_server_console(self, server, length=None): + """Get the console log for a server. + + :param server: The server to fetch the console log for. Can be either + a server dict or the Name or ID of the server. + :param int length: The number of lines you would like to retrieve from + the end of the log. (optional, defaults to all) + + :returns: A string containing the text of the console log or an + empty string if the cloud does not support console logs. 
+ :raises: :class:`~openstack.exceptions.SDKException` if an invalid + server argument is given or if something else unforseen happens + """ + + if not isinstance(server, dict): + server = self.get_server(server, bare=True) + + if not server: + raise exceptions.SDKException( + "Console log requested for invalid server" + ) + + try: + return self._get_server_console_output(server['id'], length) + except exceptions.BadRequestException: + return "" + + def _get_server_console_output(self, server_id, length=None): + output = self.compute.get_server_console_output( + server=server_id, length=length + ) + if 'output' in output: + return output['output'] + + def get_server( + self, + name_or_id=None, + filters=None, + detailed=False, + bare=False, + all_projects=False, + ): + """Get a server by name or ID. + + :param name_or_id: Name or ID of the server. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param detailed: Whether or not to add detailed additional information. + Defaults to False. + :param bare: Whether to skip adding any additional information to the + server record. Defaults to False, meaning the addresses dict will + be populated as needed from neutron. Setting to True implies + detailed = False. + :param all_projects: Whether to get server from all projects or just + the current auth scoped project. + :returns: A compute ``Server`` object if found, else None. 
+ """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use " + "'search_servers' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_servers( + name_or_id, + filters, + detailed=detailed, + bare=True, + all_projects=all_projects, + ) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + server = entities[0] + return self._expand_server(server, detailed, bare) + + server = self.compute.find_server( + name_or_id, + # detailed controls whether we fetch more information about images, + # volumes etc., not the initial list operation + details=True, + all_projects=all_projects, + ) + return self._expand_server(server, detailed, bare) + + def _expand_server(self, server, detailed, bare): + if bare or not server: + return server + elif detailed: + return meta.get_hostvars_from_server(self, server) + else: + return meta.add_server_interfaces(self, server) + + def get_server_by_id(self, id): + """Get a server by ID. + + :param id: ID of the server. + + :returns: A compute ``Server`` object if found, else None. + """ + try: + server = self.compute.get_server(id) + return meta.add_server_interfaces(self, server) + except exceptions.NotFoundException: + return None + + def get_server_group(self, name_or_id=None, filters=None): + """Get a server group by name or ID. + + :param name_or_id: Name or ID of the server group. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + { + 'policy': 'affinity', + } + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A compute ``ServerGroup`` object if found, else None. 
+ """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use " + "'search_server_groups' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_server_groups(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return self.compute.find_server_group(name_or_id) + + def create_keypair(self, name, public_key=None): + """Create a new keypair. + + :param name: Name of the keypair being created. + :param public_key: Public key for the new keypair. + + :returns: The created compute ``Keypair`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + keypair = { + 'name': name, + } + if public_key: + keypair['public_key'] = public_key + return self.compute.create_keypair(**keypair) + + def delete_keypair(self, name): + """Delete a keypair. + + :param name: Name of the keypair to delete. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + try: + self.compute.delete_keypair(name, ignore_missing=False) + except exceptions.NotFoundException: + self.log.debug("Keypair %s not found for deleting", name) + return False + return True + + def create_image_snapshot( + self, + name, + server, + wait=False, + timeout=3600, + **metadata, + ): + """Create an image by snapshotting an existing server. + + ..note:: + On most clouds this is a cold snapshot - meaning that the server in + question will be shutdown before taking the snapshot. It is + possible that it's a live snapshot - but there is no way to know as + a user, so caveat emptor. + + :param name: Name of the image to be created + :param server: Server name or ID or dict representing the server + to be snapshotted + :param wait: If true, waits for image to be created. 
+ :param timeout: Seconds to wait for image creation. None is forever. + :param metadata: Metadata to give newly-created image entity + + :returns: The created image ``Image`` object. + :raises: :class:`~openstack.exceptions.SDKException` if there are + problems uploading + """ + if not isinstance(server, dict): + server_obj = self.get_server(server, bare=True) + if not server_obj: + raise exceptions.SDKException( + f"Server {server} could not be found and therefore " + f"could not be snapshotted." + ) + server = server_obj + image = self.compute.create_server_image( + server, name=name, metadata=metadata, wait=wait, timeout=timeout + ) + return image + + def get_server_id(self, name_or_id): + """Get the ID of a server. + + :param name_or_id: + :returns: The name of the server if found, else None. + """ + server = self.get_server(name_or_id, bare=True) + if server: + return server['id'] + return None + + def get_server_private_ip(self, server): + """Get the private IP of a server. + + :param server: + :returns: The private IP of the server if set, else None. + """ + return meta.get_server_private_ip(server, self) + + def get_server_public_ip(self, server): + """Get the public IP of a server. + + :param server: + :returns: The public IP of the server if set, else None. + """ + return meta.get_server_external_ipv4(self, server) + + def get_server_meta(self, server): + """Get the metadata for a server. + + :param server: + :returns: The metadata for the server if found, else None. 
+ """ + # TODO(mordred) remove once ansible has moved to Inventory interface + server_vars = meta.get_hostvars_from_server(self, server) + groups = meta.get_groups_from_server(self, server, server_vars) + return dict(server_vars=server_vars, groups=groups) + + @_utils.valid_kwargs( + 'meta', + 'files', + 'userdata', + 'description', + 'reservation_id', + 'return_raw', + 'min_count', + 'max_count', + 'security_groups', + 'key_name', + 'availability_zone', + 'block_device_mapping', + 'block_device_mapping_v2', + 'nics', + 'scheduler_hints', + 'config_drive', + 'admin_pass', + 'disk_config', + 'tags', + ) + def create_server( + self, + name, + image=None, + flavor=None, + auto_ip=True, + ips=None, + ip_pool=None, + root_volume=None, + terminate_volume=False, + wait=False, + timeout=180, + reuse_ips=True, + network=None, + boot_from_volume=False, + volume_size='50', + boot_volume=None, + volumes=None, + nat_destination=None, + group=None, + **kwargs, + ): + """Create a virtual server instance. + + :param name: Something to name the server. + :param image: Image dict, name or ID to boot with. image is required + unless boot_volume is given. + :param flavor: Flavor dict, name or ID to boot onto. + :param auto_ip: Whether to take actions to find a routable IP for + the server. (defaults to True) + :param ips: List of IPs to attach to the server (defaults to None) + :param ip_pool: Name of the network or floating IP pool to get an + address from. (defaults to None) + :param root_volume: Name or ID of a volume to boot from + (defaults to None - deprecated, use boot_volume) + :param boot_volume: Name or ID of a volume to boot from + (defaults to None) + :param terminate_volume: If booting from a volume, whether it should + be deleted when the server is destroyed. (defaults to False) + :param volumes: (optional) A list of volumes to attach to the server + :param meta: (optional) A dict of arbitrary key/value metadata to + store for this server. 
Both keys and values must be <=255 + characters. + :param files: (optional, deprecated) A dict of files to overwrite + on the server upon boot. Keys are file names (i.e. + ``/etc/passwd``) and values are the file contents (either as a + string or as a file-like object). A maximum of five entries is + allowed, and each file must be 10k or less. + :param reservation_id: a UUID for the set of servers being requested. + :param min_count: (optional extension) The minimum number of servers to + launch. + :param max_count: (optional extension) The maximum number of servers to + launch. + :param security_groups: A list of security group names + :param userdata: user data to pass to be exposed by the metadata + server this can be a file type object as well or a string. + :param key_name: (optional extension) name of previously created + keypair to inject into the instance. + :param availability_zone: Name of the availability zone for instance + placement. + :param block_device_mapping: (optional) A dict of block + device mappings for this server. + :param block_device_mapping_v2: (optional) A dict of block + device mappings for this server. + :param nics: (optional extension) an ordered list of nics to be + added to this server, with information about connected networks, + fixed IPs, port etc. + :param scheduler_hints: (optional extension) arbitrary key-value pairs + specified by the client to help boot an instance + :param config_drive: (optional extension) value for config drive + either boolean, or volume-id + :param disk_config: (optional extension) control how the disk is + partitioned when the server is created. possible values are 'AUTO' + or 'MANUAL'. + :param admin_pass: (optional extension) add a user supplied admin + password. + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. 
+ :param reuse_ips: (optional) Whether to attempt to reuse pre-existing + floating ips should a floating IP be needed (defaults to True) + :param network: (optional) Network dict or name or ID to attach the + server to. Mutually exclusive with the nics parameter. Can also + be a list of network names or IDs or network dicts. + :param boot_from_volume: Whether to boot from volume. 'boot_volume' + implies True, but boot_from_volume=True with no boot_volume is + valid and will create a volume from the image and use that. + :param volume_size: When booting an image from volume, how big should + the created volume be? Defaults to 50. + :param nat_destination: Which network should a created floating IP + be attached to, if it's not possible to infer from the cloud's + configuration. (Optional, defaults to None) + :param group: ServerGroup dict, name or id to boot the server in. + If a group is provided in both scheduler_hints and in the group + param, the group param will win. (Optional, defaults to None) + + :returns: The created compute ``Server`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + # TODO(shade) Image is optional but flavor is not - yet flavor comes + # after image in the argument list. Doh. 
+ if not flavor: + raise TypeError( + "create_server() missing 1 required argument: 'flavor'" + ) + if not image and not boot_volume: + raise TypeError( + "create_server() requires either 'image' or 'boot_volume'" + ) + + # TODO(mordred) Add support for description starting in 2.19 + security_groups = kwargs.get('security_groups', []) + if security_groups and not isinstance(kwargs['security_groups'], list): + security_groups = [security_groups] + if security_groups: + kwargs['security_groups'] = [] + for sec_group in security_groups: + kwargs['security_groups'].append(dict(name=sec_group)) + if 'userdata' in kwargs: + user_data = kwargs.pop('userdata') + if user_data: + kwargs['user_data'] = self._encode_server_userdata(user_data) + for desired, given in ( + ('OS-DCF:diskConfig', 'disk_config'), + ('config_drive', 'config_drive'), + ('key_name', 'key_name'), + ('metadata', 'meta'), + ('adminPass', 'admin_pass'), + ): + value = kwargs.pop(given, None) + if value: + kwargs[desired] = value + + if group: + if isinstance(group, dict): + if not isinstance(group, resource.Resource): + warnings.warn( + "Support for passing server group as a raw dict has " + "been deprecated for removal. Consider passing a " + "string name or ID or a ServerGroup object instead.", + os_warnings.RemovedInSDK60Warning, + ) + group_id = group['id'] + else: # object + group_obj = self.compute.find_server_group( + group, ignore_missing=False + ) + group_id = group_obj['id'] + if 'scheduler_hints' not in kwargs: + kwargs['scheduler_hints'] = {} + kwargs['scheduler_hints']['group'] = group_id + + kwargs.setdefault('max_count', kwargs.get('max_count', 1)) + kwargs.setdefault('min_count', kwargs.get('min_count', 1)) + + if 'nics' in kwargs and not isinstance(kwargs['nics'], list): + if isinstance(kwargs['nics'], dict): + # Be nice and help the user out + kwargs['nics'] = [kwargs['nics']] + else: + raise exceptions.SDKException( + 'nics parameter to create_server takes a list of dicts. 
' + 'Got: {nics}'.format(nics=kwargs['nics']) + ) + + if network and ('nics' not in kwargs or not kwargs['nics']): + nics = [] + if not isinstance(network, list): + network = [network] + for net in network: + if isinstance(net, dict): + if not isinstance(net, resource.Resource): + warnings.warn( + "Support for passing network as a raw dict has " + "been deprecated for removal. Consider passing a " + "string name or ID or a Network object instead.", + os_warnings.RemovedInSDK60Warning, + ) + network_id = net['id'] + else: + network_obj = self.network.find_network( + net, ignore_missing=False + ) + network_id = network_obj['id'] + nics.append({'net-id': network_id}) + + kwargs['nics'] = nics + if not network and ('nics' not in kwargs or not kwargs['nics']): + default_network = self.get_default_network() + if default_network: + kwargs['nics'] = [{'net-id': default_network['id']}] + + networks = [] + for nic in kwargs.pop('nics', []): + net = {} + if 'net-id' in nic: + # TODO(mordred) Make sure this is in uuid format + net['uuid'] = nic.pop('net-id') + # If there's a net-id, ignore net-name + nic.pop('net-name', None) + elif 'net-name' in nic: + net_name = nic.pop('net-name') + nic_net = self.network.find_network( + net_name, ignore_missing=False + ) + net['uuid'] = nic_net['id'] + for ip_key in ('v4-fixed-ip', 'v6-fixed-ip', 'fixed_ip'): + fixed_ip = nic.pop(ip_key, None) + if fixed_ip and net.get('fixed_ip'): + raise exceptions.SDKException( + "Only one of v4-fixed-ip, v6-fixed-ip or fixed_ip " + "may be given" + ) + if fixed_ip: + net['fixed_ip'] = fixed_ip + for key in ('port', 'port-id'): + if key in nic: + net['port'] = nic.pop(key) + # A tag supported only in server microversion 2.32-2.36 or >= 2.42 + # Bumping the version to 2.42 to support the 'tag' implementation + if 'tag' in nic: + utils.require_microversion(self.compute, '2.42') + net['tag'] = nic.pop('tag') + if nic: + raise exceptions.SDKException( + f"Additional unsupported keys given for server network 
" + f"creation: {nic.keys()}" + ) + networks.append(net) + if networks: + kwargs['networks'] = networks + else: + # If user has not passed networks - let Nova try the best; + # note earlier microversions expect this to be blank. + if utils.supports_microversion(self.compute, '2.37'): + kwargs['networks'] = 'auto' + + if image: + if isinstance(image, dict): + if not isinstance(image, resource.Resource): + warnings.warn( + "Support for passing image as a raw dict has " + "been deprecated for removal. Consider passing a " + "string name or ID or an Image object instead.", + os_warnings.RemovedInSDK60Warning, + ) + kwargs['imageRef'] = image['id'] + else: + image_obj = self.image.find_image(image, ignore_missing=False) + kwargs['imageRef'] = image_obj.id + + if isinstance(flavor, dict): + if not isinstance(flavor, resource.Resource): + warnings.warn( + "Support for passing flavor as a raw dict has " + "been deprecated for removal. Consider passing a " + "string name or ID or a Flavor object instead.", + os_warnings.RemovedInSDK60Warning, + ) + kwargs['flavorRef'] = flavor['id'] + else: + kwargs['flavorRef'] = self.get_flavor(flavor, get_extra=False).id + + if volumes is None: + volumes = [] + + # nova cli calls this boot_volume. Let's be the same + if root_volume and not boot_volume: + boot_volume = root_volume + + kwargs = self._get_boot_from_volume_kwargs( + image=image, + boot_from_volume=boot_from_volume, + boot_volume=boot_volume, + volume_size=str(volume_size), + terminate_volume=terminate_volume, + volumes=volumes, + kwargs=kwargs, + ) + + kwargs['name'] = name + + server = self.compute.create_server(**kwargs) + # TODO(mordred) We're only testing this in functional tests. We need + # to add unit tests for this too. 
+ admin_pass = server.admin_password or kwargs.get('admin_pass') + if not wait: + server = self.compute.get_server(server.id) + if server['status'] == 'ERROR': + if ( + 'fault' in server + and server['fault'] is not None + and 'message' in server['fault'] + ): + raise exceptions.SDKException( + "Error in creating the server. " + "Compute service reports fault: {reason}".format( + reason=server['fault']['message'] + ), + extra_data=dict(server=server), + ) + + raise exceptions.SDKException( + "Error in creating the server " + "(no further information available)", + extra_data=dict(server=server), + ) + + server = meta.add_server_interfaces(self, server) + + else: + server = self.wait_for_server( + server, + auto_ip=auto_ip, + ips=ips, + ip_pool=ip_pool, + reuse=reuse_ips, + timeout=timeout, + nat_destination=nat_destination, + ) + + server.admin_password = admin_pass + return server + + def _get_boot_from_volume_kwargs( + self, + image, + boot_from_volume, + boot_volume, + volume_size, + terminate_volume, + volumes, + kwargs, + ): + """Return block device mappings + + :param image: Image dict, name or id to boot with. + + """ + # TODO(mordred) We're only testing this in functional tests. We need + # to add unit tests for this too. + if boot_volume or boot_from_volume or volumes: + kwargs.setdefault('block_device_mapping_v2', []) + else: + return kwargs + + # If we have boot_from_volume but no root volume, then we're + # booting an image from volume + if boot_volume: + if isinstance(boot_volume, dict): + if not isinstance(boot_volume, resource.Resource): + warnings.warn( + "Support for passing boot_volume as a raw dict has " + "been deprecated for removal. 
Consider passing a " + "string name or ID or a Volume object instead.", + os_warnings.RemovedInSDK60Warning, + ) + volume_id = boot_volume['id'] + else: + volume = self.block_storage.find_volume( + boot_volume, ignore_missing=False + ) + volume_id = volume['id'] + block_mapping = { + 'boot_index': '0', + 'delete_on_termination': terminate_volume, + 'destination_type': 'volume', + 'uuid': volume_id, + 'source_type': 'volume', + } + kwargs['block_device_mapping_v2'].append(block_mapping) + kwargs['imageRef'] = '' + elif boot_from_volume: + if isinstance(image, dict): + if not isinstance(image, resource.Resource): + warnings.warn( + "Support for passing image as a raw dict has " + "been deprecated for removal. Consider passing a " + "string name or ID or an Image object instead.", + os_warnings.RemovedInSDK60Warning, + ) + image_obj = image + else: + image_obj = self.image.find_image(image, ignore_missing=False) + + block_mapping = { + 'boot_index': '0', + 'delete_on_termination': terminate_volume, + 'destination_type': 'volume', + 'uuid': image_obj['id'], + 'source_type': 'image', + 'volume_size': volume_size, + } + kwargs['imageRef'] = '' + kwargs['block_device_mapping_v2'].append(block_mapping) + + if volumes and kwargs['imageRef']: + # If we're attaching volumes on boot but booting from an image, + # we need to specify that in the BDM. + block_mapping = { + 'boot_index': 0, + 'delete_on_termination': True, + 'destination_type': 'local', + 'source_type': 'image', + 'uuid': kwargs['imageRef'], + } + kwargs['block_device_mapping_v2'].append(block_mapping) + + for volume in volumes: + if isinstance(volume, dict): + if not isinstance(volume, resource.Resource): + warnings.warn( + "Support for passing volumes as a list of raw dicts " + "been deprecated for removal. 
Consider passing a list " + "of string name or ID or ServerGroup objects instead.", + os_warnings.RemovedInSDK60Warning, + ) + volume_id = volume['id'] + else: + volume_obj = self.block_storage.find_volume( + volume, ignore_missing=False + ) + volume_id = volume_obj['id'] + block_mapping = { + 'boot_index': '-1', + 'delete_on_termination': False, + 'destination_type': 'volume', + 'uuid': volume_id, + 'source_type': 'volume', + } + kwargs['block_device_mapping_v2'].append(block_mapping) + return kwargs + + def wait_for_server( + self, + server, + auto_ip=True, + ips=None, + ip_pool=None, + reuse=True, + timeout=180, + nat_destination=None, + ): + """ + Wait for a server to reach ACTIVE status. + """ + server_id = server['id'] + timeout_message = "Timeout waiting for the server to come up." + start_time = time.time() + + for count in utils.iterate_timeout( + timeout, + timeout_message, + wait=min(5, timeout), + ): + try: + server = self.get_server(server_id) + except Exception: # noqa: S112 + # if it hasn't appeared yet, that's okay + continue + if not server: + continue + + # We have more work to do, but the details of that are + # hidden from the user. So, calculate remaining timeout + # and pass it down into the IP stack. 
+ remaining_timeout = timeout - int(time.time() - start_time) + if remaining_timeout <= 0: + raise exceptions.ResourceTimeout(timeout_message) + + server = self.get_active_server( + server=server, + reuse=reuse, + auto_ip=auto_ip, + ips=ips, + ip_pool=ip_pool, + wait=True, + timeout=remaining_timeout, + nat_destination=nat_destination, + ) + + if server is not None and server['status'] == 'ACTIVE': + return server + + def get_active_server( + self, + server, + auto_ip=True, + ips=None, + ip_pool=None, + reuse=True, + wait=False, + timeout=180, + nat_destination=None, + ): + if server['status'] == 'ERROR': + if ( + 'fault' in server + and server['fault'] is not None + and 'message' in server['fault'] + ): + raise exceptions.SDKException( + "Error in creating the server. " + "Compute service reports fault: {reason}".format( + reason=server['fault']['message'] + ), + extra_data=dict(server=server), + ) + + raise exceptions.SDKException( + "Error in creating the server " + "(no further information available)", + extra_data=dict(server=server), + ) + + if server['status'] == 'ACTIVE': + if server.get('addresses'): + return self.add_ips_to_server( + server, + auto_ip, + ips, + ip_pool, + reuse=reuse, + nat_destination=nat_destination, + wait=wait, + timeout=timeout, + ) + + self.log.debug( + f'Server {server["id"]} reached ACTIVE state without ' + f'being allocated an IP address. 
Deleting server.', + ) + try: + self._delete_server(server=server, wait=wait, timeout=timeout) + except Exception as e: + raise exceptions.SDKException( + f'Server reached ACTIVE state without being ' + f'allocated an IP address AND then could not ' + f'be deleted: {e}', + extra_data=dict(server=server), + ) + raise exceptions.SDKException( + 'Server reached ACTIVE state without being ' + 'allocated an IP address.', + extra_data=dict(server=server), + ) + return None + + def rebuild_server( + self, + server_id, + image_id, + admin_pass=None, + detailed=False, + bare=False, + wait=False, + timeout=180, + ): + """Rebuild a server. + + :param server_id: + :param image_id: + :param admin_pass: + :param detailed: + :param bare: + :param wait: + :param timeout: + :returns: A compute ``Server`` object. + """ + kwargs = {} + if image_id: + kwargs['image'] = image_id + if admin_pass: + kwargs['admin_password'] = admin_pass + + server = self.compute.rebuild_server(server_id, **kwargs) + if not wait: + return self._expand_server(server, bare=bare, detailed=detailed) + + admin_pass = server.get('adminPass') or admin_pass + server = self.compute.wait_for_server(server, wait=timeout) + if server['status'] == 'ACTIVE': + server.admin_password = admin_pass + + return self._expand_server(server, detailed=detailed, bare=bare) + + def set_server_metadata(self, name_or_id, metadata): + """Set metadata in a server instance. + + :param str name_or_id: The name or ID of the server instance to update. + :param dict metadata: A dictionary with the key=value pairs + to set in the server instance. It only updates the key=value pairs + provided. Existing ones will remain untouched. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + server = self.get_server(name_or_id, bare=True) + if not server: + raise exceptions.SDKException(f'Invalid Server {name_or_id}') + + self.compute.set_server_metadata(server=server.id, **metadata) + + def delete_server_metadata(self, name_or_id, metadata_keys): + """Delete metadata from a server instance. + + :param str name_or_id: The name or ID of the server instance + to update. + :param metadata_keys: A list with the keys to be deleted + from the server instance. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + server = self.get_server(name_or_id, bare=True) + if not server: + raise exceptions.SDKException(f'Invalid Server {name_or_id}') + + self.compute.delete_server_metadata( + server=server.id, keys=metadata_keys + ) + + def delete_server( + self, + name_or_id, + wait=False, + timeout=180, + delete_ips=False, + delete_ip_retry=1, + ): + """Delete a server instance. + + :param name_or_id: name or ID of the server to delete + :param bool wait: If true, waits for server to be deleted. + :param int timeout: Seconds to wait for server deletion. + :param bool delete_ips: If true, deletes any floating IPs + associated with the instance. + :param int delete_ip_retry: Number of times to retry deleting + any floating ips, should the first try be unsuccessful. + + :returns: True if delete succeeded, False otherwise if the + server does not exist. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + # If delete_ips is True, we need the server to not be bare. + server = self.compute.find_server(name_or_id, ignore_missing=True) + if not server: + return False + + # This portion of the code is intentionally left as a separate + # private method in order to avoid an unnecessary API call to get + # a server we already have. 
+ return self._delete_server( + server, + wait=wait, + timeout=timeout, + delete_ips=delete_ips, + delete_ip_retry=delete_ip_retry, + ) + + def _delete_server_floating_ips(self, server, delete_ip_retry): + # Does the server have floating ips in its + # addresses dict? If not, skip this. + server_floats = meta.find_nova_interfaces( + server['addresses'], ext_tag='floating' + ) + for fip in server_floats: + try: + ip = self.get_floating_ip( + id=None, filters={'floating_ip_address': fip['addr']} + ) + except exceptions.NotFoundException: + # We're deleting. If it doesn't exist - awesome + # NOTE(mordred) If the cloud is a nova FIP cloud but + # floating_ip_source is set to neutron, this + # can lead to a FIP leak. + continue + if not ip: + continue + deleted = self.delete_floating_ip(ip['id'], retry=delete_ip_retry) + if not deleted: + raise exceptions.SDKException( + "Tried to delete floating ip {floating_ip} " + "associated with server {id} but there was " + "an error deleting it. Not deleting server.".format( + floating_ip=ip['floating_ip_address'], id=server['id'] + ) + ) + + def _delete_server( + self, + server, + wait=False, + timeout=180, + delete_ips=False, + delete_ip_retry=1, + ): + if not server: + return False + + if delete_ips and self._has_floating_ips() and server['addresses']: + self._delete_server_floating_ips(server, delete_ip_retry) + + try: + self.compute.delete_server(server) + except exceptions.NotFoundException: + return False + except Exception: + raise + + if not wait: + return True + + if not isinstance(server, _server.Server): + # We might come here with Munch object (at the moment). + # If this is the case - convert it into real server to be able to + # use wait_for_delete + server = _server.Server(id=server['id']) + self.compute.wait_for_delete(server, wait=timeout) + + return True + + @_utils.valid_kwargs('name', 'description') + def update_server(self, name_or_id, detailed=False, bare=False, **kwargs): + """Update a server. 
+ + :param name_or_id: Name of the server to be updated. + :param detailed: Whether or not to add detailed additional information. + Defaults to False. + :param bare: Whether to skip adding any additional information to the + server record. Defaults to False, meaning the addresses dict will + be populated as needed from neutron. Setting to True implies + detailed = False. + :param name: New name for the server + :param description: New description for the server + + :returns: The updated compute ``Server`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + server = self.compute.find_server(name_or_id, ignore_missing=False) + server = self.compute.update_server(server, **kwargs) + + return self._expand_server(server, bare=bare, detailed=detailed) + + def create_server_group(self, name, policies=None, policy=None): + """Create a new server group. + + :param name: Name of the server group being created + :param policies: List of policies for the server group. + + :returns: The created compute ``ServerGroup`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + sg_attrs = {'name': name} + if policies: + sg_attrs['policies'] = policies + if policy: + sg_attrs['policy'] = policy + return self.compute.create_server_group(**sg_attrs) + + def delete_server_group(self, name_or_id): + """Delete a server group. + + :param name_or_id: Name or ID of the server group to delete + + :returns: True if delete succeeded, False otherwise + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + server_group = self.get_server_group(name_or_id) + if not server_group: + self.log.debug( + "Server group %s not found for deleting", name_or_id + ) + return False + + self.compute.delete_server_group(server_group, ignore_missing=False) + return True + + def create_flavor( + self, + name, + ram, + vcpus, + disk, + description=None, + flavorid="auto", + ephemeral=0, + swap=0, + rxtx_factor=1.0, + is_public=True, + ): + """Create a new flavor. + + :param name: Descriptive name of the flavor + :param ram: Memory in MB for the flavor + :param vcpus: Number of VCPUs for the flavor + :param disk: Size of local disk in GB + :param description: Description of the flavor + :param flavorid: ID for the flavor (optional) + :param ephemeral: Ephemeral space size in GB + :param swap: Swap space in MB + :param rxtx_factor: RX/TX factor + :param is_public: Make flavor accessible to the public + + :returns: The created compute ``Flavor`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + attrs = { + 'disk': disk, + 'ephemeral': ephemeral, + 'id': flavorid, + 'is_public': is_public, + 'name': name, + 'ram': ram, + 'rxtx_factor': rxtx_factor, + 'swap': swap, + 'vcpus': vcpus, + 'description': description, + } + if flavorid == 'auto': + attrs['id'] = None + + return self.compute.create_flavor(**attrs) + + def delete_flavor(self, name_or_id): + """Delete a flavor + + :param name_or_id: ID or name of the flavor to delete. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + try: + flavor = self.compute.find_flavor(name_or_id, ignore_missing=True) + if not flavor: + self.log.debug("Flavor %s not found for deleting", name_or_id) + return False + self.compute.delete_flavor(flavor) + return True + except exceptions.SDKException: + raise exceptions.SDKException( + f"Unable to delete flavor {name_or_id}" + ) + + def set_flavor_specs(self, flavor_id, extra_specs): + """Add extra specs to a flavor + + :param string flavor_id: ID of the flavor to update. + :param dict extra_specs: Dictionary of key-value pairs. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + :raises: :class:`~openstack.exceptions.BadRequestException` if flavor + ID is not found. + """ + self.compute.create_flavor_extra_specs(flavor_id, extra_specs) + + def unset_flavor_specs(self, flavor_id, keys): + """Delete extra specs from a flavor + + :param string flavor_id: ID of the flavor to update. + :param keys: List of spec keys to delete. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + :raises: :class:`~openstack.exceptions.BadRequestException` if flavor + ID is not found. + """ + for key in keys: + self.compute.delete_flavor_extra_specs_property(flavor_id, key) + + def add_flavor_access(self, flavor_id, project_id): + """Grant access to a private flavor for a project/tenant. + + :param string flavor_id: ID of the private flavor. + :param string project_id: ID of the project/tenant. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + self.compute.flavor_add_tenant_access(flavor_id, project_id) + + def remove_flavor_access(self, flavor_id, project_id): + """Revoke access from a private flavor for a project/tenant. + + :param string flavor_id: ID of the private flavor. + :param string project_id: ID of the project/tenant. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + self.compute.flavor_remove_tenant_access(flavor_id, project_id) + + def list_flavor_access(self, flavor_id): + """List access from a private flavor for a project/tenant. + + :param string flavor_id: ID of the private flavor. + + :returns: List of dicts with flavor_id and tenant_id attributes. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + return self.compute.get_flavor_access(flavor_id) + + def list_hypervisors(self, filters=None): + """List all hypervisors + + :param filters: Additional query parameters passed to the API server. + + :returns: A list of compute ``Hypervisor`` objects. + """ + if not filters: + filters = {} + + return list(self.compute.hypervisors(details=True, **filters)) + + def search_aggregates(self, name_or_id=None, filters=None): + """Seach host aggregates. + + :param name: Aggregate name or id. + :param dict filters: A dictionary of meta data to use for further + filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + { + 'availability_zone': 'nova', + 'metadata': {'cpu_allocation_ratio': '1.0'}, + } + + :returns: A list of compute ``Aggregate`` objects matching the search + criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + aggregates = self.list_aggregates() + return _utils._filter_list(aggregates, name_or_id, filters) + + def list_aggregates(self, filters=None): + """List all available host aggregates. + + :returns: A list of compute ``Aggregate`` objects. + """ + if not filters: + filters = {} + + return self.compute.aggregates(**filters) + + def get_aggregate(self, name_or_id, filters=None): + """Get an aggregate by name or ID. + + :param name_or_id: Name or ID of the aggregate. + :param dict filters: **DEPRECATED** A dictionary of meta data to use + for further filtering. Elements of this dictionary may, themselves, + be dictionaries. 
Example:: + + { + 'availability_zone': 'nova', + 'metadata': {'cpu_allocation_ratio': '1.0'}, + } + + :returns: An aggregate dict or None if no matching aggregate is + found. + """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use " + "'search_aggregates' instead", + os_warnings.RemovedInSDK60Warning, + ) + + return self.compute.find_aggregate(name_or_id, ignore_missing=True) + + def create_aggregate(self, name, availability_zone=None): + """Create a new host aggregate. + + :param name: Name of the host aggregate being created + :param availability_zone: Availability zone to assign hosts + + :returns: The created compute ``Aggregate`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + return self.compute.create_aggregate( + name=name, availability_zone=availability_zone + ) + + @_utils.valid_kwargs('name', 'availability_zone') + def update_aggregate(self, name_or_id, **kwargs): + """Update a host aggregate. + + :param name_or_id: Name or ID of the aggregate being updated. + :param name: New aggregate name + :param availability_zone: Availability zone to assign to hosts + + :returns: The updated compute ``Aggregate`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + aggregate = self.get_aggregate(name_or_id) + return self.compute.update_aggregate(aggregate, **kwargs) + + def delete_aggregate(self, name_or_id): + """Delete a host aggregate. + + :param name_or_id: Name or ID of the host aggregate to delete. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + if isinstance(name_or_id, str | bytes) and not name_or_id.isdigit(): + aggregate = self.get_aggregate(name_or_id) + if not aggregate: + self.log.debug( + "Aggregate %s not found for deleting", name_or_id + ) + return False + name_or_id = aggregate.id + try: + self.compute.delete_aggregate(name_or_id, ignore_missing=False) + return True + except exceptions.NotFoundException: + self.log.debug("Aggregate %s not found for deleting", name_or_id) + return False + + def set_aggregate_metadata(self, name_or_id, metadata): + """Set aggregate metadata, replacing the existing metadata. + + :param name_or_id: Name of the host aggregate to update + :param metadata: Dict containing metadata to replace (Use + {'key': None} to remove a key) + + :returns: a dict representing the new host aggregate. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + aggregate = self.get_aggregate(name_or_id) + if not aggregate: + raise exceptions.SDKException( + f"Host aggregate {name_or_id} not found." + ) + + return self.compute.set_aggregate_metadata(aggregate, metadata) + + def add_host_to_aggregate(self, name_or_id, host_name): + """Add a host to an aggregate. + + :param name_or_id: Name or ID of the host aggregate. + :param host_name: Host to add. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + aggregate = self.get_aggregate(name_or_id) + if not aggregate: + raise exceptions.SDKException( + f"Host aggregate {name_or_id} not found." + ) + + return self.compute.add_host_to_aggregate(aggregate, host_name) + + def remove_host_from_aggregate(self, name_or_id, host_name): + """Remove a host from an aggregate. + + :param name_or_id: Name or ID of the host aggregate. + :param host_name: Host to remove. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + aggregate = self.get_aggregate(name_or_id) + if not aggregate: + raise exceptions.SDKException( + f"Host aggregate {name_or_id} not found." + ) + + return self.compute.remove_host_from_aggregate(aggregate, host_name) + + def set_compute_quotas(self, name_or_id, **kwargs): + """Set a quota in a project + + :param name_or_id: project name or id + :param kwargs: key/value pairs of quota name and quota value + + :raises: :class:`~openstack.exceptions.SDKException` if the resource to + set the quota does not exist. + """ + identity = utils.ensure_service_version(self.identity, '3') + project = identity.find_project(name_or_id, ignore_missing=False) + kwargs['force'] = True + self.compute.update_quota_set(project=project, **kwargs) + + def get_compute_quotas(self, name_or_id): + """Get quota for a project + + :param name_or_id: project name or id + + :returns: A compute ``QuotaSet`` object if found, else None. + :raises: :class:`~openstack.exceptions.SDKException` if it's not a + valid project + """ + identity = utils.ensure_service_version(self.identity, '3') + proj = identity.find_project(name_or_id, ignore_missing=False) + return self.compute.get_quota_set(proj) + + def delete_compute_quotas(self, name_or_id): + """Delete quota for a project + + :param name_or_id: project name or id + + :raises: :class:`~openstack.exceptions.SDKException` if it's not a + valid project or the nova client call failed + """ + identity = utils.ensure_service_version(self.identity, '3') + proj = identity.find_project(name_or_id, ignore_missing=False) + self.compute.revert_quota_set(proj) + + def get_compute_usage(self, name_or_id, start=None, end=None): + """Get usage for a specific project + + :param name_or_id: project name or id + :param start: :class:`datetime.datetime` or string. Start date in UTC + Defaults to 2010-07-06T12:00:00Z (the date the OpenStack project + was started) + :param end: :class:`datetime.datetime` or string. End date in UTC. 
+ Defaults to now
+
+ :returns: A :class:`~openstack.compute.v2.usage.Usage` object
+ :raises: :class:`~openstack.exceptions.SDKException` if it's not a
+ valid project
+ """
+
+ def parse_date(date):
+ try:
+ return iso8601.parse_date(date)
+ except iso8601.iso8601.ParseError:
+ # Yes. This is an exception mask. However, iso8601 is an
+ # implementation detail - and the error message is actually
+ # less informative.
+ raise exceptions.SDKException(
+ f"Date given, {date}, is invalid. Please pass in a date "
+ f"string in ISO 8601 format (YYYY-MM-DDTHH:MM:SS)"
+ )
+
+ if isinstance(start, str):
+ start = parse_date(start)
+ if isinstance(end, str):
+ end = parse_date(end)
+
+ identity = utils.ensure_service_version(self.identity, '3')
+ project = identity.find_project(name_or_id, ignore_missing=False)
+
+ return self.compute.get_usage(project, start, end)
+
+ def _encode_server_userdata(self, userdata):
+ if hasattr(userdata, 'read'):
+ userdata = userdata.read()
+
+ if not isinstance(userdata, bytes):
+ # If the userdata passed in is bytes, just send it unmodified
+ if not isinstance(userdata, str):
+ raise TypeError(f"{type(userdata)} can't be encoded")
+ # If it's not bytes, make it bytes
+ userdata = userdata.encode('utf-8', 'strict')
+
+ # Once we have base64 bytes, make them into a utf-8 string for REST
+ return base64.b64encode(userdata).decode('utf-8')
+
+ def get_openstack_vars(self, server):
+ return meta.get_hostvars_from_server(self, server)
+
+ def _expand_server_vars(self, server):
+ # Used by nodepool
+ # TODO(mordred) remove after these make it into what we
+ # actually want the API to be. 
+ return meta.expand_server_vars(self, server) + + def _remove_novaclient_artifacts(self, item): + # Remove novaclient artifacts + item.pop('links', None) + item.pop('NAME_ATTR', None) + item.pop('HUMAN_ID', None) + item.pop('human_id', None) + item.pop('request_ids', None) + item.pop('x_openstack_request_ids', None) + + def _normalize_server(self, server): + ret = utils.Munch() + # Copy incoming server because of shared dicts in unittests + # Wrap the copy in munch so that sub-dicts are properly munched + server = utils.Munch(server) + + self._remove_novaclient_artifacts(server) + + ret['id'] = server.pop('id') + ret['name'] = server.pop('name') + + server['flavor'].pop('links', None) + ret['flavor'] = server.pop('flavor') + # From original_names from sdk + server.pop('flavorRef', None) + + # OpenStack can return image as a string when you've booted + # from volume + image = server.pop('image', None) + if str(image) != image: + image = utils.Munch(id=image['id']) + + ret['image'] = image + # From original_names from sdk + server.pop('imageRef', None) + # From original_names from sdk + ret['block_device_mapping'] = server.pop('block_device_mapping_v2', {}) + + project_id = server.pop('tenant_id', '') + project_id = server.pop('project_id', project_id) + + az = _pop_or_get( + server, 'OS-EXT-AZ:availability_zone', None, self.strict_mode + ) + # the server resource has this already, but it's missing az info + # from the resource. + # TODO(mordred) create_server is still normalizing servers that aren't + # from the resource layer. 
+ ret['location'] = server.pop( + 'location', + self._get_current_location(project_id=project_id, zone=az), + ) + + # Ensure volumes is always in the server dict, even if empty + ret['volumes'] = _pop_or_get( + server, + 'os-extended-volumes:volumes_attached', + [], + self.strict_mode, + ) + + config_drive = server.pop( + 'has_config_drive', server.pop('config_drive', False) + ) + ret['has_config_drive'] = _to_bool(config_drive) + + host_id = server.pop('hostId', server.pop('host_id', None)) + ret['host_id'] = host_id + + ret['progress'] = _pop_int(server, 'progress') + + # Leave these in so that the general properties handling works + ret['disk_config'] = _pop_or_get( + server, 'OS-DCF:diskConfig', None, self.strict_mode + ) + for key in ( + 'OS-EXT-STS:power_state', + 'OS-EXT-STS:task_state', + 'OS-EXT-STS:vm_state', + 'OS-SRV-USG:launched_at', + 'OS-SRV-USG:terminated_at', + 'OS-EXT-SRV-ATTR:hypervisor_hostname', + 'OS-EXT-SRV-ATTR:instance_name', + 'OS-EXT-SRV-ATTR:user_data', + 'OS-EXT-SRV-ATTR:host', + 'OS-EXT-SRV-ATTR:hostname', + 'OS-EXT-SRV-ATTR:kernel_id', + 'OS-EXT-SRV-ATTR:launch_index', + 'OS-EXT-SRV-ATTR:ramdisk_id', + 'OS-EXT-SRV-ATTR:reservation_id', + 'OS-EXT-SRV-ATTR:root_device_name', + 'OS-SCH-HNT:scheduler_hints', + ): + short_key = key.split(':')[1] + ret[short_key] = _pop_or_get(server, key, None, self.strict_mode) + + # Protect against security_groups being None + ret['security_groups'] = server.pop('security_groups', None) or [] + + # NOTE(mnaser): The Nova API returns the creation date in `created` + # however the Shade contract returns `created_at` for + # all resources. 
+ ret['created_at'] = server.get('created') + + for field in _SERVER_FIELDS: + ret[field] = server.pop(field, None) + if not ret['networks']: + ret['networks'] = {} + + ret['interface_ip'] = '' + + ret['properties'] = server.copy() + + # Backwards compat + if not self.strict_mode: + ret['hostId'] = host_id + ret['config_drive'] = config_drive + ret['project_id'] = project_id + ret['tenant_id'] = project_id + # TODO(efried): This is hardcoded to 'compute' because this method + # should only ever be used by the compute proxy. (That said, it + # doesn't appear to be used at all, so can we get rid of it?) + ret['region'] = self.config.get_region_name('compute') + ret['cloud'] = self.config.name + ret['az'] = az + for key, val in ret['properties'].items(): + ret.setdefault(key, val) + return ret diff --git a/openstack/cloud/_dns.py b/openstack/cloud/_dns.py new file mode 100644 index 0000000000..41b915fbdf --- /dev/null +++ b/openstack/cloud/_dns.py @@ -0,0 +1,267 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.cloud import _utils +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack import resource + + +class DnsCloudMixin(openstackcloud._OpenStackCloudMixin): + def list_zones(self, filters=None): + """List all available zones. + + :returns: A list of zones dicts. 
+ + """ + if not filters: + filters = {} + return list(self.dns.zones(allow_unknown_params=True, **filters)) + + def get_zone(self, name_or_id, filters=None): + """Get a zone by name or ID. + + :param name_or_id: Name or ID of the zone + :param filters: + A dictionary of meta data to use for further filtering + + :returns: A zone dict or None if no matching zone is found. + + """ + if not filters: + filters = {} + return self.dns.find_zone( + name_or_id=name_or_id, ignore_missing=True, **filters + ) + + def search_zones(self, name_or_id=None, filters=None): + zones = self.list_zones(filters) + return _utils._filter_list(zones, name_or_id, filters) + + def create_zone( + self, + name, + zone_type=None, + email=None, + description=None, + ttl=None, + masters=None, + ): + """Create a new zone. + + :param name: Name of the zone being created. + :param zone_type: Type of the zone (primary/secondary) + :param email: Email of the zone owner (only + applies if zone_type is primary) + :param description: Description of the zone + :param ttl: TTL (Time to live) value in seconds + :param masters: Master nameservers (only applies + if zone_type is secondary) + + :returns: a dict representing the created zone. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + + # We capitalize in case the user passes time in lowercase, as + # designate call expects PRIMARY/SECONDARY + if zone_type is not None: + zone_type = zone_type.upper() + if zone_type not in ('PRIMARY', 'SECONDARY'): + raise exceptions.SDKException( + f"Invalid type {zone_type}, valid choices are PRIMARY or " + f"SECONDARY" + ) + + zone = { + "name": name, + "email": email, + "description": description, + } + if ttl is not None: + zone["ttl"] = ttl + + if zone_type is not None: + zone["type"] = zone_type + + if masters is not None: + zone["masters"] = masters + + try: + return self.dns.create_zone(**zone) + except exceptions.SDKException: + raise exceptions.SDKException(f"Unable to create zone {name}") + + @_utils.valid_kwargs('email', 'description', 'ttl', 'masters') + def update_zone(self, name_or_id, **kwargs): + """Update a zone. + + :param name_or_id: Name or ID of the zone being updated. + :param email: Email of the zone owner (only + applies if zone_type is primary) + :param description: Description of the zone + :param ttl: TTL (Time to live) value in seconds + :param masters: Master nameservers (only applies + if zone_type is secondary) + + :returns: a dict representing the updated zone. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + zone = self.get_zone(name_or_id) + if not zone: + raise exceptions.SDKException(f"Zone {name_or_id} not found.") + + return self.dns.update_zone(zone['id'], **kwargs) + + def delete_zone(self, name_or_id): + """Delete a zone. + + :param name_or_id: Name or ID of the zone being deleted. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + + zone = self.dns.find_zone(name_or_id, ignore_missing=True) + if not zone: + self.log.debug("Zone %s not found for deleting", name_or_id) + return False + + self.dns.delete_zone(zone) + + return True + + def list_recordsets(self, zone): + """List all available recordsets. + + :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance + of the zone managing the recordset. + + :returns: A list of recordsets. + + """ + if isinstance(zone, resource.Resource): + zone_obj = zone + else: + zone_obj = self.get_zone(zone) + if zone_obj is None: + raise exceptions.SDKException(f"Zone {zone} not found.") + return list(self.dns.recordsets(zone_obj)) + + def get_recordset(self, zone, name_or_id): + """Get a recordset by name or ID. + + :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance + of the zone managing the recordset. + :param name_or_id: Name or ID of the recordset + + :returns: A recordset dict or None if no matching recordset is + found. + + """ + if isinstance(zone, resource.Resource): + zone_obj = zone + else: + zone_obj = self.get_zone(zone) + if not zone_obj: + raise exceptions.SDKException(f"Zone {name_or_id} not found.") + return self.dns.find_recordset( + zone=zone_obj, name_or_id=name_or_id, ignore_missing=True + ) + + def search_recordsets(self, zone, name_or_id=None, filters=None): + recordsets = self.list_recordsets(zone=zone) + return _utils._filter_list(recordsets, name_or_id, filters) + + def create_recordset( + self, zone, name, recordset_type, records, description=None, ttl=None + ): + """Create a recordset. + + :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance + of the zone managing the recordset. + :param name: Name of the recordset + :param recordset_type: Type of the recordset + :param records: List of the recordset definitions + :param description: Description of the recordset + :param ttl: TTL value of the recordset + + :returns: a dict representing the created recordset. 
+ :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if isinstance(zone, resource.Resource): + zone_obj = zone + else: + zone_obj = self.get_zone(zone) + if not zone_obj: + raise exceptions.SDKException(f"Zone {zone} not found.") + + # We capitalize the type in case the user sends in lowercase + recordset_type = recordset_type.upper() + + body = {'name': name, 'type': recordset_type, 'records': records} + + if description: + body['description'] = description + + if ttl: + body['ttl'] = ttl + + return self.dns.create_recordset(zone=zone_obj, **body) + + @_utils.valid_kwargs('description', 'ttl', 'records') + def update_recordset(self, zone, name_or_id, **kwargs): + """Update a recordset. + + :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance + of the zone managing the recordset. + :param name_or_id: Name or ID of the recordset being updated. + :param records: List of the recordset definitions + :param description: Description of the recordset + :param ttl: TTL (Time to live) value in seconds of the recordset + + :returns: a dict representing the updated recordset. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + + rs = self.get_recordset(zone, name_or_id) + if not rs: + raise exceptions.SDKException(f"Recordset {name_or_id} not found.") + + rs = self.dns.update_recordset(recordset=rs, **kwargs) + + return rs + + def delete_recordset(self, zone, name_or_id): + """Delete a recordset. + + :param zone: Name, ID or :class:`openstack.dns.v2.zone.Zone` instance + of the zone managing the recordset. + :param name_or_id: Name or ID of the recordset being deleted. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + + recordset = self.get_recordset(zone, name_or_id) + if not recordset: + self.log.debug("Recordset %s not found for deleting", name_or_id) + return False + + self.dns.delete_recordset(recordset, ignore_missing=False) + + return True diff --git a/openstack/cloud/_identity.py b/openstack/cloud/_identity.py new file mode 100644 index 0000000000..c8036e6607 --- /dev/null +++ b/openstack/cloud/_identity.py @@ -0,0 +1,1676 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import warnings + +from openstack.cloud import _utils +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack import utils +from openstack import warnings as os_warnings + + +class IdentityCloudMixin(openstackcloud._OpenStackCloudMixin): + def _get_project_id_param_dict(self, name_or_id): + if name_or_id: + project = self.get_project(name_or_id) + if not project: + return {} + if utils.supports_version(self.identity, '3'): + return {'default_project_id': project['id']} + else: + return {'tenant_id': project['id']} + else: + return {} + + def _get_domain_id_param_dict(self, domain_id): + """Get a useable domain.""" + + # Keystone v3 requires domains for user and project creation. v2 does + # not. 
However, keystone v2 does not allow user creation by non-admin + # users, so we can throw an error to the user that does not need to + # mention api versions + if utils.supports_version(self.identity, '3'): + if not domain_id: + raise exceptions.SDKException( + "User or project creation requires an explicit domain_id " + "argument." + ) + else: + return {'domain_id': domain_id} + else: + return {} + + def _get_identity_params(self, domain_id=None, project=None): + """Get the domain and project/tenant parameters if needed. + + keystone v2 and v3 are divergent enough that we need to pass or not + pass project or tenant_id or domain or nothing in a sane manner. + """ + ret = {} + ret.update(self._get_domain_id_param_dict(domain_id)) + ret.update(self._get_project_id_param_dict(project)) + return ret + + def list_projects(self, domain_id=None, name_or_id=None, filters=None): + """List projects. + + With no parameters, returns a full listing of all visible projects. + + :param domain_id: Domain ID to scope the searched projects. + :param name_or_id: Name or ID of the project(s). + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of identity ``Project`` objects. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + if not filters: + filters = {} + query = dict(**filters) + if name_or_id: + query['name'] = name_or_id + if domain_id: + query['domain_id'] = domain_id + + return list(identity.projects(**query)) + + def search_projects(self, name_or_id=None, filters=None, domain_id=None): + """Backwards compatibility method for search_projects + + search_projects originally had a parameter list that was name_or_id, + filters and list had domain_id first. This method exists in this form + to allow code written with positional parameter to still work. But + really, use keyword arguments. + + :param name_or_id: Name or ID of the project(s). + :param filters: dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param domain_id: Domain ID to scope the searched projects. + :returns: A list of identity ``Project`` objects. + """ + projects = self.list_projects(domain_id=domain_id, filters=filters) + return _utils._filter_list(projects, name_or_id, filters) + + def get_project(self, name_or_id, filters=None, domain_id=None): + """Get exactly one project. + + :param name_or_id: Name or unique ID of the project. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param domain_id: Domain ID to scope the retrieved project. + + :returns: An identity ``Project`` object. 
+ :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + if filters is not None: + warnings.warn( + "the 'filters' argument is deprecated; use " + "'search_projects' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_projects( + name_or_id, filters, domain_id=domain_id + ) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return identity.find_project( + name_or_id=name_or_id, domain_id=domain_id + ) + + def update_project( + self, + name_or_id, + enabled=None, + domain_id=None, + **kwargs, + ): + """Update a project + + :param name_or_id: Name or unique ID of the project. + :param enabled: Whether the project is enabled or not. + :param domain_id: Domain ID to scope the retrieved project. + :returns: An identity ``Project`` object. + """ + identity = utils.ensure_service_version(self.identity, '3') + + project = identity.find_project( + name_or_id=name_or_id, + domain_id=domain_id, + ignore_missing=False, + ) + if not project: + raise exceptions.SDKException(f"Project {name_or_id} not found.") + if enabled is not None: + kwargs.update({'enabled': enabled}) + project = identity.update_project(project, **kwargs) + return project + + def create_project( + self, + name, + domain_id, + description=None, + enabled=True, + **kwargs, + ): + """Create a project. + + :param name: + :param domain_id: + :param description: + :param enabled: + :returns: An identity ``Project`` object. + """ + identity = utils.ensure_service_version(self.identity, '3') + + attrs = dict( + name=name, + description=description, + domain_id=domain_id, + is_enabled=enabled, + ) + if kwargs: + attrs.update(kwargs) + return identity.create_project(**attrs) + + def delete_project(self, name_or_id, domain_id=None): + """Delete a project. 
+ + :param name_or_id: Name or unique ID of the project. + :param domain_id: Domain ID to scope the retrieved project. + + :returns: True if delete succeeded, False if the project was not found. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + identity = utils.ensure_service_version(self.identity, '3') + + try: + project = identity.find_project( + name_or_id=name_or_id, domain_id=domain_id, ignore_missing=True + ) + if not project: + self.log.debug("Project %s not found for deleting", name_or_id) + return False + identity.delete_project(project) + return True + except exceptions.SDKException: + self.log.exception(f"Error in deleting project {name_or_id}") + return False + + @_utils.valid_kwargs('domain_id', 'name') + def list_users(self, **kwargs): + """List users. + + :param name: + :param domain_id: Domain ID to scope the retrieved users. + + :returns: A list of identity ``User`` objects. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + return list(identity.users(**kwargs)) + + def search_users(self, name_or_id=None, filters=None, domain_id=None): + """Search users. + + :param name_or_id: Name or ID of the user(s). + :param domain_id: Domain ID to scope the retrieved users. + :param filters: dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of identity ``User`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + kwargs = {} + # NOTE(jdwidari) if name_or_id isn't UUID like then make use of server- + # side filter for user name https://bit.ly/2qh0Ijk + # especially important when using LDAP and using page to limit results + if name_or_id and not _utils._is_uuid_like(name_or_id): + kwargs['name'] = name_or_id + if domain_id: + kwargs['domain_id'] = domain_id + users = self.list_users(**kwargs) + return _utils._filter_list(users, name_or_id, filters) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_user(self, name_or_id, filters=None, domain_id=None): + """Get exactly one user. + + :param name_or_id: Name or unique ID of the user. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param domain_id: Domain ID to scope the retrieved user. + + :returns: an identity ``User`` object + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + if filters is not None: + warnings.warn( + "the 'filters' argument is deprecated; use " + "'search_user' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_users( + name_or_id, filters, domain_id=domain_id + ) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return identity.find_user(name_or_id, domain_id=domain_id) + + # TODO(stephenfin): Remove normalize since it doesn't do anything + def get_user_by_id(self, user_id, normalize=None): + """Get a user by ID. 
+
+        :param string user_id: user ID
+        :returns: an identity ``User`` object
+        """
+        identity = utils.ensure_service_version(self.identity, '3')
+
+        if normalize is not None:
+            warnings.warn(
+                "The 'normalize' field is unnecessary and will be removed in "
+                "a future release.",
+                os_warnings.RemovedInSDK60Warning,
+            )
+
+        return identity.get_user(user_id)
+
+    @_utils.valid_kwargs(
+        'name',
+        'email',
+        'enabled',
+        'domain_id',
+        'password',
+        'description',
+        'default_project',
+    )
+    def update_user(self, name_or_id, **kwargs):
+        identity = utils.ensure_service_version(self.identity, '3')
+
+        user_kwargs = {}
+        if kwargs.get('domain_id'):
+            user_kwargs['domain_id'] = kwargs['domain_id']
+        user = self.get_user(name_or_id, **user_kwargs)
+
+        # TODO(mordred) When this changes to REST, force interface=admin
+        # in the adapter call if it's an admin force call (and figure out how
+        # to make that distinction)
+        # NOTE(samueldmq): now this is a REST call and domain_id is dropped
+        # if None. keystoneclient drops keys with None values.
+ if 'domain_id' in kwargs and kwargs['domain_id'] is None: + del kwargs['domain_id'] + user = identity.update_user(user, **kwargs) + + return user + + def create_user( + self, + name, + password=None, + email=None, + default_project=None, + enabled=True, + domain_id=None, + description=None, + ): + """Create a user.""" + identity = utils.ensure_service_version(self.identity, '3') + + params = self._get_identity_params(domain_id, default_project) + params.update({'name': name, 'email': email, 'enabled': enabled}) + if password is not None: + params['password'] = password + if description is not None: + params['description'] = description + + user = identity.create_user(**params) + + return user + + @_utils.valid_kwargs('domain_id') + def delete_user(self, name_or_id, **kwargs): + identity = utils.ensure_service_version(self.identity, '3') + + try: + user = self.get_user(name_or_id, **kwargs) + if not user: + self.log.debug(f"User {name_or_id} not found for deleting") + return False + + identity.delete_user(user) + return True + + except exceptions.SDKException: + self.log.exception(f"Error in deleting user {name_or_id}") + return False + + def _get_user_and_group(self, user_name_or_id, group_name_or_id): + user = self.get_user(user_name_or_id) + if not user: + raise exceptions.SDKException(f'User {user_name_or_id} not found') + + group = self.get_group(group_name_or_id) + if not group: + raise exceptions.SDKException( + f'Group {group_name_or_id} not found' + ) + + return (user, group) + + def add_user_to_group(self, name_or_id, group_name_or_id): + """Add a user to a group. + + :param name_or_id: Name or unique ID of the user. 
+ :param group_name_or_id: Group name or ID + + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + identity = utils.ensure_service_version(self.identity, '3') + + user, group = self._get_user_and_group(name_or_id, group_name_or_id) + + identity.add_user_to_group(user, group) + + def is_user_in_group(self, name_or_id, group_name_or_id): + """Check to see if a user is in a group. + + :param name_or_id: Name or unique ID of the user. + :param group_name_or_id: Group name or ID + + :returns: True if user is in the group, False otherwise + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + identity = utils.ensure_service_version(self.identity, '3') + + user, group = self._get_user_and_group(name_or_id, group_name_or_id) + + return identity.check_user_in_group(user, group) + + def remove_user_from_group(self, name_or_id, group_name_or_id): + """Remove a user from a group. + + :param name_or_id: Name or unique ID of the user. + :param group_name_or_id: Group name or ID + + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + identity = utils.ensure_service_version(self.identity, '3') + + user, group = self._get_user_and_group(name_or_id, group_name_or_id) + + identity.remove_user_from_group(user, group) + + @_utils.valid_kwargs('type', 'service_type', 'description') + def create_service(self, name, enabled=True, **kwargs): + """Create a service. + + :param name: Service name. + :param type: Service type. (type or service_type required.) + :param service_type: Service type. (type or service_type required.) + :param description: Service description (optional). + :param enabled: Whether the service is enabled (v3 only) + + :returns: an identity ``Service`` object + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + type_ = kwargs.pop('type', None) + service_type = kwargs.pop('service_type', None) + + # TODO(mordred) When this changes to REST, force interface=admin + # in the adapter call + kwargs['type'] = type_ or service_type + kwargs['is_enabled'] = enabled + kwargs['name'] = name + + return identity.create_service(**kwargs) + + @_utils.valid_kwargs( + 'name', 'enabled', 'type', 'service_type', 'description' + ) + def update_service(self, name_or_id, **kwargs): + identity = utils.ensure_service_version(self.identity, '3') + + # NOTE(SamYaple): Keystone v3 only accepts 'type' but shade accepts + # both 'type' and 'service_type' with a preference + # towards 'type' + type_ = kwargs.pop('type', None) + service_type = kwargs.pop('service_type', None) + if type_ or service_type: + kwargs['type'] = type_ or service_type + + service = self.get_service(name_or_id) + return identity.update_service(service, **kwargs) + + def list_services(self): + """List all Keystone services. + + :returns: A list of identity ``Service`` object + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + return list(identity.services()) + + def search_services(self, name_or_id=None, filters=None): + """Search Keystone services. + + :param name_or_id: Name or ID of the service(s). + :param filters: dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: a list of identity ``Service`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + services = self.list_services() + return _utils._filter_list(services, name_or_id, filters) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_service(self, name_or_id, filters=None): + """Get exactly one Keystone service. + + :param name_or_id: Name or unique ID of the service. + + :returns: an identity ``Service`` object + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call or if multiple matches are + found. + """ + identity = utils.ensure_service_version(self.identity, '3') + + if filters is not None: + warnings.warn( + "the 'filters' argument is deprecated; use " + "'search_services' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_services(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return identity.find_service(name_or_id=name_or_id) + + def delete_service(self, name_or_id): + """Delete a Keystone service. + + :param name_or_id: Name or unique ID of the service. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + identity = utils.ensure_service_version(self.identity, '3') + + service = self.get_service(name_or_id=name_or_id) + if service is None: + self.log.debug("Service %s not found for deleting", name_or_id) + return False + + try: + identity.delete_service(service) + return True + except exceptions.SDKException: + self.log.exception( + 'Failed to delete service {id}'.format(id=service['id']) + ) + return False + + @_utils.valid_kwargs('public_url', 'internal_url', 'admin_url') + def create_endpoint( + self, + service_name_or_id, + url=None, + interface=None, + region=None, + enabled=True, + **kwargs, + ): + """Create a Keystone endpoint. 
+ + :param service_name_or_id: Service name or id for this endpoint. + :param url: URL of the endpoint + :param interface: Interface type of the endpoint + :param public_url: Endpoint public URL. + :param internal_url: Endpoint internal URL. + :param admin_url: Endpoint admin URL. + :param region: Endpoint region. + :param enabled: Whether the endpoint is enabled + + :returns: A list of identity ``Endpoint`` objects + :raises: :class:`~openstack.exceptions.SDKException` if the service + cannot be found or if something goes wrong during the OpenStack API + call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + public_url = kwargs.pop('public_url', None) + internal_url = kwargs.pop('internal_url', None) + admin_url = kwargs.pop('admin_url', None) + + if (url or interface) and (public_url or internal_url or admin_url): + raise exceptions.SDKException( + "create_endpoint takes either url and interface OR " + "public_url, internal_url, admin_url" + ) + + service = self.get_service(name_or_id=service_name_or_id) + if service is None: + raise exceptions.SDKException( + f"service {service_name_or_id} not found" + ) + + endpoints_args = [] + if url: + # v3 in use, v3-like arguments, one endpoint created + endpoints_args.append( + { + 'url': url, + 'interface': interface, + 'service_id': service['id'], + 'enabled': enabled, + 'region_id': region, + } + ) + else: + # v3 in use, v2.0-like arguments, one endpoint created for each + # interface url provided + endpoint_args = { + 'region_id': region, + 'enabled': enabled, + 'service_id': service['id'], + } + if public_url: + endpoint_args.update( + {'url': public_url, 'interface': 'public'} + ) + endpoints_args.append(endpoint_args.copy()) + if internal_url: + endpoint_args.update( + {'url': internal_url, 'interface': 'internal'} + ) + endpoints_args.append(endpoint_args.copy()) + if admin_url: + endpoint_args.update({'url': admin_url, 'interface': 'admin'}) + endpoints_args.append(endpoint_args.copy()) + + 
endpoints = [] + for args in endpoints_args: + endpoints.append(identity.create_endpoint(**args)) + return endpoints + + @_utils.valid_kwargs( + 'enabled', 'service_name_or_id', 'url', 'interface', 'region' + ) + def update_endpoint(self, endpoint_id, **kwargs): + identity = utils.ensure_service_version(self.identity, '3') + + service_name_or_id = kwargs.pop('service_name_or_id', None) + if service_name_or_id is not None: + kwargs['service_id'] = service_name_or_id + if 'region' in kwargs: + kwargs['region_id'] = kwargs.pop('region') + + return identity.update_endpoint(endpoint_id, **kwargs) + + def list_endpoints(self): + """List Keystone endpoints. + + :returns: A list of identity ``Endpoint`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + return list(identity.endpoints()) + + def search_endpoints(self, id=None, filters=None): + """List Keystone endpoints. + + :param id: ID of endpoint(s). + :param filters: dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of identity ``Endpoint`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + # NOTE(SamYaple): With keystone v3 we can filter directly via the + # the keystone api, but since the return of all the endpoints even in + # large environments is small, we can continue to filter in shade just + # like the v2 api. 
+        endpoints = self.list_endpoints()
+        return _utils._filter_list(endpoints, id, filters)
+
+    # TODO(stephenfin): Remove 'filters' since it's a noop
+    def get_endpoint(self, id, filters=None):
+        """Get exactly one Keystone endpoint.
+
+        :param id: ID of endpoint.
+        :returns: An identity ``Endpoint`` object
+        """
+        identity = utils.ensure_service_version(self.identity, '3')
+
+        if filters is not None:
+            warnings.warn(
+                "the 'filters' argument is deprecated; use "
+                "'search_endpoints' instead",
+                os_warnings.RemovedInSDK60Warning,
+            )
+            entities = self.search_endpoints(id, filters)
+            if not entities:
+                return None
+
+            if len(entities) > 1:
+                raise exceptions.SDKException(
+                    f"Multiple matches found for {id}",
+                )
+
+            return entities[0]
+
+        return identity.find_endpoint(name_or_id=id)
+
+    def delete_endpoint(self, id):
+        """Delete a Keystone endpoint.
+
+        :param id: ID of the endpoint to delete.
+
+        :returns: True if delete succeeded, False otherwise.
+        :raises: :class:`~openstack.exceptions.SDKException` if something goes
+            wrong during the OpenStack API call.
+        """
+        identity = utils.ensure_service_version(self.identity, '3')
+
+        endpoint = self.get_endpoint(id=id)
+        if endpoint is None:
+            self.log.debug("Endpoint %s not found for deleting", id)
+            return False
+
+        try:
+            identity.delete_endpoint(id)
+            return True
+        except exceptions.SDKException:
+            self.log.exception(f"Failed to delete endpoint {id}")
+            return False
+
+    def create_domain(self, name, description=None, enabled=True):
+        """Create a domain.
+
+        :param name: The name of the domain.
+        :param description: A description of the domain.
+        :param enabled: Is the domain enabled or not (default True).
+        :returns: The created identity ``Domain`` object.
+        :raises: :class:`~openstack.exceptions.SDKException` if the domain
+            cannot be created.
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + domain_ref = {'name': name, 'enabled': enabled} + if description is not None: + domain_ref['description'] = description + return identity.create_domain(**domain_ref) + + # TODO(stephenfin): domain_id and name_or_id are the same thing now; + # deprecate one of them + def update_domain( + self, + domain_id=None, + name=None, + description=None, + enabled=None, + name_or_id=None, + ): + """Update a Keystone domain + + :param domain_id: + :param name: + :param description: + :param enabled: + :param name_or_id: Name or unique ID of the domain. + :returns: The updated identity ``Domain`` object. + :raises: :class:`~openstack.exceptions.SDKException` if the domain + cannot be updated + """ + identity = utils.ensure_service_version(self.identity, '3') + + if domain_id is None: + if name_or_id is None: + raise exceptions.SDKException( + "You must pass either domain_id or name_or_id value" + ) + dom = self.get_domain(None, name_or_id) + if dom is None: + raise exceptions.SDKException( + f"Domain {name_or_id} not found for updating" + ) + domain_id = dom['id'] + + domain_ref = {} + domain_ref.update({'name': name} if name else {}) + domain_ref.update({'description': description} if description else {}) + domain_ref.update({'enabled': enabled} if enabled is not None else {}) + return identity.update_domain(domain_id, **domain_ref) + + # TODO(stephenfin): domain_id and name_or_id are the same thing now; + # deprecate one of them + def delete_domain(self, domain_id=None, name_or_id=None): + """Delete a Keystone domain. + + :param domain_id: ID of the domain to delete. + :param name_or_id: Name or unique ID of the domain. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + try: + if domain_id is None: + if name_or_id is None: + raise exceptions.SDKException( + "You must pass either domain_id or name_or_id value" + ) + dom = self.get_domain(name_or_id=name_or_id) + if dom is None: + self.log.debug( + "Domain %s not found for deleting", name_or_id + ) + return False + domain_id = dom['id'] + + # A domain must be disabled before deleting + identity.update_domain(domain_id, is_enabled=False) + identity.delete_domain(domain_id, ignore_missing=False) + + return True + except exceptions.SDKException: + self.log.exception(f"Failed to delete domain {domain_id}") + raise + + def list_domains(self, **filters): + """List Keystone domains. + + :returns: A list of identity ``Domain`` objects. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + return list(identity.domains(**filters)) + + # TODO(stephenfin): These arguments are backwards from everything else. + def search_domains(self, filters=None, name_or_id=None): + """Search Keystone domains. + + :param name_or_id: Name or ID of the domain(s). + :param filters: dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: a list of identity ``Domain`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + if filters is None: + filters = {} + if name_or_id is not None: + domains = self.list_domains() + return _utils._filter_list(domains, name_or_id, filters) + else: + return self.list_domains(**filters) + + # TODO(stephenfin): domain_id and name_or_id are the same thing now; + # deprecate one of them + # TODO(stephenfin): Remove 'filters' in a future major version + def get_domain(self, domain_id=None, name_or_id=None, filters=None): + """Get exactly one Keystone domain. + + :param domain_id: ID of the domain. + :param name_or_id: Name or unique ID of the domain. + :param filters: **DEPRECATED** A dictionary of meta data to use for + further filtering. Elements of this dictionary may, themselves, be + dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: an identity ``Domain`` object + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated for removal. It is a " + "no-op and can be safely removed.", + os_warnings.RemovedInSDK60Warning, + ) + + if domain_id is None: + return identity.find_domain(name_or_id, ignore_missing=True) + else: + return identity.get_domain(domain_id) + + @_utils.valid_kwargs('domain_id') + def list_groups(self, **kwargs): + """List Keystone groups. + + :param domain_id: Domain ID. + + :returns: A list of identity ``Group`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + return list(identity.groups(**kwargs)) + + def search_groups(self, name_or_id=None, filters=None, domain_id=None): + """Search Keystone groups. + + :param name_or_id: Name or ID of the group(s). + :param filters: dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param domain_id: Domain ID to scope the searched groups. + + :returns: A list of identity ``Group`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + groups = self.list_groups(domain_id=domain_id) + return _utils._filter_list(groups, name_or_id, filters) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_group(self, name_or_id, filters=None, domain_id=None): + """Get exactly one Keystone group. + + :param name_or_id: Name or unique ID of the group(s). + :param domain_id: Domain ID to scope the retrieved group. + + :returns: An identity ``Group`` object + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + if filters is not None: + warnings.warn( + "the 'filters' argument is deprecated; use " + "'search_projects' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_groups( + name_or_id, filters, domain_id=domain_id + ) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return identity.find_group(name_or_id=name_or_id, domain_id=domain_id) + + def create_group(self, name, description, domain=None): + """Create a group. + + :param string name: Group name. + :param string description: Group description. + :param string domain: Domain name or ID for the group. + + :returns: An identity ``Group`` object + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + group_ref = {'name': name} + if description: + group_ref['description'] = description + if domain: + dom = self.get_domain(domain) + if not dom: + raise exceptions.SDKException( + f"Creating group {name} failed: Invalid domain {domain}" + ) + group_ref['domain_id'] = dom['id'] + + group = identity.create_group(**group_ref) + + return group + + def update_group( + self, + name_or_id, + name=None, + description=None, + **kwargs, + ): + """Update an existing group + + :param name_or_id: Name or unique ID of the group. + :param name: New group name. + :param description: New group description. + + :returns: The updated identity ``Group`` object. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + group = identity.find_group(name_or_id, ignore_missing=False, **kwargs) + + group_ref = {} + if name: + group_ref['name'] = name + if description: + group_ref['description'] = description + + group = identity.update_group(group, **group_ref) + + return group + + def delete_group(self, name_or_id): + """Delete a group + + :param name_or_id: Name or unique ID of the group. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + try: + group = identity.find_group(name_or_id, ignore_missing=True) + if group is None: + self.log.debug("Group %s not found for deleting", name_or_id) + return False + + identity.delete_group(group) + + return True + + except exceptions.SDKException: + self.log.exception(f"Unable to delete group {name_or_id}") + return False + + def list_roles(self, **kwargs): + """List Keystone roles. + + :returns: A list of identity ``Role`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + return list(identity.roles(**kwargs)) + + def search_roles(self, name_or_id=None, filters=None, domain_id=None): + """Seach Keystone roles. + + :param name: Name or ID of the role(s). + :param filters: dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Example:: + + "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :param domain_id: Domain ID to scope the searched roles. 
+ + :returns: a list of identity ``Role`` objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + roles = self.list_roles(domain_id=domain_id) + return _utils._filter_list(roles, name_or_id, filters) + + # TODO(stephenfin): Remove 'filters' in a future major version + def get_role(self, name_or_id, filters=None, domain_id=None): + """Get a Keystone role. + + :param name_or_id: Name or unique ID of the role. + :param domain_id: Domain ID to scope the retrieved role. + + :returns: An identity ``Role`` object if found, else None. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + if filters is not None: + warnings.warn( + "the 'filters' argument is deprecated; use " + "'search_roles' instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_roles( + name_or_id, filters, domain_id=domain_id + ) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return identity.find_role(name_or_id=name_or_id, domain_id=domain_id) + + def _keystone_v3_role_assignments(self, **filters): + identity = utils.ensure_service_version(self.identity, '3') + + # NOTE(samueldmq): different parameters have different representation + # patterns as query parameters in the call to the list role assignments + # API. The code below handles each set of patterns separately and + # renames the parameters names accordingly, ignoring 'effective', + # 'include_names' and 'include_subtree' whose do not need any renaming. 
+ for k in ('group', 'role', 'user'): + if k in filters: + try: + filters[k + '.id'] = filters[k].id + except AttributeError: + # Also this goes away in next patches + filters[k + '.id'] = filters[k] + del filters[k] + for k in ('project', 'domain'): + if k in filters: + try: + filters['scope.' + k + '.id'] = filters[k].id + except AttributeError: + # NOTE(gtema): will be dropped once domains are switched to + # proxy + filters['scope.' + k + '.id'] = filters[k] + del filters[k] + if 'inherited_to' in filters: + filters['scope.OS-INHERIT:inherited_to'] = filters['inherited_to'] + del filters['inherited_to'] + elif 'os_inherit_extension_inherited_to' in filters: + warnings.warn( + "os_inherit_extension_inherited_to is deprecated. Use " + "inherited_to instead.", + os_warnings.RemovedInSDK50Warning, + ) + filters['scope.OS-INHERIT:inherited_to'] = filters[ + 'os_inherit_extension_inherited_to' + ] + del filters['os_inherit_extension_inherited_to'] + + return list(identity.role_assignments(**filters)) + + def list_role_assignments(self, filters=None): + """List Keystone role assignments + + :param dict filters: Dict of filter conditions. Acceptable keys are: + + * 'user' (string) - User ID to be used as query filter. + * 'group' (string) - Group ID to be used as query filter. + * 'project' (string) - Project ID to be used as query filter. + * 'domain' (string) - Domain ID to be used as query filter. + * 'system' (string) - System name to be used as query filter. + * 'role' (string) - Role ID to be used as query filter. + * 'inherited_to' (string) - Return inherited + role assignments for either 'projects' or 'domains'. + * 'os_inherit_extension_inherited_to' (string) - Deprecated; use + 'inherited_to' instead. + * 'effective' (boolean) - Return effective role assignments. + * 'include_subtree' (boolean) - Include subtree + + 'user' and 'group' are mutually exclusive, as are 'domain' and + 'project'. 
+ + :returns: A list of identity + :class:`openstack.identity.v3.role_assignment.RoleAssignment` + objects + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + identity = utils.ensure_service_version(self.identity, '3') + + # NOTE(samueldmq): although 'include_names' is a valid query parameter + # in the keystone v3 list role assignments API, it would have NO effect + # on shade due to normalization. It is not documented as an acceptable + # filter in the docs above per design! + + if not filters: + filters = {} + + # NOTE(samueldmq): the docs above say filters are *IDs*, though if + # dict or Resource objects are passed, this still works for backwards + # compatibility as keystoneclient allows either IDs or objects to be + # passed in. + # TODO(samueldmq): fix the docs above to advertise Resource objects + # can be provided as parameters too + for k, v in filters.items(): + if isinstance(v, dict): + filters[k] = v['id'] + + for k in ['role', 'group', 'user']: + if k in filters: + filters[f'{k}_id'] = filters.pop(k) + + for k in ['domain', 'project']: + if k in filters: + filters[f'scope_{k}_id'] = filters.pop(k) + + if 'system' in filters: + system_scope = filters.pop('system') + filters['scope.system'] = system_scope + + if 'os_inherit_extension_inherited_to' in filters: + warnings.warn( + "os_inherit_extension_inherited_to is deprecated. Use " + "inherited_to instead.", + os_warnings.RemovedInSDK50Warning, + ) + filters['inherited_to'] = filters.pop( + 'os_inherit_extension_inherited_to' + ) + + return list(identity.role_assignments(**filters)) + + @_utils.valid_kwargs('domain_id') + def create_role(self, name, **kwargs): + """Create a Keystone role. + + :param string name: The name of the role. 
+ :param domain_id: domain id (v3) + :returns: an identity ``Role`` object + :raises: :class:`~openstack.exceptions.SDKException` if the role cannot + be created + """ + identity = utils.ensure_service_version(self.identity, '3') + + kwargs['name'] = name + return identity.create_role(**kwargs) + + @_utils.valid_kwargs('domain_id') + def update_role(self, name_or_id, name, **kwargs): + """Update a Keystone role. + + :param name_or_id: Name or unique ID of the role. + :param string name: The new role name + :param domain_id: domain id + :returns: an identity ``Role`` object + :raises: :class:`~openstack.exceptions.SDKException` if the role cannot + be created + """ + identity = utils.ensure_service_version(self.identity, '3') + + role = self.get_role(name_or_id, **kwargs) + if role is None: + self.log.debug("Role %s not found for updating", name_or_id) + return False + + return identity.update_role(role, name=name, **kwargs) + + @_utils.valid_kwargs('domain_id') + def delete_role(self, name_or_id, **kwargs): + """Delete a Keystone role. + + :param name_or_id: Name or unique ID of the role. + :param domain_id: domain id (v3) + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + identity = utils.ensure_service_version(self.identity, '3') + + role = self.get_role(name_or_id, **kwargs) + if role is None: + self.log.debug("Role %s not found for deleting", name_or_id) + return False + + try: + identity.delete_role(role) + return True + except exceptions.SDKException: + self.log.exception(f"Unable to delete role {name_or_id}") + raise + + def _get_grant_revoke_params( + self, + role, + user=None, + group=None, + project=None, + domain=None, + system=None, + ): + identity = utils.ensure_service_version(self.identity, '3') + + data = {} + search_args = {} + if domain: + data['domain'] = identity.find_domain(domain, ignore_missing=False) + # We have domain. We should use it for further searching user, + # group, role, project + search_args['domain_id'] = data['domain'].id + + data['role'] = identity.find_role(role, ignore_missing=False) + + if user and group: + raise exceptions.SDKException( + 'Specify either a group or a user, not both' + ) + if user is None and group is None: + raise exceptions.SDKException( + 'Must specify either a user or a group' + ) + if project is None and domain is None and system is None: + raise exceptions.SDKException( + 'Must specify either a domain, project or system' + ) + + if user: + data['user'] = identity.find_user( + user, ignore_missing=False, **search_args + ) + if group: + data['group'] = identity.find_group( + group, ignore_missing=False, **search_args + ) + if project: + data['project'] = identity.find_project( + project, ignore_missing=False, **search_args + ) + + return data + + def grant_role( + self, + name_or_id, + user=None, + group=None, + project=None, + domain=None, + system=None, + inherited=False, + wait=False, + timeout=60, + ): + """Grant a role to a user. + + :param string name_or_id: Name or unique ID of the role. + :param string user: The name or id of the user. + :param string group: The name or id of the group. (v3) + :param string project: The name or id of the project. 
+ :param string domain: The id of the domain. (v3) + :param bool system: The name of the system. (v3) + :param bool inherited: Whether the role assignment is inherited. (v3) + :param bool wait: Wait for role to be granted + :param int timeout: Timeout to wait for role to be granted + + NOTE: domain is a required argument when the grant is on a project, + user or group specified by name. In that situation, they are all + considered to be in that domain. If different domains are in use in + the same role grant, it is required to specify those by ID. + + NOTE: for wait and timeout, sometimes granting roles is not + instantaneous. + + NOTE: precedence is given first to project, then domain, then system + + :returns: True if the role is assigned, otherwise False + :raises: :class:`~openstack.exceptions.SDKException` if the role cannot + be granted + """ + identity = utils.ensure_service_version(self.identity, '3') + + data = self._get_grant_revoke_params( + name_or_id, + user=user, + group=group, + project=project, + domain=domain, + system=system, + ) + + user = data.get('user') + group = data.get('group') + project = data.get('project') + domain = data.get('domain') + role = data.get('role') + + if project: + # Proceed with project - precedence over domain and system + if user: + has_role = identity.validate_user_has_project_role( + project, user, role, inherited=inherited + ) + if has_role: + self.log.debug('Assignment already exists') + return False + identity.assign_project_role_to_user( + project, user, role, inherited=inherited + ) + else: + has_role = identity.validate_group_has_project_role( + project, group, role, inherited=inherited + ) + if has_role: + self.log.debug('Assignment already exists') + return False + identity.assign_project_role_to_group( + project, group, role, inherited=inherited + ) + elif domain: + # Proceed with domain - precedence over system + if user: + has_role = identity.validate_user_has_domain_role( + domain, user, role, 
inherited=inherited + ) + if has_role: + self.log.debug('Assignment already exists') + return False + identity.assign_domain_role_to_user( + domain, user, role, inherited=inherited + ) + else: + has_role = identity.validate_group_has_domain_role( + domain, group, role, inherited=inherited + ) + if has_role: + self.log.debug('Assignment already exists') + return False + identity.assign_domain_role_to_group( + domain, group, role, inherited=inherited + ) + else: + # Proceed with system + # System name must be 'all' due to checks performed in + # _get_grant_revoke_params + if user: + has_role = identity.validate_user_has_system_role( + user, role, system + ) + if has_role: + self.log.debug('Assignment already exists') + return False + identity.assign_system_role_to_user(user, role, system) + else: + has_role = identity.validate_group_has_system_role( + group, role, system + ) + if has_role: + self.log.debug('Assignment already exists') + return False + identity.assign_system_role_to_group(group, role, system) + return True + + def revoke_role( + self, + name_or_id, + user=None, + group=None, + project=None, + domain=None, + system=None, + inherited=False, + wait=False, + timeout=60, + ): + """Revoke a role from a user. + + :param string name_or_id: Name or unique ID of the role. + :param string user: The name or id of the user. + :param string group: The name or id of the group. (v3) + :param string project: The name or id of the project. + :param string domain: The id of the domain. (v3) + :param bool system: The name of the system. (v3) + :param bool inherited: Whether the role assignment is inherited. + :param bool wait: Wait for role to be revoked + :param int timeout: Timeout to wait for role to be revoked + + NOTE: for wait and timeout, sometimes revoking roles is not + instantaneous. 
+ + NOTE: project is required for keystone v2 + + NOTE: precedence is given first to project, then domain, then system + + :returns: True if the role is revoke, otherwise False + :raises: :class:`~openstack.exceptions.SDKException` if the role cannot + be removed + """ + identity = utils.ensure_service_version(self.identity, '3') + + data = self._get_grant_revoke_params( + name_or_id, + user=user, + group=group, + project=project, + domain=domain, + system=system, + ) + + user = data.get('user') + group = data.get('group') + project = data.get('project') + domain = data.get('domain') + role = data.get('role') + + if project: + # Proceed with project - precedence over domain and system + if user: + has_role = identity.validate_user_has_project_role( + project, user, role, inherited=inherited + ) + if not has_role: + self.log.debug('Assignment does not exists') + return False + identity.unassign_project_role_from_user( + project, user, role, inherited=inherited + ) + else: + has_role = identity.validate_group_has_project_role( + project, group, role, inherited=inherited + ) + if not has_role: + self.log.debug('Assignment does not exists') + return False + identity.unassign_project_role_from_group( + project, group, role, inherited=inherited + ) + elif domain: + # Proceed with domain - precedence over system + if user: + has_role = identity.validate_user_has_domain_role( + domain, user, role, inherited=inherited + ) + if not has_role: + self.log.debug('Assignment does not exists') + return False + identity.unassign_domain_role_from_user( + domain, user, role, inherited=inherited + ) + else: + has_role = identity.validate_group_has_domain_role( + domain, group, role, inherited=inherited + ) + if not has_role: + self.log.debug('Assignment does not exists') + return False + identity.unassign_domain_role_from_group( + domain, group, role, inherited=inherited + ) + else: + # Proceed with system + # System name must be 'all' due to checks performed in + # 
_get_grant_revoke_params + if user: + has_role = identity.validate_user_has_system_role( + user, role, system + ) + if not has_role: + self.log.debug('Assignment does not exist') + return False + identity.unassign_system_role_from_user(user, role, system) + else: + has_role = identity.validate_group_has_system_role( + group, role, system + ) + if not has_role: + self.log.debug('Assignment does not exist') + return False + identity.unassign_system_role_from_group(group, role, system) + return True diff --git a/openstack/cloud/_image.py b/openstack/cloud/_image.py new file mode 100644 index 0000000000..c88c7010ec --- /dev/null +++ b/openstack/cloud/_image.py @@ -0,0 +1,401 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import typing as ty +import warnings + +from openstack.cloud import _utils +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack import utils +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + import concurrent.futures + from keystoneauth1 import session as ks_session + from oslo_config import cfg + + from openstack.config import cloud_region + from openstack import service_description + + +class ImageCloudMixin(openstackcloud._OpenStackCloudMixin): + def __init__( + self, + cloud: str | None = None, + config: ty.Optional['cloud_region.CloudRegion'] = None, + session: ty.Optional['ks_session.Session'] = None, + app_name: str | None = None, + app_version: str | None = None, + extra_services: list['service_description.ServiceDescription'] + | None = None, + strict: bool = False, + use_direct_get: bool | None = None, + task_manager: ty.Any = None, + rate_limit: float | dict[str, float] | None = None, + oslo_conf: ty.Optional['cfg.ConfigOpts'] = None, + service_types: list[str] | None = None, + global_request_id: str | None = None, + strict_proxies: bool = False, + pool_executor: ty.Optional['concurrent.futures.Executor'] = None, + **kwargs: ty.Any, + ): + super().__init__( + cloud=cloud, + config=config, + session=session, + app_name=app_name, + app_version=app_version, + extra_services=extra_services, + strict=strict, + use_direct_get=use_direct_get, + task_manager=task_manager, + rate_limit=rate_limit, + oslo_conf=oslo_conf, + service_types=service_types, + global_request_id=global_request_id, + strict_proxies=strict_proxies, + pool_executor=pool_executor, + **kwargs, + ) + + self.image_api_use_tasks = self.config.config['image_api_use_tasks'] + + def search_images(self, name_or_id=None, filters=None): + images = self.list_images() + return _utils._filter_list(images, name_or_id, filters) + + def list_images(self, filter_deleted=True, show_all=False): + """Get available images. 
+ + :param filter_deleted: Control whether deleted images are returned. + :param show_all: Show all images, including images that are shared + but not accepted. (By default in glance v2 shared image that + have not been accepted are not shown) show_all will override the + value of filter_deleted to False. + :returns: A list of glance images. + """ + if show_all: + filter_deleted = False + # First, try to actually get images from glance, it's more efficient + images = [] + params = {} + image_list = [] + if utils.supports_version(self.image, '2'): + if show_all: + params['member_status'] = 'all' + image_list = list(self.image.images(**params)) + + for image in image_list: + # The cloud might return DELETED for invalid images. + # While that's cute and all, that's an implementation detail. + if not filter_deleted: + images.append(image) + elif image.status.lower() != 'deleted': + images.append(image) + return images + + def get_image(self, name_or_id, filters=None): + """Get an image by name or ID. + + :param name_or_id: Name or ID of the image. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + :returns: An image :class:`openstack.image.v2.image.Image` object. + """ + if filters is not None: + warnings.warn( + "The 'filters' argument is deprecated; use 'search_images' " + "instead", + os_warnings.RemovedInSDK60Warning, + ) + entities = self.search_images(name_or_id, filters) + if not entities: + return None + + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}", + ) + + return entities[0] + + return self.image.find_image(name_or_id) + + def get_image_by_id(self, id): + """Get a image by ID + + :param id: ID of the image. 
+ :returns: An image :class:`openstack.image.v2.image.Image` object. + """ + return self.image.get_image(id) + + def download_image( + self, + name_or_id, + output_path=None, + output_file=None, + chunk_size=1024 * 1024, + stream=False, + ): + """Download an image by name or ID + + :param str name_or_id: Name or ID of the image. + :param output_path: the output path to write the image to. Either this + or output_file must be specified + :param output_file: a file object (or file-like object) to write the + image data to. Only write() will be called on this object. Either + this or output_path must be specified + :param int chunk_size: size in bytes to read from the wire and buffer + at one time. Defaults to 1024 * 1024 = 1 MiB + :param: bool stream: whether to stream the output in chunk_size. + + :returns: When output_path and output_file are not given - the bytes + comprising the given Image when stream is False, otherwise a + :class:`requests.Response` instance. When output_path or + output_file are given - an image + :class:`~openstack.image.v2.image.Image` instance. 
+ :raises: :class:`~openstack.exceptions.SDKException` in the event + download_image is called without exactly one of either output_path + or output_file + :raises: :class:`~openstack.exceptions.BadRequestException` if no + images are found matching the name or ID provided + """ + if output_path is None and output_file is None: + raise exceptions.SDKException( + 'No output specified, an output path or file object ' + 'is necessary to write the image data to' + ) + elif output_path is not None and output_file is not None: + raise exceptions.SDKException( + 'Both an output path and file object were provided, ' + 'however only one can be used at once' + ) + + image = self.image.find_image(name_or_id, ignore_missing=False) + + return self.image.download_image( + image, + output=output_file or output_path, + chunk_size=chunk_size, + stream=stream, + ) + + def get_image_exclude(self, name_or_id, exclude): + for image in self.search_images(name_or_id): + if exclude: + if exclude not in image.name: + return image + else: + return image + return None + + def get_image_name(self, image_id, exclude=None): + image = self.get_image_exclude(image_id, exclude) + if image: + return image.name + return None + + def get_image_id(self, image_name, exclude=None): + image = self.get_image_exclude(image_name, exclude) + if image: + return image.id + return None + + def wait_for_image(self, image, timeout=3600): + image_id = image['id'] + for count in utils.iterate_timeout( + timeout, "Timeout waiting for image to snapshot" + ): + image = self.get_image(image_id) + if not image: + continue + if image['status'] == 'active': + return image + elif image['status'] == 'error': + raise exceptions.SDKException( + f'Image {image_id} hit error state' + ) + + def delete_image( + self, + name_or_id, + wait=False, + timeout=3600, + delete_objects=True, + ): + """Delete an existing image. + + :param name_or_id: Name of the image to be deleted. + :param wait: If True, waits for image to be deleted. 
+ :param timeout: Seconds to wait for image deletion. None is forever. + :param delete_objects: If True, also deletes uploaded swift objects. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` if there are + problems deleting. + """ + image = self.get_image(name_or_id) + if not image: + return False + self.image.delete_image(image) + + # Task API means an image was uploaded to swift + # TODO(gtema) does it make sense to move this into proxy? + if self.image_api_use_tasks and ( + self.image._IMAGE_OBJECT_KEY in image.properties + or self.image._SHADE_IMAGE_OBJECT_KEY in image.properties + ): + container, objname = image.properties.get( + self.image._IMAGE_OBJECT_KEY, + image.properties.get(self.image._SHADE_IMAGE_OBJECT_KEY), + ).split('/', 1) + self.object_store.delete_object( + objname, + container=container, + ) + + if wait: + for count in utils.iterate_timeout( + timeout, "Timeout waiting for the image to be deleted." + ): + if self.get_image(image.id) is None: + break + return True + + def create_image( + self, + name, + filename=None, + container=None, + md5=None, + sha256=None, + disk_format=None, + container_format=None, + disable_vendor_agent=True, + wait=False, + timeout=3600, + tags=None, + allow_duplicates=False, + meta=None, + volume=None, + **kwargs, + ): + """Upload an image. + + :param str name: Name of the image to create. If it is a pathname + of an image, the name will be constructed from the + extensionless basename of the path. + :param str filename: The path to the file to upload, if needed. + (optional, defaults to None) + :param str container: Name of the container in swift where images + should be uploaded for import if the cloud requires such a thing. + (optiona, defaults to 'images') + :param str md5: md5 sum of the image file. If not given, an md5 will + be calculated. + :param str sha256: sha256 sum of the image file. If not given, an md5 + will be calculated. 
+ :param str disk_format: The disk format the image is in. (optional, + defaults to the os-client-config config value for this cloud) + :param str container_format: The container format the image is in. + (optional, defaults to the os-client-config config value for this + cloud) + :param list tags: List of tags for this image. Each tag is a string + of at most 255 chars. + :param bool disable_vendor_agent: Whether or not to append metadata + flags to the image to inform the cloud in question to not expect a + vendor agent to be runing. (optional, defaults to True) + :param bool wait: If true, waits for image to be created. Defaults to + true - however, be aware that one of the upload methods is always + synchronous. + :param timeout: Seconds to wait for image creation. None is forever. + :param allow_duplicates: If true, skips checks that enforce unique + image name. (optional, defaults to False) + :param meta: A dict of key/value pairs to use for metadata that + bypasses automatic type conversion. + :param volume: Name or ID or volume object of a volume to create an + image from. Mutually exclusive with (optional, defaults to None) + + Additional kwargs will be passed to the image creation as additional + metadata for the image and will have all values converted to string + except for min_disk, min_ram, size and virtual_size which will be + converted to int. + + If you are sure you have all of your data types correct or have an + advanced need to be explicit, use meta. If you are just a normal + consumer, using kwargs is likely the right choice. + + If a value is in meta and kwargs, meta wins. + + :returns: An image :class:`openstack.image.v2.image.Image` object. 
+ :raises: :class:`~openstack.exceptions.SDKException` if there are + problems uploading + """ + if volume: + image = self.block_storage.create_image( + name=name, + volume=volume, + allow_duplicates=allow_duplicates, + container_format=container_format, + disk_format=disk_format, + wait=wait, + timeout=timeout, + ) + else: + image = self.image.create_image( + name, + filename=filename, + container=container, + md5=md5, + sha256=sha256, + disk_format=disk_format, + container_format=container_format, + disable_vendor_agent=disable_vendor_agent, + wait=wait, + timeout=timeout, + tags=tags, + allow_duplicates=allow_duplicates, + meta=meta, + **kwargs, + ) + + if not wait: + return image + + try: + for count in utils.iterate_timeout( + timeout, "Timeout waiting for the image to finish." + ): + image_obj = self.get_image(image.id) + if image_obj and image_obj.status not in ('queued', 'saving'): + return image_obj + except exceptions.ResourceTimeout: + self.log.debug( + "Timeout waiting for image to become ready. Deleting." + ) + self.delete_image(image.id, wait=True) + raise + + def update_image_properties( + self, image=None, name_or_id=None, meta=None, **properties + ): + image = image or name_or_id + return self.image.update_image_properties( + image=image, meta=meta, **properties + ) diff --git a/openstack/cloud/_network.py b/openstack/cloud/_network.py new file mode 100644 index 0000000000..5a0046b55c --- /dev/null +++ b/openstack/cloud/_network.py @@ -0,0 +1,2710 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.cloud import _network_common +from openstack.cloud import _utils +from openstack.cloud import exc +from openstack import exceptions +from openstack import utils + + +class NetworkCloudMixin(_network_common.NetworkCommonCloudMixin): + def _neutron_extensions(self): + extensions = set() + for extension in self.network.extensions(): + extensions.add(extension['alias']) + return extensions + + def _has_neutron_extension(self, extension_alias): + return extension_alias in self._neutron_extensions() + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_networks(self, name_or_id=None, filters=None): + """Search networks + + :param name_or_id: Name or ID of the desired network. + :param filters: A dict containing additional filters to use. e.g. + {'router:external': True} + + :returns: A list of network ``Network`` objects matching the search + criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + query = {} + if name_or_id: + query['name'] = name_or_id + if filters: + query.update(filters) + return list(self.network.networks(**query)) + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_routers(self, name_or_id=None, filters=None): + """Search routers + + :param name_or_id: Name or ID of the desired router. + :param filters: A dict containing additional filters to use. e.g. + {'admin_state_up': True} + + :returns: A list of network ``Router`` objects matching the search + criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + query = {} + if name_or_id: + query['name'] = name_or_id + if filters: + query.update(filters) + return list(self.network.routers(**query)) + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_subnets(self, name_or_id=None, filters=None): + """Search subnets + + :param name_or_id: Name or ID of the desired subnet. + :param filters: A dict containing additional filters to use. e.g. + {'enable_dhcp': True} + + :returns: A list of network ``Subnet`` objects matching the search + criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + query = {} + if name_or_id: + query['name'] = name_or_id + if filters: + query.update(filters) + return list(self.network.subnets(**query)) + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_ports(self, name_or_id=None, filters=None): + """Search ports + + :param name_or_id: Name or ID of the desired port. + :param filters: A dict containing additional filters to use. e.g. + {'device_id': '2711c67a-b4a7-43dd-ace7-6187b791c3f0'} + + :returns: A list of network ``Port`` objects matching the search + criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + # If the filter is a string, do not push the filter down to neutron; + # get all the ports and filter locally. + # TODO(stephenfin): '_filter_list' can handle a dict - pass it down + if isinstance(filters, str): + pushdown_filters = None + else: + pushdown_filters = filters + ports = self.list_ports(pushdown_filters) + return _utils._filter_list(ports, name_or_id, filters) + + def list_networks(self, filters=None): + """List all available networks. + + :param filters: (optional) A dict of filter conditions to push down. + :returns: A list of network ``Network`` objects. + """ + # If the cloud is running nova-network, just return an empty list. 
+ if not self.has_service('network'): + return [] + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.network.networks(**filters)) + + def list_routers(self, filters=None): + """List all available routers. + + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``Router`` objects. + """ + # If the cloud is running nova-network, just return an empty list. + if not self.has_service('network'): + return [] + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.network.routers(**filters)) + + def list_subnets(self, filters=None): + """List all available subnets. + + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``Subnet`` objects. + """ + # If the cloud is running nova-network, just return an empty list. + if not self.has_service('network'): + return [] + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.network.subnets(**filters)) + + def list_ports(self, filters=None): + """List all available ports. + + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``Port`` objects. + """ + # If the cloud is running nova-network, just return an empty list. + if not self.has_service('network'): + return [] + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + + return list(self.network.ports(**filters)) + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_qos_policy(self, name_or_id, filters=None): + """Get a QoS policy by name or ID. + + :param name_or_id: Name or ID of the policy. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. 
+ Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A network ``QoSPolicy`` object if found, else None. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + if not filters: + filters = {} + return self.network.find_qos_policy( + name_or_id=name_or_id, ignore_missing=True, **filters + ) + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_qos_policies(self, name_or_id=None, filters=None): + """Search QoS policies + + :param name_or_id: Name or ID of the desired policy. + :param filters: a dict containing additional filters to use. e.g. + {'shared': True} + + :returns: A list of network ``QosPolicy`` objects matching the search + criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + query = {} + if name_or_id: + query['name'] = name_or_id + if filters: + query.update(filters) + return list(self.network.qos_policies(**query)) + + def list_qos_rule_types(self, filters=None): + """List all available QoS rule types. + + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``QosRuleType`` objects. 
+ """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.network.qos_rule_types(**filters)) + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_qos_rule_type_details(self, rule_type, filters=None): + """Get a QoS rule type details by rule type name. + + :param rule_type: Name of the QoS rule type. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A network ``QoSRuleType`` object if found, else None. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + if not self._has_neutron_extension('qos-rule-type-details'): + raise exc.OpenStackCloudUnavailableExtension( + 'qos-rule-type-details extension is not available ' + 'on target cloud' + ) + + return self.network.get_qos_rule_type(rule_type) + + def list_qos_policies(self, filters=None): + """List all available QoS policies. + + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``QosPolicy`` objects. 
+ """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + return list(self.network.qos_policies(**filters)) + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_network(self, name_or_id, filters=None): + """Get a network by name or ID. + + :param name_or_id: Name or ID of the network. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A network ``Network`` object if found, else None. + """ + if not filters: + filters = {} + return self.network.find_network( + name_or_id=name_or_id, ignore_missing=True, **filters + ) + + def get_network_by_id(self, id): + """Get a network by ID + + :param id: ID of the network. + :returns: A network ``Network`` object if found, else None. + """ + return self.network.get_network(id) + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_router(self, name_or_id, filters=None): + """Get a router by name or ID. + + :param name_or_id: Name or ID of the router. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A network ``Router`` object if found, else None. 
+ """ + if not filters: + filters = {} + return self.network.find_router( + name_or_id=name_or_id, ignore_missing=True, **filters + ) + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_subnet(self, name_or_id, filters=None): + """Get a subnet by name or ID. + + :param name_or_id: Name or ID of the subnet. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + :returns: A network ``Subnet`` object if found, else None. + """ + if not filters: + filters = {} + return self.network.find_subnet( + name_or_id=name_or_id, ignore_missing=True, **filters + ) + + def get_subnet_by_id(self, id): + """Get a subnet by ID + + :param id: ID of the subnet. + :returns: A network ``Subnet`` object if found, else None. + """ + return self.network.get_subnet(id) + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_port(self, name_or_id, filters=None): + """Get a port by name or ID. + + :param name_or_id: Name or ID of the port. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A network ``Port`` object if found, else None. + """ + if not filters: + filters = {} + return self.network.find_port( + name_or_id=name_or_id, ignore_missing=True, **filters + ) + + def get_port_by_id(self, id): + """Get a port by ID + + :param id: ID of the port. + :returns: A network ``Port`` object if found, else None. + """ + return self.network.get_port(id) + + def get_subnetpool(self, name_or_id): + """Get a subnetpool by name or ID. 
+ + :param name_or_id: Name or ID of the subnetpool. + + :returns: A network ``Subnetpool`` object if found, else None. + """ + return self.network.find_subnet_pool( + name_or_id=name_or_id, ignore_missing=True + ) + + def create_network( + self, + name, + shared=False, + admin_state_up=True, + external=False, + provider=None, + project_id=None, + availability_zone_hints=None, + port_security_enabled=None, + mtu_size=None, + dns_domain=None, + ): + """Create a network. + + :param string name: Name of the network being created. + :param bool shared: Set the network as shared. + :param bool admin_state_up: Set the network administrative state to up. + :param bool external: Whether this network is externally accessible. + :param dict provider: A dict of network provider options. Example:: + + {'network_type': 'vlan', 'segmentation_id': 'vlan1'} + + :param string project_id: Specify the project ID this network + will be created on (admin-only). + :param types.ListType availability_zone_hints: A list of availability + zone hints. + :param bool port_security_enabled: Enable / Disable port security + :param int mtu_size: maximum transmission unit value to address + fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6. + :param string dns_domain: Specify the DNS domain associated with + this network. + :returns: The created network ``Network`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + network = { + 'name': name, + 'admin_state_up': admin_state_up, + } + + if shared: + network['shared'] = shared + + if project_id is not None: + network['project_id'] = project_id + + if availability_zone_hints is not None: + if not isinstance(availability_zone_hints, list): + raise exceptions.SDKException( + "Parameter 'availability_zone_hints' must be a list" + ) + if not self._has_neutron_extension('network_availability_zone'): + raise exc.OpenStackCloudUnavailableExtension( + 'network_availability_zone extension is not available on ' + 'target cloud' + ) + network['availability_zone_hints'] = availability_zone_hints + + if provider: + if not isinstance(provider, dict): + raise exceptions.SDKException( + "Parameter 'provider' must be a dict" + ) + # Only pass what we know + for attr in ( + 'physical_network', + 'network_type', + 'segmentation_id', + ): + if attr in provider: + arg = "provider:" + attr + network[arg] = provider[attr] + + # Do not send 'router:external' unless it is explicitly + # set since sending it *might* cause "Forbidden" errors in + # some situations. It defaults to False in the client, anyway. + if external: + network['router:external'] = True + + if port_security_enabled is not None: + if not isinstance(port_security_enabled, bool): + raise exceptions.SDKException( + "Parameter 'port_security_enabled' must be a bool" + ) + network['port_security_enabled'] = port_security_enabled + + if mtu_size: + if not isinstance(mtu_size, int): + raise exceptions.SDKException( + "Parameter 'mtu_size' must be an integer." + ) + if not mtu_size >= 68: + raise exceptions.SDKException( + "Parameter 'mtu_size' must be greater than 67." 
+ ) + + network['mtu'] = mtu_size + + if dns_domain: + network['dns_domain'] = dns_domain + + network = self.network.create_network(**network) + + # Reset cache so the new network is picked up + self._reset_network_caches() + return network + + @_utils.valid_kwargs( + "name", + "shared", + "admin_state_up", + "external", + "provider", + "mtu_size", + "port_security_enabled", + "dns_domain", + ) + def update_network(self, name_or_id, **kwargs): + """Update a network. + + :param string name_or_id: Name or ID of the network being updated. + :param string name: New name of the network. + :param bool shared: Set the network as shared. + :param bool admin_state_up: Set the network administrative state to up. + :param bool external: Whether this network is externally accessible. + :param dict provider: A dict of network provider options. Example:: + + {'network_type': 'vlan', 'segmentation_id': 'vlan1'} + + :param int mtu_size: New maximum transmission unit value to address + fragmentation. Minimum value is 68 for IPv4, and 1280 for IPv6. + :param bool port_security_enabled: Enable or disable port security. + :param string dns_domain: Specify the DNS domain associated with + this network. + + :returns: The updated network ``Network`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + provider = kwargs.pop('provider', None) + if provider: + if not isinstance(provider, dict): + raise exceptions.SDKException( + "Parameter 'provider' must be a dict" + ) + for key in ('physical_network', 'network_type', 'segmentation_id'): + if key in provider: + kwargs['provider:' + key] = provider.pop(key) + + if 'external' in kwargs: + kwargs['router:external'] = kwargs.pop('external') + + if 'port_security_enabled' in kwargs: + if not isinstance(kwargs['port_security_enabled'], bool): + raise exceptions.SDKException( + "Parameter 'port_security_enabled' must be a bool" + ) + + if 'mtu_size' in kwargs: + if not isinstance(kwargs['mtu_size'], int): + raise exceptions.SDKException( + "Parameter 'mtu_size' must be an integer." + ) + if kwargs['mtu_size'] < 68: + raise exceptions.SDKException( + "Parameter 'mtu_size' must be greater than 67." + ) + kwargs['mtu'] = kwargs.pop('mtu_size') + + network = self.get_network(name_or_id) + if not network: + raise exceptions.SDKException(f"Network {name_or_id} not found.") + + network = self.network.update_network(network, **kwargs) + + self._reset_network_caches() + + return network + + def delete_network(self, name_or_id): + """Delete a network. + + :param name_or_id: Name or ID of the network being deleted. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + network = self.get_network(name_or_id) + if not network: + self.log.debug("Network %s not found for deleting", name_or_id) + return False + + self.network.delete_network(network) + + # Reset cache so the deleted network is removed + self._reset_network_caches() + + return True + + def set_network_quotas(self, name_or_id, **kwargs): + """Set a network quota in a project + + :param name_or_id: project name or id + :param kwargs: key/value pairs of quota name and quota value + + :raises: :class:`~openstack.exceptions.SDKException` if the resource to + set the quota does not exist. + """ + identity = utils.ensure_service_version(self.identity, '3') + proj = identity.find_project(name_or_id, ignore_missing=True) + if not proj: + raise exceptions.SDKException( + f"Project {name_or_id} was requested by was not found " + f"on the cloud" + ) + self.network.update_quota(proj.id, **kwargs) + + def get_network_quotas(self, name_or_id, details=False): + """Get network quotas for a project + + :param name_or_id: project name or id + :param details: if set to True it will return details about usage + of quotas by given project + + :returns: A network ``Quota`` object if found, else None. + :raises: :class:`~openstack.exceptions.SDKException` if it's not a + valid project + """ + identity = utils.ensure_service_version(self.identity, '3') + proj = identity.find_project(name_or_id, ignore_missing=True) + if not proj: + raise exc.OpenStackCloudException( + f"Project {name_or_id} was requested by was not found " + f"on the cloud" + ) + return self.network.get_quota(proj.id, details) + + def get_network_extensions(self): + """Get Cloud provided network extensions + + :returns: A set of Neutron extension aliases. 
+ """ + return self._neutron_extensions() + + def delete_network_quotas(self, name_or_id): + """Delete network quotas for a project + + :param name_or_id: project name or id + + :returns: dict with the quotas + :raises: :class:`~openstack.exceptions.SDKException` if it's not a + valid project or the network client call failed + """ + identity = utils.ensure_service_version(self.identity, '3') + proj = identity.find_project(name_or_id, ignore_missing=True) + if not proj: + raise exceptions.SDKException( + f"Project {name_or_id} was requested by was not found " + f"on the cloud" + ) + self.network.delete_quota(proj.id) + + @_utils.valid_kwargs( + 'action', + 'description', + 'destination_firewall_group_id', + 'destination_ip_address', + 'destination_port', + 'enabled', + 'ip_version', + 'name', + 'project_id', + 'protocol', + 'shared', + 'source_firewall_group_id', + 'source_ip_address', + 'source_port', + ) + def create_firewall_rule(self, **kwargs): + """ + Creates firewall rule. + + :param action: Action performed on traffic. + Valid values: allow, deny + Defaults to deny. + :param description: Human-readable description. + :param destination_firewall_group_id: ID of destination firewall group. + :param destination_ip_address: IPv4-, IPv6 address or CIDR. + :param destination_port: Port or port range (e.g. 80:90) + :param bool enabled: Status of firewall rule. You can disable rules + without disassociating them from firewall policies. Defaults to + True. + :param int ip_version: IP Version. Valid values: 4, 6 Defaults to 4. + :param name: Human-readable name. + :param project_id: Project id. + :param protocol: IP protocol. Valid values: icmp, tcp, udp, null + :param bool shared: Visibility to other projects. Defaults to False. + :param source_firewall_group_id: ID of source firewall group. + :param source_ip_address: IPv4-, IPv6 address or CIDR. + :param source_port: Port or port range (e.g. 
80:90) + :raises: BadRequestException if parameters are malformed + :returns: The created network ``FirewallRule`` object. + """ + return self.network.create_firewall_rule(**kwargs) + + def delete_firewall_rule(self, name_or_id, filters=None): + """ + Deletes firewall rule. + + Prints debug message in case to-be-deleted resource was not found. + + :param name_or_id: firewall rule name or id + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :raises: DuplicateResource on multiple matches + :returns: True if resource is successfully deleted, False otherwise. + :rtype: bool + """ + if not filters: + filters = {} + try: + firewall_rule = self.network.find_firewall_rule( + name_or_id, ignore_missing=False, **filters + ) + self.network.delete_firewall_rule( + firewall_rule, ignore_missing=False + ) + except exceptions.NotFoundException: + self.log.debug( + 'Firewall rule %s not found for deleting', name_or_id + ) + return False + return True + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_firewall_rule(self, name_or_id, filters=None): + """ + Retrieves a single firewall rule. + + :param name_or_id: firewall rule name or id + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :raises: DuplicateResource on multiple matches + :returns: A network ``FirewallRule`` object if found, else None. 
+ """ + if not filters: + filters = {} + return self.network.find_firewall_rule( + name_or_id, ignore_missing=True, **filters + ) + + def list_firewall_rules(self, filters=None): + """ + Lists firewall rules. + + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of network ``FirewallRule`` objects. + :rtype: list[FirewallRule] + """ + if not filters: + filters = {} + return list(self.network.firewall_rules(**filters)) + + @_utils.valid_kwargs( + 'action', + 'description', + 'destination_firewall_group_id', + 'destination_ip_address', + 'destination_port', + 'enabled', + 'ip_version', + 'name', + 'project_id', + 'protocol', + 'shared', + 'source_firewall_group_id', + 'source_ip_address', + 'source_port', + ) + def update_firewall_rule(self, name_or_id, filters=None, **kwargs): + """ + Updates firewall rule. + + :param name_or_id: firewall rule name or id + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :param kwargs: firewall rule update parameters. + See create_firewall_rule docstring for valid parameters. + :returns: The updated network ``FirewallRule`` object. 
+ :raises: BadRequestException if parameters are malformed + :raises: NotFoundException if resource is not found + """ + if not filters: + filters = {} + firewall_rule = self.network.find_firewall_rule( + name_or_id, ignore_missing=False, **filters + ) + + return self.network.update_firewall_rule(firewall_rule, **kwargs) + + def _get_firewall_rule_ids(self, name_or_id_list, filters=None): + """ + Takes a list of firewall rule name or ids, looks them up and returns + a list of firewall rule ids. + + Used by `create_firewall_policy` and `update_firewall_policy`. + + :param list[str] name_or_id_list: firewall rule name or id list + :param dict filters: optional filters + :raises: DuplicateResource on multiple matches + :raises: NotFoundException if resource is not found + :return: list of firewall rule ids + :rtype: list[str] + """ + if not filters: + filters = {} + ids_list = [] + for name_or_id in name_or_id_list: + ids_list.append( + self.network.find_firewall_rule( + name_or_id, ignore_missing=False, **filters + )['id'] + ) + return ids_list + + @_utils.valid_kwargs( + 'audited', + 'description', + 'firewall_rules', + 'name', + 'project_id', + 'shared', + ) + def create_firewall_policy(self, **kwargs): + """ + Create firewall policy. + + :param bool audited: Status of audition of firewall policy. + Set to False each time the firewall policy or the associated + firewall rules are changed. Has to be explicitly set to True. + :param description: Human-readable description. + :param list[str] firewall_rules: List of associated firewall rules. + :param name: Human-readable name. + :param project_id: Project id. + :param bool shared: Visibility to other projects. + Defaults to False. + :raises: BadRequestException if parameters are malformed + :raises: NotFoundException if a resource from firewall_list not found + :returns: The created network ``FirewallPolicy`` object. 
+ """ + if 'firewall_rules' in kwargs: + kwargs['firewall_rules'] = self._get_firewall_rule_ids( + kwargs['firewall_rules'] + ) + + return self.network.create_firewall_policy(**kwargs) + + def delete_firewall_policy(self, name_or_id, filters=None): + """ + Deletes firewall policy. + Prints debug message in case to-be-deleted resource was not found. + + :param name_or_id: firewall policy name or id + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :raises: DuplicateResource on multiple matches + :returns: True if resource is successfully deleted, False otherwise. + :rtype: bool + """ + if not filters: + filters = {} + try: + firewall_policy = self.network.find_firewall_policy( + name_or_id, ignore_missing=False, **filters + ) + self.network.delete_firewall_policy( + firewall_policy, ignore_missing=False + ) + except exceptions.NotFoundException: + self.log.debug( + 'Firewall policy %s not found for deleting', name_or_id + ) + return False + return True + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_firewall_policy(self, name_or_id, filters=None): + """ + Retrieves a single firewall policy. + + :param name_or_id: firewall policy name or id + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :raises: DuplicateResource on multiple matches + :returns: A network ``FirewallPolicy`` object if found, else None. 
+ """ + if not filters: + filters = {} + return self.network.find_firewall_policy( + name_or_id, ignore_missing=True, **filters + ) + + def list_firewall_policies(self, filters=None): + """ + Lists firewall policies. + + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of network ``FirewallPolicy`` objects. + :rtype: list[FirewallPolicy] + """ + if not filters: + filters = {} + return list(self.network.firewall_policies(**filters)) + + @_utils.valid_kwargs( + 'audited', + 'description', + 'firewall_rules', + 'name', + 'project_id', + 'shared', + ) + def update_firewall_policy(self, name_or_id, filters=None, **kwargs): + """ + Updates firewall policy. + + :param name_or_id: firewall policy name or id + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. + Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :param kwargs: firewall policy update parameters + See create_firewall_policy docstring for valid parameters. + :returns: The updated network ``FirewallPolicy`` object. 
+ :raises: BadRequestException if parameters are malformed + :raises: DuplicateResource on multiple matches + :raises: NotFoundException if resource is not found + """ + if not filters: + filters = {} + firewall_policy = self.network.find_firewall_policy( + name_or_id, ignore_missing=False, **filters + ) + + if 'firewall_rules' in kwargs: + kwargs['firewall_rules'] = self._get_firewall_rule_ids( + kwargs['firewall_rules'] + ) + + return self.network.update_firewall_policy(firewall_policy, **kwargs) + + def insert_rule_into_policy( + self, + name_or_id, + rule_name_or_id, + insert_after=None, + insert_before=None, + filters=None, + ): + """Add firewall rule to a policy. + + Adds firewall rule to the firewall_rules list of a firewall policy. + Short-circuits and returns the firewall policy early if the firewall + rule id is already present in the firewall_rules list. + This method doesn't do re-ordering. If you want to move a firewall rule + or down the list, you have to remove and re-add it. + + :param name_or_id: firewall policy name or id + :param rule_name_or_id: firewall rule name or id + :param insert_after: rule name or id that should precede added rule + :param insert_before: rule name or id that should succeed added rule + :param dict filters: optional filters + :raises: DuplicateResource on multiple matches + :raises: NotFoundException if firewall policy or any of the firewall + rules (inserted, after, before) is not found. 
+ :return: updated firewall policy + :rtype: FirewallPolicy + """ + if not filters: + filters = {} + firewall_policy = self.network.find_firewall_policy( + name_or_id, ignore_missing=False, **filters + ) + + firewall_rule = self.network.find_firewall_rule( + rule_name_or_id, ignore_missing=False + ) + # short-circuit if rule already in firewall_rules list + # the API can't do any re-ordering of existing rules + if firewall_rule['id'] in firewall_policy['firewall_rules']: + self.log.debug( + 'Firewall rule %s already associated with firewall policy %s', + rule_name_or_id, + name_or_id, + ) + return firewall_policy + + pos_params = {} + if insert_after is not None: + pos_params['insert_after'] = self.network.find_firewall_rule( + insert_after, ignore_missing=False + )['id'] + + if insert_before is not None: + pos_params['insert_before'] = self.network.find_firewall_rule( + insert_before, ignore_missing=False + )['id'] + + return self.network.insert_rule_into_policy( + firewall_policy['id'], firewall_rule['id'], **pos_params + ) + + def remove_rule_from_policy( + self, name_or_id, rule_name_or_id, filters=None + ): + """ + Remove firewall rule from firewall policy's firewall_rules list. + Short-circuits and returns firewall policy early if firewall rule + is already absent from the firewall_rules list. 
+ + :param name_or_id: firewall policy name or id + :param rule_name_or_id: firewall rule name or id + :param dict filters: optional filters + :raises: DuplicateResource on multiple matches + :raises: NotFoundException if firewall policy is not found + :return: updated firewall policy + :rtype: FirewallPolicy + """ + if not filters: + filters = {} + firewall_policy = self.network.find_firewall_policy( + name_or_id, ignore_missing=False, **filters + ) + + firewall_rule = self.network.find_firewall_rule(rule_name_or_id) + if not firewall_rule: + # short-circuit: if firewall rule is not found, + # return current firewall policy + self.log.debug( + 'Firewall rule %s not found for removing', rule_name_or_id + ) + return firewall_policy + + if firewall_rule['id'] not in firewall_policy['firewall_rules']: + # short-circuit: if firewall rule id is not associated, + # log it to debug and return current firewall policy + self.log.debug( + 'Firewall rule %s not associated with firewall policy %s', + rule_name_or_id, + name_or_id, + ) + return firewall_policy + + return self.network.remove_rule_from_policy( + firewall_policy['id'], firewall_rule['id'] + ) + + @_utils.valid_kwargs( + 'admin_state_up', + 'description', + 'egress_firewall_policy', + 'ingress_firewall_policy', + 'name', + 'ports', + 'project_id', + 'shared', + ) + def create_firewall_group(self, **kwargs): + """ + Creates firewall group. The keys egress_firewall_policy and + ingress_firewall_policy are looked up and mapped as + egress_firewall_policy_id and ingress_firewall_policy_id respectively. + Port name or ids list is transformed to port ids list before the POST + request. + + :param bool admin_state_up: State of firewall group. + Will block all traffic if set to False. Defaults to True. + :param description: Human-readable description. + :param egress_firewall_policy: Name or id of egress firewall policy. + :param ingress_firewall_policy: Name or id of ingress firewall policy. 
+ :param name: Human-readable name. + :param list[str] ports: List of associated ports (name or id) + :param project_id: Project id. + :param shared: Visibility to other projects. Defaults to False. + :raises: BadRequestException if parameters are malformed + :raises: DuplicateResource on multiple matches + :raises: NotFoundException if (ingress-, egress-) firewall policy or + a port is not found. + :returns: The created network ``FirewallGroup`` object. + """ + self._lookup_ingress_egress_firewall_policy_ids(kwargs) + if 'ports' in kwargs: + kwargs['ports'] = self._get_port_ids(kwargs['ports']) + return self.network.create_firewall_group(**kwargs) + + def delete_firewall_group(self, name_or_id, filters=None): + """ + Deletes firewall group. + Prints debug message in case to-be-deleted resource was not found. + + :param name_or_id: firewall group name or id + :param dict filters: optional filters + :raises: DuplicateResource on multiple matches + :returns: True if resource is successfully deleted, False otherwise. + :rtype: bool + """ + if not filters: + filters = {} + try: + firewall_group = self.network.find_firewall_group( + name_or_id, ignore_missing=False, **filters + ) + self.network.delete_firewall_group( + firewall_group, ignore_missing=False + ) + except exceptions.NotFoundException: + self.log.debug( + 'Firewall group %s not found for deleting', name_or_id + ) + return False + return True + + # TODO(stephenfin): Deprecate 'filters'; users should use 'list' for this + def get_firewall_group(self, name_or_id, filters=None): + """ + Retrieves firewall group. + + :param name_or_id: firewall group name or id + :param dict filters: optional filters + :raises: DuplicateResource on multiple matches + :returns: A network ``FirewallGroup`` object if found, else None. 
+ """ + if not filters: + filters = {} + return self.network.find_firewall_group( + name_or_id, ignore_missing=True, **filters + ) + + def list_firewall_groups(self, filters=None): + """ + Lists firewall groups. + + :returns: A list of network ``FirewallGroup`` objects. + """ + if not filters: + filters = {} + return list(self.network.firewall_groups(**filters)) + + @_utils.valid_kwargs( + 'admin_state_up', + 'description', + 'egress_firewall_policy', + 'ingress_firewall_policy', + 'name', + 'ports', + 'project_id', + 'shared', + ) + def update_firewall_group(self, name_or_id, filters=None, **kwargs): + """ + Updates firewall group. + To unset egress- or ingress firewall policy, set egress_firewall_policy + or ingress_firewall_policy to None. You can also set + egress_firewall_policy_id and ingress_firewall_policy_id directly, + which will skip the policy lookups. + + :param name_or_id: firewall group name or id + :param dict filters: optional filters + :param kwargs: firewall group update parameters + See create_firewall_group docstring for valid parameters. + :returns: The updated network ``FirewallGroup`` object. + :raises: BadRequestException if parameters are malformed + :raises: DuplicateResource on multiple matches + :raises: NotFoundException if firewall group, a firewall policy + (egress, ingress) or port is not found + """ + if not filters: + filters = {} + firewall_group = self.network.find_firewall_group( + name_or_id, ignore_missing=False, **filters + ) + self._lookup_ingress_egress_firewall_policy_ids(kwargs) + + if 'ports' in kwargs: + kwargs['ports'] = self._get_port_ids(kwargs['ports']) + return self.network.update_firewall_group(firewall_group, **kwargs) + + def _lookup_ingress_egress_firewall_policy_ids(self, firewall_group): + """ + Transforms firewall_group dict IN-PLACE. 
Takes the value of the keys + egress_firewall_policy and ingress_firewall_policy, looks up the + policy ids and maps them to egress_firewall_policy_id and + ingress_firewall_policy_id. Old keys which were used for the lookup + are deleted. + + :param dict firewall_group: firewall group dict + :raises: DuplicateResource on multiple matches + :raises: NotFoundException if a firewall policy is not found + """ + for key in ('egress_firewall_policy', 'ingress_firewall_policy'): + if key not in firewall_group: + continue + if firewall_group[key] is None: + val = None + else: + val = self.network.find_firewall_policy( + firewall_group[key], ignore_missing=False + )['id'] + firewall_group[key + '_id'] = val + del firewall_group[key] + + @_utils.valid_kwargs( + "name", "description", "shared", "default", "project_id" + ) + def create_qos_policy(self, **kwargs): + """Create a QoS policy. + + :param string name: Name of the QoS policy being created. + :param string description: Description of created QoS policy. + :param bool shared: Set the QoS policy as shared. + :param bool default: Set the QoS policy as default for project. + :param string project_id: Specify the project ID this QoS policy + will be created on (admin-only). + + :returns: The created network ``QosPolicy`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + default = kwargs.pop("default", None) + if default is not None: + if self._has_neutron_extension('qos-default'): + kwargs['is_default'] = default + else: + self.log.debug( + "'qos-default' extension is not available on target cloud" + ) + + return self.network.create_qos_policy(**kwargs) + + @_utils.valid_kwargs( + "name", "description", "shared", "default", "project_id" + ) + def update_qos_policy(self, name_or_id, **kwargs): + """Update an existing QoS policy. 
+ + :param string name_or_id: Name or ID of the QoS policy to update. + :param string policy_name: The new name of the QoS policy. + :param string description: The new description of the QoS policy. + :param bool shared: If True, the QoS policy will be set as shared. + :param bool default: If True, the QoS policy will be set as default for + project. + + :returns: The updated network ``QosPolicyRule`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + default = kwargs.pop("default", None) + if default is not None: + if self._has_neutron_extension('qos-default'): + kwargs['is_default'] = default + else: + self.log.debug( + "'qos-default' extension is not available on target cloud" + ) + + if not kwargs: + self.log.debug("No QoS policy data to update") + return + + curr_policy = self.network.find_qos_policy( + name_or_id, ignore_missing=True + ) + if not curr_policy: + raise exceptions.SDKException( + f"QoS policy {name_or_id} not found." + ) + + return self.network.update_qos_policy(curr_policy, **kwargs) + + def delete_qos_policy(self, name_or_id): + """Delete a QoS policy. + + :param name_or_id: Name or ID of the policy being deleted. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + policy = self.network.find_qos_policy(name_or_id, ignore_missing=True) + if not policy: + self.log.debug("QoS policy %s not found for deleting", name_or_id) + return False + + self.network.delete_qos_policy(policy) + + return True + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_qos_bandwidth_limit_rules( + self, + policy_name_or_id, + rule_id=None, + filters=None, + ): + """Search QoS bandwidth limit rules + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rules should be associated. + :param string rule_id: ID of searched rule. + :param filters: a dict containing additional filters to use. e.g. + {'max_kbps': 1000} + + :returns: A list of network ``QoSBandwidthLimitRule`` objects matching + the search criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + rules = self.list_qos_bandwidth_limit_rules(policy_name_or_id, filters) + return _utils._filter_list(rules, rule_id, filters) + + def list_qos_bandwidth_limit_rules(self, policy_name_or_id, filters=None): + """List all available QoS bandwidth limit rules. + + :param string policy_name_or_id: Name or ID of the QoS policy from + from rules should be listed. + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``QoSBandwidthLimitRule`` objects. + :raises: ``:class:`~openstack.exceptions.BadRequestException``` if QoS + policy will not be found. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." 
+ ) + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + + return list( + self.network.qos_bandwidth_limit_rules( + qos_policy=policy, **filters + ) + ) + + def get_qos_bandwidth_limit_rule(self, policy_name_or_id, rule_id): + """Get a QoS bandwidth limit rule by name or ID. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param rule_id: ID of the rule. + :returns: A network ``QoSBandwidthLimitRule`` object if found, else + None. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." + ) + + return self.network.get_qos_bandwidth_limit_rule(rule_id, policy) + + @_utils.valid_kwargs("max_burst_kbps", "direction") + def create_qos_bandwidth_limit_rule( + self, + policy_name_or_id, + max_kbps, + **kwargs, + ): + """Create a QoS bandwidth limit rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param int max_kbps: Maximum bandwidth limit value + (in kilobits per second). + :param int max_burst_kbps: Maximum burst value (in kilobits). + :param string direction: Ingress or egress. + The direction in which the traffic will be limited. + + :returns: The created network ``QoSBandwidthLimitRule`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." 
+ ) + + if kwargs.get("direction") is not None: + if not self._has_neutron_extension('qos-bw-limit-direction'): + kwargs.pop("direction") + self.log.debug( + "'qos-bw-limit-direction' extension is not available on " + "target cloud" + ) + + kwargs['max_kbps'] = max_kbps + + return self.network.create_qos_bandwidth_limit_rule(policy, **kwargs) + + @_utils.valid_kwargs("max_kbps", "max_burst_kbps", "direction") + def update_qos_bandwidth_limit_rule( + self, policy_name_or_id, rule_id, **kwargs + ): + """Update a QoS bandwidth limit rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + :param int max_kbps: Maximum bandwidth limit value + (in kilobits per second). + :param int max_burst_kbps: Maximum burst value (in kilobits). + :param string direction: Ingress or egress. + The direction in which the traffic will be limited. + + :returns: The updated network ``QoSBandwidthLimitRule`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." 
+ ) + + if kwargs.get("direction") is not None: + if not self._has_neutron_extension('qos-bw-limit-direction'): + kwargs.pop("direction") + self.log.debug( + "'qos-bw-limit-direction' extension is not available on " + "target cloud" + ) + + if not kwargs: + self.log.debug("No QoS bandwidth limit rule data to update") + return + + curr_rule = self.network.get_qos_bandwidth_limit_rule( + qos_rule=rule_id, qos_policy=policy + ) + if not curr_rule: + raise exceptions.SDKException( + "QoS bandwidth_limit_rule {rule_id} not found in policy " + "{policy_id}".format(rule_id=rule_id, policy_id=policy['id']) + ) + + return self.network.update_qos_bandwidth_limit_rule( + qos_rule=curr_rule, qos_policy=policy, **kwargs + ) + + def delete_qos_bandwidth_limit_rule(self, policy_name_or_id, rule_id): + """Delete a QoS bandwidth limit rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." + ) + + try: + self.network.delete_qos_bandwidth_limit_rule( + rule_id, policy, ignore_missing=False + ) + except exceptions.NotFoundException: + self.log.debug( + "QoS bandwidth limit rule {rule_id} not found in policy " + "{policy_id}. 
Ignoring.".format( + rule_id=rule_id, policy_id=policy['id'] + ) + ) + return False + + return True + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_qos_dscp_marking_rules( + self, + policy_name_or_id, + rule_id=None, + filters=None, + ): + """Search QoS DSCP marking rules + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rules should be associated. + :param string rule_id: ID of searched rule. + :param filters: a dict containing additional filters to use. e.g. + {'dscp_mark': 32} + + :returns: A list of network ``QoSDSCPMarkingRule`` objects matching the + search criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + rules = self.list_qos_dscp_marking_rules(policy_name_or_id, filters) + return _utils._filter_list(rules, rule_id, filters) + + def list_qos_dscp_marking_rules(self, policy_name_or_id, filters=None): + """List all available QoS DSCP marking rules. + + :param string policy_name_or_id: Name or ID of the QoS policy from + from rules should be listed. + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``QoSDSCPMarkingRule`` objects. + :raises: ``:class:`~openstack.exceptions.BadRequestException``` if QoS + policy will not be found. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." + ) + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + + return list(self.network.qos_dscp_marking_rules(policy, **filters)) + + def get_qos_dscp_marking_rule(self, policy_name_or_id, rule_id): + """Get a QoS DSCP marking rule by name or ID. 
+ + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param rule_id: ID of the rule. + :returns: A network ``QoSDSCPMarkingRule`` object if found, else None. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." + ) + + return self.network.get_qos_dscp_marking_rule(rule_id, policy) + + def create_qos_dscp_marking_rule( + self, + policy_name_or_id, + dscp_mark, + ): + """Create a QoS DSCP marking rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param int dscp_mark: DSCP mark value + + :returns: The created network ``QoSDSCPMarkingRule`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." + ) + + return self.network.create_qos_dscp_marking_rule( + policy, dscp_mark=dscp_mark + ) + + @_utils.valid_kwargs("dscp_mark") + def update_qos_dscp_marking_rule( + self, policy_name_or_id, rule_id, **kwargs + ): + """Update a QoS DSCP marking rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + :param int dscp_mark: DSCP mark value + + :returns: The updated network ``QoSDSCPMarkingRule`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." + ) + + if not kwargs: + self.log.debug("No QoS DSCP marking rule data to update") + return + + curr_rule = self.network.get_qos_dscp_marking_rule(rule_id, policy) + if not curr_rule: + raise exceptions.SDKException( + "QoS dscp_marking_rule {rule_id} not found in policy " + "{policy_id}".format(rule_id=rule_id, policy_id=policy['id']) + ) + + return self.network.update_qos_dscp_marking_rule( + curr_rule, policy, **kwargs + ) + + def delete_qos_dscp_marking_rule(self, policy_name_or_id, rule_id): + """Delete a QoS DSCP marking rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." + ) + + try: + self.network.delete_qos_dscp_marking_rule( + rule_id, policy, ignore_missing=False + ) + except exceptions.NotFoundException: + self.log.debug( + "QoS DSCP marking rule {rule_id} not found in policy " + "{policy_id}. 
Ignoring.".format( + rule_id=rule_id, policy_id=policy['id'] + ) + ) + return False + + return True + + # TODO(stephenfin): Deprecate this in favour of the 'list' function + def search_qos_minimum_bandwidth_rules( + self, + policy_name_or_id, + rule_id=None, + filters=None, + ): + """Search QoS minimum bandwidth rules + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rules should be associated. + :param string rule_id: ID of searched rule. + :param filters: a dict containing additional filters to use. e.g. + {'min_kbps': 1000} + + :returns: A list of network ``QoSMinimumBandwidthRule`` objects + matching the search criteria. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + rules = self.list_qos_minimum_bandwidth_rules( + policy_name_or_id, filters + ) + return _utils._filter_list(rules, rule_id, filters) + + def list_qos_minimum_bandwidth_rules( + self, policy_name_or_id, filters=None + ): + """List all available QoS minimum bandwidth rules. + + :param string policy_name_or_id: Name or ID of the QoS policy from + from rules should be listed. + :param filters: (optional) A dict of filter conditions to push down + :returns: A list of network ``QoSMinimumBandwidthRule`` objects. + :raises: ``:class:`~openstack.exceptions.BadRequestException``` if QoS + policy will not be found. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=True + ) + if not policy: + raise exceptions.NotFoundException( + f"QoS policy {policy_name_or_id} not Found." 
+ ) + + # Translate None from search interface to empty {} for kwargs below + if not filters: + filters = {} + + return list( + self.network.qos_minimum_bandwidth_rules(policy, **filters) + ) + + def get_qos_minimum_bandwidth_rule(self, policy_name_or_id, rule_id): + """Get a QoS minimum bandwidth rule by name or ID. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param rule_id: ID of the rule. + :returns: A network ``QoSMinimumBandwidthRule`` object if found, else + None. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=False + ) + + return self.network.get_qos_minimum_bandwidth_rule(rule_id, policy) + + @_utils.valid_kwargs("direction") + def create_qos_minimum_bandwidth_rule( + self, + policy_name_or_id, + min_kbps, + **kwargs, + ): + """Create a QoS minimum bandwidth limit rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule should be associated. + :param int min_kbps: Minimum bandwidth value (in kilobits per second). + :param string direction: Ingress or egress. + The direction in which the traffic will be available. + + :returns: The created network ``QoSMinimumBandwidthRule`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=False + ) + + kwargs['min_kbps'] = min_kbps + + return self.network.create_qos_minimum_bandwidth_rule(policy, **kwargs) + + @_utils.valid_kwargs("min_kbps", "direction") + def update_qos_minimum_bandwidth_rule( + self, policy_name_or_id, rule_id, **kwargs + ): + """Update a QoS minimum bandwidth rule. 
+ + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to update. + :param int min_kbps: Minimum bandwidth value (in kilobits per second). + :param string direction: Ingress or egress. + The direction in which the traffic will be available. + + :returns: The updated network ``QoSMinimumBandwidthRule`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=False + ) + + if not kwargs: + self.log.debug("No QoS minimum bandwidth rule data to update") + return + + curr_rule = self.network.get_qos_minimum_bandwidth_rule( + rule_id, policy + ) + if not curr_rule: + raise exceptions.SDKException( + "QoS minimum_bandwidth_rule {rule_id} not found in policy " + "{policy_id}".format(rule_id=rule_id, policy_id=policy['id']) + ) + + return self.network.update_qos_minimum_bandwidth_rule( + curr_rule, policy, **kwargs + ) + + def delete_qos_minimum_bandwidth_rule(self, policy_name_or_id, rule_id): + """Delete a QoS minimum bandwidth rule. + + :param string policy_name_or_id: Name or ID of the QoS policy to which + rule is associated. + :param string rule_id: ID of rule to delete. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not self._has_neutron_extension('qos'): + raise exc.OpenStackCloudUnavailableExtension( + 'QoS extension is not available on target cloud' + ) + + policy = self.network.find_qos_policy( + policy_name_or_id, ignore_missing=False + ) + + try: + self.network.delete_qos_minimum_bandwidth_rule( + rule_id, policy, ignore_missing=False + ) + except exceptions.NotFoundException: + self.log.debug( + "QoS minimum bandwidth rule {rule_id} not found in policy " + "{policy_id}. 
Ignoring.".format( + rule_id=rule_id, policy_id=policy['id'] + ) + ) + return False + + return True + + def add_router_interface(self, router, subnet_id=None, port_id=None): + """Attach a subnet to an internal router interface. + + Either a subnet ID or port ID must be specified for the internal + interface. Supplying both will result in an error. + + :param dict router: The dict object of the router being changed + :param string subnet_id: The ID of the subnet to use for the interface + :param string port_id: The ID of the port to use for the interface + + :returns: The raw response body from the request. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + return self.network.add_interface_to_router( + router=router, subnet_id=subnet_id, port_id=port_id + ) + + def remove_router_interface(self, router, subnet_id=None, port_id=None): + """Detach a subnet from an internal router interface. + + At least one of subnet_id or port_id must be supplied. + + If you specify both subnet and port ID, the subnet ID must + correspond to the subnet ID of the first IP address on the port + specified by the port ID. Otherwise an error occurs. + + :param dict router: The dict object of the router being changed + :param string subnet_id: The ID of the subnet to use for the interface + :param string port_id: The ID of the port to use for the interface + + :returns: None on success + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if not subnet_id and not port_id: + raise ValueError( + "At least one of subnet_id or port_id must be supplied." + ) + + self.network.remove_interface_from_router( + router=router, subnet_id=subnet_id, port_id=port_id + ) + + def list_router_interfaces(self, router, interface_type=None): + """List all interfaces for a router. + + :param dict router: A router dict object. + :param string interface_type: One of None, "internal", or "external". 
+ Controls whether all, internal interfaces or external interfaces + are returned. + :returns: A list of network ``Port`` objects. + """ + # Find only router interface and gateway ports, ignore L3 HA ports etc. + ports = list(self.network.ports(device_id=router['id'])) + + router_interfaces = ( + [ + port + for port in ports + if ( + port['device_owner'] + in [ + 'network:router_interface', + 'network:router_interface_distributed', + 'network:ha_router_replicated_interface', + ] + ) + ] + if not interface_type or interface_type == 'internal' + else [] + ) + + router_gateways = ( + [ + port + for port in ports + if port['device_owner'] == 'network:router_gateway' + ] + if not interface_type or interface_type == 'external' + else [] + ) + + return router_interfaces + router_gateways + + def create_router( + self, + name=None, + admin_state_up=True, + ext_gateway_net_id=None, + enable_snat=None, + ext_fixed_ips=None, + project_id=None, + availability_zone_hints=None, + ): + """Create a logical router. + + :param string name: The router name. + :param bool admin_state_up: The administrative state of the router. + :param string ext_gateway_net_id: Network ID for the external gateway. + :param bool enable_snat: Enable Source NAT (SNAT) attribute. + :param ext_fixed_ips: + List of dictionaries of desired IP and/or subnet on the + external network. Example:: + + [ + { + "subnet_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", + "ip_address": "192.168.10.2", + } + ] + + :param string project_id: Project ID for the router. + :param types.ListType availability_zone_hints: A list of availability + zone hints. + + :returns: The created network ``Router`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + router = {'admin_state_up': admin_state_up} + if project_id is not None: + router['project_id'] = project_id + if name: + router['name'] = name + ext_gw_info = self._build_external_gateway_info( + ext_gateway_net_id, enable_snat, ext_fixed_ips + ) + if ext_gw_info: + router['external_gateway_info'] = ext_gw_info + if availability_zone_hints is not None: + if not isinstance(availability_zone_hints, list): + raise exceptions.SDKException( + "Parameter 'availability_zone_hints' must be a list" + ) + if not self._has_neutron_extension('router_availability_zone'): + raise exc.OpenStackCloudUnavailableExtension( + 'router_availability_zone extension is not available on ' + 'target cloud' + ) + router['availability_zone_hints'] = availability_zone_hints + + return self.network.create_router(**router) + + def update_router( + self, + name_or_id, + name=None, + admin_state_up=None, + ext_gateway_net_id=None, + enable_snat=None, + ext_fixed_ips=None, + routes=None, + ): + """Update an existing logical router. + + :param string name_or_id: The name or UUID of the router to update. + :param string name: The new router name. + :param bool admin_state_up: The administrative state of the router. + :param string ext_gateway_net_id: + The network ID for the external gateway. + :param bool enable_snat: Enable Source NAT (SNAT) attribute. + :param ext_fixed_ips: + List of dictionaries of desired IP and/or subnet on the + external network. Example:: + + [ + { + "subnet_id": "8ca37218-28ff-41cb-9b10-039601ea7e6b", + "ip_address": "192.168.10.2", + } + ] + + :param list routes: + A list of dictionaries with destination and nexthop parameters. To + clear all routes pass an empty list ([]). + + Example:: + + [{"destination": "179.24.1.0/24", "nexthop": "172.24.3.99"}] + + :returns: The updated network ``Router`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + router = {} + if name: + router['name'] = name + if admin_state_up is not None: + router['admin_state_up'] = admin_state_up + ext_gw_info = self._build_external_gateway_info( + ext_gateway_net_id, enable_snat, ext_fixed_ips + ) + if ext_gw_info: + router['external_gateway_info'] = ext_gw_info + + if routes is not None: + if self._has_neutron_extension('extraroute'): + router['routes'] = routes + else: + self.log.warning( + 'extra routes extension is not available on target cloud' + ) + + if not router: + self.log.debug("No router data to update") + return + + curr_router = self.get_router(name_or_id) + if not curr_router: + raise exceptions.SDKException(f"Router {name_or_id} not found.") + + return self.network.update_router(curr_router, **router) + + def delete_router(self, name_or_id): + """Delete a logical router. + + If a name, instead of a unique UUID, is supplied, it is possible + that we could find more than one matching router since names are + not required to be unique. An error will be raised in this case. + + :param name_or_id: Name or ID of the router being deleted. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + router = self.network.find_router(name_or_id, ignore_missing=True) + if not router: + self.log.debug("Router %s not found for deleting", name_or_id) + return False + + self.network.delete_router(router) + + return True + + def create_subnet( + self, + network_name_or_id, + cidr=None, + ip_version=4, + enable_dhcp=False, + subnet_name=None, + tenant_id=None, + allocation_pools=None, + gateway_ip=None, + disable_gateway_ip=False, + dns_nameservers=None, + host_routes=None, + ipv6_ra_mode=None, + ipv6_address_mode=None, + prefixlen=None, + use_default_subnetpool=False, + subnetpool_name_or_id=None, + **kwargs, + ): + """Create a subnet on a specified network. + + :param string network_name_or_id: The unique name or ID of the attached + network. 
If a non-unique name is supplied, an exception is raised. + :param string cidr: The CIDR. Only one of ``cidr``, + ``use_default_subnetpool`` and ``subnetpool_name_or_id`` may be + specified at the same time. + :param int ip_version: The IP version, which is 4 or 6. + :param bool enable_dhcp: Set to ``True`` if DHCP is enabled and + ``False`` if disabled. Default is ``False``. + :param string subnet_name: The name of the subnet. + :param string tenant_id: The ID of the tenant who owns the network. + Only administrative users can specify a tenant ID other than their + own. + :param allocation_pools: A list of dictionaries of the start and end + addresses for the allocation pools. For example:: + + [{"start": "192.168.199.2", "end": "192.168.199.254"}] + + :param string gateway_ip: The gateway IP address. When you specify both + allocation_pools and gateway_ip, you must ensure that the gateway + IP does not overlap with the specified allocation pools. + :param bool disable_gateway_ip: Set to ``True`` if gateway IP address + is disabled and ``False`` if enabled. It is not allowed with + gateway_ip. Default is ``False``. + :param dns_nameservers: A list of DNS name servers for the subnet. For + example:: + + ["8.8.8.7", "8.8.8.8"] + + :param host_routes: A list of host route dictionaries for the subnet. + For example:: + + [ + {"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}, + {"destination": "192.168.0.0/24", "nexthop": "192.168.0.1"}, + ] + + :param string ipv6_ra_mode: IPv6 Router Advertisement mode. Valid + values are: 'dhcpv6-stateful', 'dhcpv6-stateless', or 'slaac'. + :param string ipv6_address_mode: IPv6 address mode. Valid values are: + 'dhcpv6-stateful', 'dhcpv6-stateless', or 'slaac'. + :param string prefixlen: The prefix length to use for subnet allocation + from a subnetpool. + :param bool use_default_subnetpool: Use the default subnetpool for + ``ip_version`` to obtain a CIDR. 
Only one of ``cidr``, + ``use_default_subnetpool`` and ``subnetpool_name_or_id`` may be + specified at the same time. + :param string subnetpool_name_or_id: The unique name or id of the + subnetpool to obtain a CIDR from. Only one of ``cidr``, + ``use_default_subnetpool`` and ``subnetpool_name_or_id`` may be + specified at the same time. + :param kwargs: Key value pairs to be passed to the Neutron API. + + :returns: The created network ``Subnet`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + + if tenant_id is not None: + filters = {'tenant_id': tenant_id} + else: + filters = None + + network = self.get_network(network_name_or_id, filters) + if not network: + raise exceptions.SDKException( + f"Network {network_name_or_id} not found." + ) + + if disable_gateway_ip and gateway_ip: + raise exceptions.SDKException( + 'arg:disable_gateway_ip is not allowed with arg:gateway_ip' + ) + + uses_subnetpool = use_default_subnetpool or subnetpool_name_or_id + if not cidr and not uses_subnetpool: + raise exceptions.SDKException( + 'arg:cidr is required when a subnetpool is not used' + ) + + if cidr and uses_subnetpool: + raise exceptions.SDKException( + 'arg:cidr and subnetpool may not be used at the same time' + ) + + if use_default_subnetpool and subnetpool_name_or_id: + raise exceptions.SDKException( + 'arg:use_default_subnetpool and arg:subnetpool_id may not be ' + 'used at the same time' + ) + + subnetpool = None + if subnetpool_name_or_id: + subnetpool = self.get_subnetpool(subnetpool_name_or_id) + if not subnetpool: + raise exceptions.SDKException( + f"Subnetpool {subnetpool_name_or_id} not found." + ) + + # Be friendly on ip_version and allow strings + if isinstance(ip_version, str): + try: + ip_version = int(ip_version) + except ValueError: + raise exceptions.SDKException('ip_version must be an integer') + + # The body of the neutron message for the subnet we wish to create. 
+ # This includes attributes that are required or have defaults. + subnet = dict( + { + 'network_id': network['id'], + 'ip_version': ip_version, + 'enable_dhcp': enable_dhcp, + }, + **kwargs, + ) + + # Add optional attributes to the message. + if cidr: + subnet['cidr'] = cidr + if subnet_name: + subnet['name'] = subnet_name + if tenant_id: + subnet['tenant_id'] = tenant_id + if allocation_pools: + subnet['allocation_pools'] = allocation_pools + if gateway_ip: + subnet['gateway_ip'] = gateway_ip + if disable_gateway_ip: + subnet['gateway_ip'] = None + if dns_nameservers: + subnet['dns_nameservers'] = dns_nameservers + if host_routes: + subnet['host_routes'] = host_routes + if ipv6_ra_mode: + subnet['ipv6_ra_mode'] = ipv6_ra_mode + if ipv6_address_mode: + subnet['ipv6_address_mode'] = ipv6_address_mode + if prefixlen: + subnet['prefixlen'] = prefixlen + if use_default_subnetpool: + subnet['use_default_subnetpool'] = True + if subnetpool: + subnet['subnetpool_id'] = subnetpool["id"] + + return self.network.create_subnet(**subnet) + + def delete_subnet(self, name_or_id): + """Delete a subnet. + + If a name, instead of a unique UUID, is supplied, it is possible + that we could find more than one matching subnet since names are + not required to be unique. An error will be raised in this case. + + :param name_or_id: Name or ID of the subnet being deleted. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + subnet = self.network.find_subnet(name_or_id, ignore_missing=True) + if not subnet: + self.log.debug("Subnet %s not found for deleting", name_or_id) + return False + + self.network.delete_subnet(subnet) + + return True + + def update_subnet( + self, + name_or_id, + subnet_name=None, + enable_dhcp=None, + gateway_ip=None, + disable_gateway_ip=None, + allocation_pools=None, + dns_nameservers=None, + host_routes=None, + ): + """Update an existing subnet. 
+ + :param string name_or_id: Name or ID of the subnet to update. + :param string subnet_name: The new name of the subnet. + :param bool enable_dhcp: Set to ``True`` if DHCP is enabled and + ``False`` if disabled. + :param string gateway_ip: The gateway IP address. When you specify both + allocation_pools and gateway_ip, you must ensure that the gateway + IP does not overlap with the specified allocation pools. + :param bool disable_gateway_ip: Set to ``True`` if gateway IP address + is disabled and ``False`` if enabled. It is not allowed with + gateway_ip. Default is ``False``. + :param allocation_pools: A list of dictionaries of the start and end + addresses for the allocation pools. For example:: + + [{"start": "192.168.199.2", "end": "192.168.199.254"}] + + :param dns_nameservers: A list of DNS name servers for the subnet. For + example:: + + ["8.8.8.7", "8.8.8.8"] + + :param host_routes: A list of host route dictionaries for the subnet. + For example:: + + [ + {"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}, + {"destination": "192.168.0.0/24", "nexthop": "192.168.0.1"}, + ] + + :returns: The updated network ``Subnet`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + subnet = {} + if subnet_name: + subnet['name'] = subnet_name + if enable_dhcp is not None: + subnet['enable_dhcp'] = enable_dhcp + if gateway_ip: + subnet['gateway_ip'] = gateway_ip + if disable_gateway_ip: + subnet['gateway_ip'] = None + if allocation_pools: + subnet['allocation_pools'] = allocation_pools + if dns_nameservers: + subnet['dns_nameservers'] = dns_nameservers + if host_routes: + subnet['host_routes'] = host_routes + + if not subnet: + self.log.debug("No subnet data to update") + return + + if disable_gateway_ip and gateway_ip: + raise exceptions.SDKException( + 'arg:disable_gateway_ip is not allowed with arg:gateway_ip' + ) + + curr_subnet = self.get_subnet(name_or_id) + if not curr_subnet: + raise exceptions.SDKException(f"Subnet {name_or_id} not found.") + + return self.network.update_subnet(curr_subnet, **subnet) + + @_utils.valid_kwargs( + 'name', + 'admin_state_up', + 'mac_address', + 'fixed_ips', + 'security_groups', + 'allowed_address_pairs', + 'extra_dhcp_opts', + 'device_owner', + 'device_id', + 'binding:vnic_type', + 'binding:profile', + 'port_security_enabled', + 'qos_policy_id', + 'binding:host_id', + 'project_id', + 'description', + 'dns_domain', + 'dns_name', + 'numa_affinity_policy', + 'propagate_uplink_status', + 'mac_learning_enabled', + ) + def create_port(self, network_id, **kwargs): + """Create a port + + :param network_id: The ID of the network. (Required) + :param name: A symbolic name for the port. (Optional) + :param admin_state_up: The administrative status of the port, + which is up (true, default) or down (false). (Optional) + :param mac_address: The MAC address. (Optional) + :param fixed_ips: List of ip_addresses and subnet_ids. See subnet_id + and ip_address. 
(Optional) For example:: + + [ + { + "ip_address": "10.29.29.13", + "subnet_id": "a78484c4-c380-4b47-85aa-21c51a2d8cbd", + }, + ..., + ] + + where + subnet_id: If you specify only a subnet ID, + OpenStack Networking allocates an available IP + from that subnet to the port. + ip_address: (Optional) If you specify both a subnet ID and + an IP address, OpenStack Networking tries to allocate + the specified address to the port. + :param security_groups: List of security group UUIDs. (Optional) + :param allowed_address_pairs: Allowed address pairs list (Optional) + For example:: + + [ + { + "ip_address": "23.23.23.1", + "mac_address": "fa:16:3e:c4:cd:3f", + }, + ..., + ] + + :param extra_dhcp_opts: Extra DHCP options. (Optional). + For example:: + + [{"opt_name": "opt name1", "opt_value": "value1"}, ...] + + :param device_owner: The ID of the entity that uses this port. + For example, a DHCP agent. (Optional) + :param device_id: The ID of the device that uses this port. + For example, a virtual server. (Optional) + :param binding vnic_type: The type of the created port. (Optional) + :param port_security_enabled: The security port state created on + the network. (Optional) + :param qos_policy_id: The ID of the QoS policy to apply for + port. (Optional) + :param project_id: The project in which to create the port. (Optional) + :param description: Description of the port. (Optional) + :param dns_domain: DNS domain relevant for the port. (Optional) + :param dns_name: DNS name of the port. (Optional) + :param numa_affinity_policy: the numa affinitiy policy. May be + "None", "required", "preferred" or "legacy". (Optional) + :param propagate_uplink_status: If the uplink status of the port should + be propagated. (Optional) + :param mac_learning_enabled: If mac learning should be enabled on the + port. (Optional) + + :returns: The created network ``Port`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + kwargs['network_id'] = network_id + + return self.network.create_port(**kwargs) + + @_utils.valid_kwargs( + 'name', + 'admin_state_up', + 'fixed_ips', + 'security_groups', + 'allowed_address_pairs', + 'extra_dhcp_opts', + 'device_owner', + 'device_id', + 'binding:vnic_type', + 'binding:profile', + 'port_security_enabled', + 'qos_policy_id', + 'binding:host_id', + ) + def update_port(self, name_or_id, **kwargs): + """Update a port + + Note: to unset an attribute use None value. To leave an attribute + untouched just omit it. + + :param name_or_id: name or ID of the port to update. (Required) + :param name: A symbolic name for the port. (Optional) + :param admin_state_up: The administrative status of the port, + which is up (true) or down (false). (Optional) + :param fixed_ips: List of ip_addresses and subnet_ids. (Optional) + If you specify only a subnet ID, OpenStack Networking allocates + an available IP from that subnet to the port. + If you specify both a subnet ID and an IP address, OpenStack + Networking tries to allocate the specified address to the port. + For example:: + + [ + { + "ip_address": "10.29.29.13", + "subnet_id": "a78484c4-c380-4b47-85aa-21c51a2d8cbd", + }, + ..., + ] + + :param security_groups: List of security group UUIDs. (Optional) + :param allowed_address_pairs: Allowed address pairs list (Optional) + For example:: + + [ + { + "ip_address": "23.23.23.1", + "mac_address": "fa:16:3e:c4:cd:3f", + }, + ..., + ] + + :param extra_dhcp_opts: Extra DHCP options. (Optional). + For example:: + + [{"opt_name": "opt name1", "opt_value": "value1"}, ...] + + :param device_owner: The ID of the entity that uses this port. + For example, a DHCP agent. (Optional) + :param device_id: The ID of the resource this port is attached to. + :param binding vnic_type: The type of the created port. (Optional) + :param port_security_enabled: The security port state created on + the network. 
(Optional) + :param qos_policy_id: The ID of the QoS policy to apply for port. + + :returns: The updated network ``Port`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + port = self.get_port(name_or_id=name_or_id) + if port is None: + raise exceptions.SDKException( + f"failed to find port '{name_or_id}'" + ) + + return self.network.update_port(port, **kwargs) + + def delete_port(self, name_or_id): + """Delete a port + + :param name_or_id: ID or name of the port to delete. + + :returns: True if delete succeeded, False otherwise. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + port = self.network.find_port(name_or_id, ignore_missing=True) + if port is None: + self.log.debug("Port %s not found for deleting", name_or_id) + return False + + self.network.delete_port(port) + + return True + + def _get_port_ids(self, name_or_id_list, filters=None): + """ + Takes a list of port names or ids, retrieves ports and returns a list + with port ids only. + + :param list[str] name_or_id_list: list of port names or ids + :param dict filters: optional filters + :raises: SDKException on multiple matches + :raises: NotFoundException if a port is not found + :return: list of port ids + :rtype: list[str] + """ + ids_list = [] + for name_or_id in name_or_id_list: + port = self.get_port(name_or_id, filters) + if not port: + raise exceptions.NotFoundException( + f'Port {name_or_id} not found' + ) + ids_list.append(port['id']) + return ids_list + + def _build_external_gateway_info( + self, ext_gateway_net_id, enable_snat, ext_fixed_ips + ): + info = {} + if ext_gateway_net_id: + info['network_id'] = ext_gateway_net_id + # Only send enable_snat if it is explicitly set. 
+ if enable_snat is not None: + info['enable_snat'] = enable_snat + if ext_fixed_ips: + info['external_fixed_ips'] = ext_fixed_ips + if info: + return info + return None diff --git a/openstack/cloud/_network_common.py b/openstack/cloud/_network_common.py new file mode 100644 index 0000000000..98a03de378 --- /dev/null +++ b/openstack/cloud/_network_common.py @@ -0,0 +1,2235 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ipaddress +import threading +import time +import warnings + +from openstack.cloud import _utils +from openstack.cloud import exc +from openstack.cloud import meta +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack import proxy +from openstack import utils +from openstack import warnings as os_warnings + + +class NetworkCommonCloudMixin(openstackcloud._OpenStackCloudMixin): + """Shared networking functions used by Network and Compute classes.""" + + def __init__(self, *args, **kwargs): + super().__init__(*args, **kwargs) + + self._external_ipv4_names = self.config.get_external_ipv4_networks() + self._internal_ipv4_names = self.config.get_internal_ipv4_networks() + self._external_ipv6_names = self.config.get_external_ipv6_networks() + self._internal_ipv6_names = self.config.get_internal_ipv6_networks() + self._nat_destination = self.config.get_nat_destination() + self._nat_source = self.config.get_nat_source() + self._default_network = self.config.get_default_network() + + self._use_external_network = 
self.config.config.get( + 'use_external_network', True + ) + self._use_internal_network = self.config.config.get( + 'use_internal_network', True + ) + + self._networks_lock = threading.Lock() + self._reset_network_caches() + + self.private = self.config.config.get('private', False) + + self._floating_ip_source = self.config.config.get('floating_ip_source') + if self._floating_ip_source: + if self._floating_ip_source.lower() == 'none': + self._floating_ip_source = None + else: + self._floating_ip_source = self._floating_ip_source.lower() + + self.secgroup_source = self.config.config['secgroup_source'] + + # networks + + def use_external_network(self): + return self._use_external_network + + def use_internal_network(self): + return self._use_internal_network + + def _reset_network_caches(self): + # Variables to prevent us from going through the network finding + # logic again if we've done it once. This is different from just + # the cached value, since "None" is a valid value to find. + with self._networks_lock: + self._external_ipv4_networks = [] + self._external_ipv4_floating_networks = [] + self._internal_ipv4_networks = [] + self._external_ipv6_networks = [] + self._internal_ipv6_networks = [] + self._nat_destination_network = None + self._nat_source_network = None + self._default_network_network = None + self._network_list_stamp = False + + def _set_interesting_networks(self): + external_ipv4_networks = [] + external_ipv4_floating_networks = [] + internal_ipv4_networks = [] + external_ipv6_networks = [] + internal_ipv6_networks = [] + nat_destination = None + nat_source = None + default_network = None + + all_subnets = None + + # Filter locally because we have an or condition + try: + # TODO(mordred): Rackspace exposes neutron but it does not + # work. I think that overriding what the service catalog + # reports should be a thing os-client-config should handle + # in a vendor profile - but for now it does not. 
That means + # this search_networks can just totally fail. If it does + # though, that's fine, clearly the neutron introspection is + # not going to work. + if self.has_service('network'): + all_networks = list(self.network.networks()) + else: + all_networks = [] + except exceptions.SDKException: + self._network_list_stamp = True + return + + for network in all_networks: + # External IPv4 networks + if ( + network['name'] in self._external_ipv4_names + or network['id'] in self._external_ipv4_names + ): + external_ipv4_networks.append(network) + elif ( + ( + network.is_router_external + or network.provider_physical_network + ) + and network['name'] not in self._internal_ipv4_names + and network['id'] not in self._internal_ipv4_names + ): + external_ipv4_networks.append(network) + + # Internal networks + if ( + network['name'] in self._internal_ipv4_names + or network['id'] in self._internal_ipv4_names + ): + internal_ipv4_networks.append(network) + elif ( + not network.is_router_external + and not network.provider_physical_network + and network['name'] not in self._external_ipv4_names + and network['id'] not in self._external_ipv4_names + ): + internal_ipv4_networks.append(network) + + # External networks + if ( + network['name'] in self._external_ipv6_names + or network['id'] in self._external_ipv6_names + ): + external_ipv6_networks.append(network) + elif ( + network.is_router_external + and network['name'] not in self._internal_ipv6_names + and network['id'] not in self._internal_ipv6_names + ): + external_ipv6_networks.append(network) + + # Internal networks + if ( + network['name'] in self._internal_ipv6_names + or network['id'] in self._internal_ipv6_names + ): + internal_ipv6_networks.append(network) + elif ( + not network.is_router_external + and network['name'] not in self._external_ipv6_names + and network['id'] not in self._external_ipv6_names + ): + internal_ipv6_networks.append(network) + + # External Floating IPv4 networks + if self._nat_source in 
(network['name'], network['id']): + if nat_source: + raise exceptions.SDKException( + 'Multiple networks were found matching ' + f'{self._nat_source} which is the network configured ' + 'to be the NAT source. Please check your ' + 'cloud resources. It is probably a good idea ' + 'to configure this network by ID rather than ' + 'by name.' + ) + external_ipv4_floating_networks.append(network) + nat_source = network + elif self._nat_source is None: + if network.is_router_external: + external_ipv4_floating_networks.append(network) + nat_source = nat_source or network + + # NAT Destination + if self._nat_destination in (network['name'], network['id']): + if nat_destination: + raise exceptions.SDKException( + f'Multiple networks were found matching ' + f'{self._nat_destination} which is the network ' + f'configured to be the NAT destination. Please check ' + f'your cloud resources. It is probably a good idea ' + f'to configure this network by ID rather than ' + f'by name.' + ) + nat_destination = network + elif self._nat_destination is None: + # TODO(mordred) need a config value for floating + # ips for this cloud so that we can skip this + # No configured nat destination, we have to figure + # it out.
+ if all_subnets is None: + try: + if self.has_service('network'): + all_subnets = list(self.network.subnets()) + else: + all_subnets = [] + except exceptions.SDKException: + # Thanks Rackspace broken neutron + all_subnets = [] + + for subnet in all_subnets: + # TODO(mordred) trap for detecting more than + # one network with a gateway_ip without a config + if ( + 'gateway_ip' in subnet + and subnet['gateway_ip'] + and network['id'] == subnet['network_id'] + ): + nat_destination = network + break + + # Default network + if self._default_network in (network['name'], network['id']): + if default_network: + raise exceptions.SDKException( + 'Multiple networks were found matching ' + f'{self._default_network} which is the network ' + 'configured to be the default interface ' + 'network. Please check your cloud resources. ' + 'It is probably a good idea ' + 'to configure this network by ID rather than ' + 'by name.' + ) + default_network = network + + # Validate config vs. reality + for net_name in self._external_ipv4_names: + if net_name not in [net['name'] for net in external_ipv4_networks]: + raise exceptions.SDKException( + f"Networks: {net_name} was provided for external IPv4 " + "access and those networks could not be found" + ) + + for net_name in self._internal_ipv4_names: + if net_name not in [net['name'] for net in internal_ipv4_networks]: + raise exceptions.SDKException( + f"Networks: {net_name} was provided for internal IPv4 " + "access and those networks could not be found" + ) + + for net_name in self._external_ipv6_names: + if net_name not in [net['name'] for net in external_ipv6_networks]: + raise exceptions.SDKException( + f"Networks: {net_name} was provided for external IPv6 " + "access and those networks could not be found" + ) + + for net_name in self._internal_ipv6_names: + if net_name not in [net['name'] for net in internal_ipv6_networks]: + raise exceptions.SDKException( + f"Networks: {net_name} was provided for internal IPv6 " + "access and those 
networks could not be found" + ) + + if self._nat_destination and not nat_destination: + raise exceptions.SDKException( + f'Network {self._nat_destination} was configured to be the ' + 'destination for inbound NAT but it could not be ' + 'found' + ) + + if self._nat_source and not nat_source: + raise exceptions.SDKException( + f'Network {self._nat_source} was configured to be the ' + 'source for inbound NAT but it could not be ' + 'found' + ) + + if self._default_network and not default_network: + raise exceptions.SDKException( + f'Network {self._default_network} was configured to be the ' + 'default network interface but it could not be ' + 'found' + ) + + self._external_ipv4_networks = external_ipv4_networks + self._external_ipv4_floating_networks = external_ipv4_floating_networks + self._internal_ipv4_networks = internal_ipv4_networks + self._external_ipv6_networks = external_ipv6_networks + self._internal_ipv6_networks = internal_ipv6_networks + self._nat_destination_network = nat_destination + self._nat_source_network = nat_source + self._default_network_network = default_network + + def _find_interesting_networks(self): + if self._networks_lock.acquire(): + try: + if self._network_list_stamp: + return + if ( + not self._use_external_network + and not self._use_internal_network + ): + # Both have been flagged as skip - don't do a list + return + if not self.has_service('network'): + return + self._set_interesting_networks() + self._network_list_stamp = True + finally: + self._networks_lock.release() + + def get_nat_destination(self): + """Return the network that is configured to be the NAT destination. + + :returns: A network ``Network`` object if one is found + """ + self._find_interesting_networks() + return self._nat_destination_network + + def get_nat_source(self): + """Return the network that is configured to be the NAT source.
+ + :returns: A network ``Network`` object if one is found + """ + self._find_interesting_networks() + return self._nat_source_network + + def get_default_network(self): + """Return the network that is configured to be the default interface. + + :returns: A network ``Network`` object if one is found + """ + self._find_interesting_networks() + return self._default_network_network + + def get_external_networks(self): + """Return the networks that are configured to route northbound. + + This should be avoided in favor of the specific ipv4/ipv6 method, + but is here for backwards compatibility. + + :returns: A list of network ``Network`` objects if any are found + """ + self._find_interesting_networks() + return list(self._external_ipv4_networks) + list( + self._external_ipv6_networks + ) + + def get_internal_networks(self): + """Return the networks that are configured to not route northbound. + + This should be avoided in favor of the specific ipv4/ipv6 method, + but is here for backwards compatibility. + + :returns: A list of network ``Network`` objects if any are found + """ + self._find_interesting_networks() + return list(self._internal_ipv4_networks) + list( + self._internal_ipv6_networks + ) + + def get_external_ipv4_networks(self): + """Return the networks that are configured to route northbound. + + :returns: A list of network ``Network`` objects if any are found + """ + self._find_interesting_networks() + return self._external_ipv4_networks + + def get_external_ipv4_floating_networks(self): + """Return the networks that are configured to route northbound. + + :returns: A list of network ``Network`` objects if any are found + """ + self._find_interesting_networks() + return self._external_ipv4_floating_networks + + def get_internal_ipv4_networks(self): + """Return the networks that are configured to not route northbound. 
+ + :returns: A list of network ``Network`` objects if any are found + """ + self._find_interesting_networks() + return self._internal_ipv4_networks + + def get_external_ipv6_networks(self): + """Return the networks that are configured to route northbound. + + :returns: A list of network ``Network`` objects if any are found + """ + self._find_interesting_networks() + return self._external_ipv6_networks + + def get_internal_ipv6_networks(self): + """Return the networks that are configured to not route northbound. + + :returns: A list of network ``Network`` objects if any are found + """ + self._find_interesting_networks() + return self._internal_ipv6_networks + + # floating IPs + + def search_floating_ip_pools(self, name=None, filters=None): + pools = self.list_floating_ip_pools() + return _utils._filter_list(pools, name, filters) + + # With Neutron, there are some cases in which full server side filtering is + # not possible (e.g. nested attributes or list of objects) so we also need + # to use the client-side filtering + # The same goes for all neutron-related search/get methods! + def search_floating_ips(self, id=None, filters=None): + # `filters` could be a jmespath expression which Neutron server doesn't + # understand, obviously. + warnings.warn( + "search_floating_ips is deprecated. 
Use search_resource instead.", + os_warnings.RemovedInSDK50Warning, + ) + if self._use_neutron_floating() and isinstance(filters, dict): + return list(self.network.ips(**filters)) + else: + floating_ips = self.list_floating_ips() + return _utils._filter_list(floating_ips, id, filters) + + def _neutron_list_floating_ips(self, filters=None): + if not filters: + filters = {} + data = list(self.network.ips(**filters)) + return data + + def _nova_list_floating_ips(self): + try: + data = proxy._json_response(self.compute.get('/os-floating-ips')) + except exceptions.NotFoundException: + return [] + return self._get_and_munchify('floating_ips', data) + + def get_floating_ip(self, id, filters=None): + """Get a floating IP by ID + + :param id: ID of the floating IP. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender==`Female`]" + + :returns: A floating IP ``openstack.network.v2.floating_ip.FloatingIP`` + or None if no matching floating IP is found. + + """ + return _utils._get_entity(self, 'floating_ip', id, filters) + + def list_floating_ips(self, filters=None): + """List all available floating IPs. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of floating IP + ``openstack.network.v2.floating_ip.FloatingIP``. + """ + if not filters: + filters = {} + + if self._use_neutron_floating(): + try: + return self._neutron_list_floating_ips(filters) + except exceptions.NotFoundException as e: + # Nova-network doesn't support server-side floating ips + # filtering, so it's safer to return an empty list than + # to fall back to Nova which may return more results than + # expected.
+ if filters: + self.log.error( + "Neutron returned NotFound for floating IPs, which " + "means this cloud doesn't have neutron floating ips. " + "openstacksdk can't fallback to trying Nova since " + "nova doesn't support server-side filtering when " + "listing floating ips and filters were given. " + "If you do not think openstacksdk should be " + "attempting to list floating IPs on neutron, it is " + "possible to control the behavior by setting " + "floating_ip_source to 'nova' or None for cloud " + "%(cloud)r in 'clouds.yaml'.", + { + 'cloud': self.name, + }, + ) + # We can't fallback to nova because we push-down filters. + # We got a 404 which means neutron doesn't exist. If the + # user + return [] + + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. Trying with Nova.", + {'msg': str(e)}, + ) + # Fall-through, trying with Nova + else: + if filters: + raise ValueError( + "nova-network doesn't support server-side floating IPs " + "filtering. Use the 'search_floating_ips' method instead" + ) + + floating_ips = self._nova_list_floating_ips() + return self._normalize_floating_ips(floating_ips) + + def list_floating_ip_pools(self): + """List all available floating IP pools. + + NOTE: This function supports the nova-net view of the world. nova-net + has been deprecated, so it's highly recommended to switch to using + neutron. `get_external_ipv4_floating_networks` is what you should + almost certainly be using. + + :returns: A list of floating IP pool objects + """ + data = proxy._json_response( + self.compute.get('os-floating-ip-pools'), + error_message="Error fetching floating IP pool list", + ) + pools = self._get_and_munchify('floating_ip_pools', data) + return [{'name': p['name']} for p in pools] + + def get_floating_ip_by_id(self, id): + """Get a floating ip by ID + + :param id: ID of the floating ip. + :returns: A floating ip + `:class:`~openstack.network.v2.floating_ip.FloatingIP`. 
+ """ + error_message = f"Error getting floating ip with ID {id}" + + if self._use_neutron_floating(): + fip = self.network.get_ip(id) + return fip + else: + data = proxy._json_response( + self.compute.get(f'/os-floating-ips/{id}'), + error_message=error_message, + ) + return self._normalize_floating_ip( + self._get_and_munchify('floating_ip', data) + ) + + def _neutron_available_floating_ips( + self, network=None, project_id=None, server=None + ): + """Get a floating IP from a network. + + Return a list of available floating IPs or allocate a new one and + return it in a list of 1 element. + + :param network: A single network name or ID, or a list of them. + :param server: (server) Server the Floating IP is for + + :returns: a list of floating IP addresses. + :raises: :class:`~openstack.exceptions.BadRequestException` if an + external network that meets the specified criteria cannot be found. + """ + if project_id is None: + # Make sure we are only listing floatingIPs allocated the current + # tenant. 
This is the default behaviour of Nova + project_id = self.current_project_id + + if network: + if isinstance(network, str): + network = [network] + + # Use given list to get first matching external network + floating_network_id = None + for net in network: + for ext_net in self.get_external_ipv4_floating_networks(): + if net in (ext_net['name'], ext_net['id']): + floating_network_id = ext_net['id'] + break + if floating_network_id: + break + + if floating_network_id is None: + raise exceptions.NotFoundException( + f"unable to find external network {network}" + ) + else: + floating_network_id = self._get_floating_network_id() + + filters = { + 'port_id': None, + 'floating_network_id': floating_network_id, + 'project_id': project_id, + } + + floating_ips = self.list_floating_ips() + available_ips = _utils._filter_list( + floating_ips, name_or_id=None, filters=filters + ) + if available_ips: + return available_ips + + # No available IP found or we didn't try + # allocate a new Floating IP + f_ip = self._neutron_create_floating_ip( + network_id=floating_network_id, server=server + ) + + return [f_ip] + + def _nova_available_floating_ips(self, pool=None): + """Get available floating IPs from a floating IP pool. + + Return a list of available floating IPs or allocate a new one and + return it in a list of 1 element. + + :param pool: Nova floating IP pool name. + + :returns: a list of floating IP addresses. + :raises: :class:`~openstack.exceptions.BadRequestException` if a + floating IP pool is not specified and cannot be found. 
+ """ + + with _utils.openstacksdk_exceptions( + f"Unable to create floating IP in pool {pool}" + ): + if pool is None: + pools = self.list_floating_ip_pools() + if not pools: + raise exceptions.NotFoundException( + "unable to find a floating ip pool" + ) + pool = pools[0]['name'] + + filters = {'instance_id': None, 'pool': pool} + + floating_ips = self._nova_list_floating_ips() + available_ips = _utils._filter_list( + floating_ips, name_or_id=None, filters=filters + ) + if available_ips: + return available_ips + + # No available IP found or we did not try. + # Allocate a new Floating IP + f_ip = self._nova_create_floating_ip(pool=pool) + + return [f_ip] + + def _find_floating_network_by_router(self): + """Find the network providing floating ips by looking at routers.""" + for router in self.network.routers(): + if router['admin_state_up']: + network_id = router.get('external_gateway_info', {}).get( + 'network_id' + ) + if network_id: + return network_id + + def available_floating_ip(self, network=None, server=None): + """Get a floating IP from a network or a pool. + + Return the first available floating IP or allocate a new one. + + :param network: Name or ID of the network. + :param server: Server the IP is for if known + + :returns: a (normalized) structure with a floating IP address + description. + """ + if self._use_neutron_floating(): + try: + f_ips = self._neutron_available_floating_ips( + network=network, server=server + ) + return f_ips[0] + except exceptions.NotFoundException as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. 
Trying with Nova.", + {'msg': str(e)}, + ) + # Fall-through, trying with Nova + + f_ips = self._normalize_floating_ips( + self._nova_available_floating_ips(pool=network) + ) + return f_ips[0] + + def _get_floating_network_id(self): + # Get first existing external IPv4 network + networks = self.get_external_ipv4_floating_networks() + if networks: + floating_network_id = networks[0]['id'] + else: + floating_network = self._find_floating_network_by_router() + if floating_network: + floating_network_id = floating_network + else: + raise exceptions.NotFoundException( + "unable to find an external network" + ) + return floating_network_id + + def create_floating_ip( + self, + network=None, + server=None, + fixed_address=None, + nat_destination=None, + port=None, + wait=False, + timeout=60, + ): + """Allocate a new floating IP from a network or a pool. + + :param network: Name or ID of the network + that the floating IP should come from. + :param server: (optional) Server dict for the server to create + the IP for and to which it should be attached. + :param fixed_address: (optional) Fixed IP to attach the floating + ip to. + :param nat_destination: (optional) Name or ID of the network + that the fixed IP to attach the floating + IP to should be on. + :param port: (optional) The port ID that the floating IP should be + attached to. Specifying a port conflicts + with specifying a server, fixed_address or + nat_destination. + :param wait: (optional) Whether to wait for the IP to be active. + Defaults to False. Only applies if a server is + provided. + :param timeout: (optional) How long to wait for the IP to be active. + Defaults to 60. Only applies if a server is + provided. + + :returns: a floating IP address + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. 
+ """ + if self._use_neutron_floating(): + try: + return self._neutron_create_floating_ip( + network_name_or_id=network, + server=server, + fixed_address=fixed_address, + nat_destination=nat_destination, + port=port, + wait=wait, + timeout=timeout, + ) + except exceptions.NotFoundException as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. Trying with Nova.", + {'msg': str(e)}, + ) + # Fall-through, trying with Nova + + if port: + raise exceptions.SDKException( + "This cloud uses nova-network which does not support " + "arbitrary floating-ip/port mappings. Please nudge " + "your cloud provider to upgrade the networking stack " + "to neutron, or alternately provide the server, " + "fixed_address and nat_destination arguments as appropriate" + ) + # Else, we are using Nova network + f_ips = self._normalize_floating_ips( + [self._nova_create_floating_ip(pool=network)] + ) + return f_ips[0] + + def _submit_create_fip(self, kwargs): + # Split into a method to aid in test mocking + return self.network.create_ip(**kwargs) + + def _neutron_create_floating_ip( + self, + network_name_or_id=None, + server=None, + fixed_address=None, + nat_destination=None, + port=None, + wait=False, + timeout=60, + network_id=None, + ): + if not network_id: + if network_name_or_id: + try: + network = self.network.find_network( + network_name_or_id, ignore_missing=False + ) + except exceptions.NotFoundException: + raise exceptions.NotFoundException( + "unable to find network for floating ips with ID " + f"{network_name_or_id}" + ) + network_id = network['id'] + else: + network_id = self._get_floating_network_id() + kwargs = { + 'floating_network_id': network_id, + } + if not port: + if server: + (port_obj, fixed_ip_address) = self._nat_destination_port( + server, + fixed_address=fixed_address, + nat_destination=nat_destination, + ) + if port_obj: + port = port_obj['id'] + if fixed_ip_address: + kwargs['fixed_ip_address'] = fixed_ip_address + if port: + 
kwargs['port_id'] = port + + fip = self._submit_create_fip(kwargs) + fip_id = fip['id'] + + if port: + # The FIP is only going to become active in this context + # when we've attached it to something, which only occurs + # if we've provided a port as a parameter + if wait: + try: + for count in utils.iterate_timeout( + timeout, + "Timeout waiting for the floating IP to be ACTIVE", + wait=min(5, timeout), + ): + fip = self.get_floating_ip(fip_id) + if fip and fip['status'] == 'ACTIVE': + break + except exceptions.ResourceTimeout: + self.log.error( + "Timed out on floating ip %(fip)s becoming active. " + "Deleting", + {'fip': fip_id}, + ) + try: + self.delete_floating_ip(fip_id) + except Exception as e: + self.log.error( + "FIP LEAK: Attempted to delete floating ip " + "%(fip)s but received %(exc)s exception: " + "%(err)s", + {'fip': fip_id, 'exc': e.__class__, 'err': str(e)}, + ) + raise + if fip['port_id'] != port: + if server: + raise exceptions.SDKException( + "Attempted to create FIP on port {port} for server " + "{server} but FIP has port {port_id}".format( + port=port, + port_id=fip['port_id'], + server=server['id'], + ) + ) + else: + raise exceptions.SDKException( + f"Attempted to create FIP on port {port} " + "but something went wrong" + ) + return fip + + def _nova_create_floating_ip(self, pool=None): + with _utils.openstacksdk_exceptions( + f"Unable to create floating IP in pool {pool}" + ): + if pool is None: + pools = self.list_floating_ip_pools() + if not pools: + raise exceptions.NotFoundException( + "unable to find a floating ip pool" + ) + pool = pools[0]['name'] + + data = proxy._json_response( + self.compute.post('/os-floating-ips', json=dict(pool=pool)) + ) + pool_ip = self._get_and_munchify('floating_ip', data) + # TODO(mordred) Remove this - it's just for compat + data = proxy._json_response( + self.compute.get( + '/os-floating-ips/{id}'.format(id=pool_ip['id']) + ) + ) + return self._get_and_munchify('floating_ip', data) + + def 
delete_floating_ip(self, floating_ip_id, retry=1): + """Deallocate a floating IP from a project. + + :param floating_ip_id: a floating IP address ID. + :param retry: number of times to retry. Optional, defaults to 1, + which is in addition to the initial delete call. + A value of 0 will also cause no checking of results to + occur. + + :returns: True if the IP address has been deleted, False if the IP + address was not found. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + for count in range(0, max(0, retry) + 1): + result = self._delete_floating_ip(floating_ip_id) + + if (retry == 0) or not result: + return result + + # neutron sometimes returns success when deleting a floating + # ip. That's awesome. SO - verify that the delete actually + # worked. Some clouds will set the status to DOWN rather than + # deleting the IP immediately. This is, of course, a bit absurd. + f_ip = self.get_floating_ip(id=floating_ip_id) + if not f_ip or f_ip['status'] == 'DOWN': + return True + + raise exceptions.SDKException( + "Attempted to delete Floating IP {ip} with ID {id} a total of " + "{retry} times. Although the cloud did not indicate any errors " + "the floating IP is still in existence. Aborting further " + "operations.".format( + id=floating_ip_id, + ip=f_ip['floating_ip_address'], + retry=retry + 1, + ) + ) + + def _delete_floating_ip(self, floating_ip_id): + if self._use_neutron_floating(): + try: + return self._neutron_delete_floating_ip(floating_ip_id) + except exceptions.NotFoundException as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. 
Trying with Nova.", + {'msg': str(e)}, + ) + return self._nova_delete_floating_ip(floating_ip_id) + + def _neutron_delete_floating_ip(self, floating_ip_id): + try: + self.network.delete_ip(floating_ip_id, ignore_missing=False) + except exceptions.NotFoundException: + return False + return True + + def _nova_delete_floating_ip(self, floating_ip_id): + try: + proxy._json_response( + self.compute.delete(f'/os-floating-ips/{floating_ip_id}'), + error_message=f'Unable to delete floating IP {floating_ip_id}', + ) + except exceptions.NotFoundException: + return False + return True + + def delete_unattached_floating_ips(self, retry=1): + """Safely delete unattached floating ips. + + If the cloud can safely purge any unattached floating ips without + race conditions, do so. + + Safely here means a specific thing. It means that you are not running + this while another process that might do a two step create/attach + is running. You can safely run this method while another process + is creating servers and attaching floating IPs to them if either that + process is using add_auto_ip from shade, or is creating the floating + IPs by passing in a server to the create_floating_ip call. + + :param retry: number of times to retry. Optional, defaults to 1, + which is in addition to the initial delete call. + A value of 0 will also cause no checking of results to occur. + + :returns: Number of Floating IPs deleted, False if none + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + processed = [] + if self._use_neutron_floating(): + for ip in self.list_floating_ips(): + if not bool(ip.port_id): + processed.append( + self.delete_floating_ip( + floating_ip_id=ip['id'], retry=retry + ) + ) + return len(processed) if all(processed) else False + + def _attach_ip_to_server( + self, + server, + floating_ip, + fixed_address=None, + wait=False, + timeout=60, + skip_attach=False, + nat_destination=None, + ): + """Attach a floating IP to a server. 
+ + :param server: Server dict + :param floating_ip: Floating IP dict to attach + :param fixed_address: (optional) fixed address to which attach the + floating IP to. + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. + :param skip_attach: (optional) Skip the actual attach and just do + the wait. Defaults to False. + :param nat_destination: The fixed network the server's port for the + FIP to attach to will come from. + + :returns: The server ``openstack.compute.v2.server.Server`` + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + # Short circuit if we're asking to attach an IP that's already + # attached + ext_ip = meta.get_server_ip(server, ext_tag='floating', public=True) + if not ext_ip and floating_ip['port_id']: + # When we came here from reuse_fip and created FIP it might be + # already attached, but the server info might be also + # old to check whether it belongs to us now, thus refresh + # the server data and try again. There are some clouds, which + # explicitely forbids FIP assign call if it is already assigned. + server = self.compute.get_server(server['id']) + ext_ip = meta.get_server_ip( + server, ext_tag='floating', public=True + ) + if ext_ip == floating_ip['floating_ip_address']: + return server + + if self._use_neutron_floating(): + if not skip_attach: + try: + self._neutron_attach_ip_to_server( + server=server, + floating_ip=floating_ip, + fixed_address=fixed_address, + nat_destination=nat_destination, + ) + except exceptions.NotFoundException as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. 
Trying with Nova.", + {'msg': str(e)}, + ) + # Fall-through, trying with Nova + else: + # Nova network + self._nova_attach_ip_to_server( + server_id=server['id'], + floating_ip_id=floating_ip['id'], + fixed_address=fixed_address, + ) + + if wait: + # Wait for the address to be assigned to the server + server_id = server['id'] + for _ in utils.iterate_timeout( + timeout, + "Timeout waiting for the floating IP to be attached.", + wait=min(5, timeout), + ): + server = self.compute.get_server(server_id) + ext_ip = meta.get_server_ip( + server, ext_tag='floating', public=True + ) + if ext_ip == floating_ip['floating_ip_address']: + return server + return server + + def _neutron_attach_ip_to_server( + self, server, floating_ip, fixed_address=None, nat_destination=None + ): + # Find an available port + (port, fixed_address) = self._nat_destination_port( + server, + fixed_address=fixed_address, + nat_destination=nat_destination, + ) + if not port: + raise exceptions.SDKException( + "unable to find a port for server {}".format(server['id']) + ) + + floating_ip_args = {'port_id': port['id']} + if fixed_address is not None: + floating_ip_args['fixed_ip_address'] = fixed_address + + return self.network.update_ip(floating_ip, **floating_ip_args) + + def _nova_attach_ip_to_server( + self, server_id, floating_ip_id, fixed_address=None + ): + f_ip = self.get_floating_ip(id=floating_ip_id) + if f_ip is None: + raise exceptions.SDKException( + f"unable to find floating IP {floating_ip_id}" + ) + error_message = ( + f"Error attaching IP {floating_ip_id} to instance {server_id}" + ) + body = {'address': f_ip['floating_ip_address']} + if fixed_address: + body['fixed_address'] = fixed_address + return proxy._json_response( + self.compute.post( + f'/servers/{server_id}/action', + json=dict(addFloatingIp=body), + ), + error_message=error_message, + ) + + def detach_ip_from_server(self, server_id, floating_ip_id): + """Detach a floating IP from a server. 
+ + :param server_id: ID of a server. + :param floating_ip_id: Id of the floating IP to detach. + + :returns: True if the IP has been detached, or False if the IP wasn't + attached to any server. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if self._use_neutron_floating(): + try: + return self._neutron_detach_ip_from_server( + server_id=server_id, floating_ip_id=floating_ip_id + ) + except exceptions.NotFoundException as e: + self.log.debug( + "Something went wrong talking to neutron API: " + "'%(msg)s'. Trying with Nova.", + {'msg': str(e)}, + ) + # Fall-through, trying with Nova + + # Nova network + self._nova_detach_ip_from_server( + server_id=server_id, floating_ip_id=floating_ip_id + ) + + def _neutron_detach_ip_from_server(self, server_id, floating_ip_id): + f_ip = self.get_floating_ip(id=floating_ip_id) + if f_ip is None or not bool(f_ip.port_id): + return False + try: + self.network.update_ip(floating_ip_id, port_id=None) + except exceptions.SDKException: + raise exceptions.SDKException( + f"Error detaching IP {floating_ip_id} from server {server_id}" + ) + + return True + + def _nova_detach_ip_from_server(self, server_id, floating_ip_id): + f_ip = self.get_floating_ip(id=floating_ip_id) + if f_ip is None: + raise exceptions.SDKException( + f"unable to find floating IP {floating_ip_id}" + ) + error_message = ( + f"Error detaching IP {floating_ip_id} from instance {server_id}" + ) + return proxy._json_response( + self.compute.post( + f'/servers/{server_id}/action', + json=dict( + removeFloatingIp=dict(address=f_ip['floating_ip_address']) + ), + ), + error_message=error_message, + ) + + return True + + def _add_ip_from_pool( + self, + server, + network, + fixed_address=None, + reuse=True, + wait=False, + timeout=60, + nat_destination=None, + ): + """Add a floating IP to a server from a given pool + + This method reuses available IPs, when possible, or allocate new IPs + to the current tenant. 
+ The floating IP is attached to the given fixed address or to the + first server port/fixed address + + :param server: Server dict + :param network: Name or ID of the network. + :param fixed_address: a fixed address + :param reuse: Try to reuse existing ips. Defaults to True. + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. + :param nat_destination: (optional) the name of the network of the + port to associate with the floating ip. + + :returns: the updated server ``openstack.compute.v2.server.Server`` + """ + if reuse: + f_ip = self.available_floating_ip(network=network) + else: + start_time = time.time() + f_ip = self.create_floating_ip( + server=server, + network=network, + nat_destination=nat_destination, + fixed_address=fixed_address, + wait=wait, + timeout=timeout, + ) + timeout = timeout - (time.time() - start_time) + server = self.compute.get_server(server.id) + + # We run attach as a second call rather than in the create call + # because there are code flows where we will not have an attached + # FIP yet. However, even if it was attached in the create, we run + # the attach function below to get back the server dict refreshed + # with the FIP information. + return self._attach_ip_to_server( + server=server, + floating_ip=f_ip, + fixed_address=fixed_address, + wait=wait, + timeout=timeout, + nat_destination=nat_destination, + ) + + def add_ip_list( + self, + server, + ips, + wait=False, + timeout=60, + fixed_address=None, + nat_destination=None, + ): + """Attach a list of IPs to a server. + + :param server: a server object + :param ips: list of floating IP addresses or a single address + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. 
+ :param fixed_address: (optional) Fixed address of the server to + attach the IP to + :param nat_destination: (optional) Name or ID of the network that + the fixed IP to attach the floating IP should be on + + :returns: The updated server ``openstack.compute.v2.server.Server`` + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + + if type(ips) is not list: + ips = [ips] + + for ip in ips: + f_ip = self.get_floating_ip( + id=None, filters={'floating_ip_address': ip} + ) + server = self._attach_ip_to_server( + server=server, + floating_ip=f_ip, + wait=wait, + timeout=timeout, + fixed_address=fixed_address, + nat_destination=nat_destination, + ) + return server + + def add_auto_ip(self, server, wait=False, timeout=60, reuse=True): + """Add a floating IP to a server. + + This method is intended for basic usage. For advanced network + architecture (e.g. multiple external networks or servers with multiple + interfaces), use other floating IP methods. + + This method can reuse available IPs, or allocate new IPs to the current + project. + + :param server: a server dictionary. + :param reuse: Whether or not to attempt to reuse IPs, defaults + to True. + :param wait: (optional) Wait for the address to appear as assigned + to the server. Defaults to False. + :param timeout: (optional) Seconds to wait, defaults to 60. + See the ``wait`` parameter. + :param reuse: Try to reuse existing ips. Defaults to True. + + :returns: Floating IP address attached to server. 
+ """ + server = self._add_auto_ip( + server, wait=wait, timeout=timeout, reuse=reuse + ) + return server['interface_ip'] or None + + def _add_auto_ip(self, server, wait=False, timeout=60, reuse=True): + skip_attach = False + created = False + if reuse: + f_ip = self.available_floating_ip(server=server) + else: + start_time = time.time() + f_ip = self.create_floating_ip( + server=server, wait=wait, timeout=timeout + ) + timeout = timeout - (time.time() - start_time) + if server: + # This gets passed in for both nova and neutron + # but is only meaningful for the neutron logic branch + skip_attach = True + created = True + + try: + # We run attach as a second call rather than in the create call + # because there are code flows where we will not have an attached + # FIP yet. However, even if it was attached in the create, we run + # the attach function below to get back the server dict refreshed + # with the FIP information. + return self._attach_ip_to_server( + server=server, + floating_ip=f_ip, + wait=wait, + timeout=timeout, + skip_attach=skip_attach, + ) + except exceptions.ResourceTimeout: + if self._use_neutron_floating() and created: + # We are here because we created an IP on the port + # It failed. Delete so as not to leak an unmanaged + # resource + self.log.error( + "Timeout waiting for floating IP to become " + "active. 
Floating IP %(ip)s:%(id)s was created for " + "server %(server)s but is being deleted due to " + "activation failure.", + { + 'ip': f_ip['floating_ip_address'], + 'id': f_ip['id'], + 'server': server['id'], + }, + ) + try: + self.delete_floating_ip(f_ip['id']) + except Exception as e: + self.log.error( + "FIP LEAK: Attempted to delete floating ip " + "%(fip)s but received %(exc)s exception: %(err)s", + {'fip': f_ip['id'], 'exc': e.__class__, 'err': str(e)}, + ) + raise e + raise + + def add_ips_to_server( + self, + server, + auto_ip=True, + ips=None, + ip_pool=None, + wait=False, + timeout=60, + reuse=True, + fixed_address=None, + nat_destination=None, + ): + if ip_pool: + server = self._add_ip_from_pool( + server, + ip_pool, + reuse=reuse, + wait=wait, + timeout=timeout, + fixed_address=fixed_address, + nat_destination=nat_destination, + ) + elif ips: + server = self.add_ip_list( + server, + ips, + wait=wait, + timeout=timeout, + fixed_address=fixed_address, + nat_destination=nat_destination, + ) + elif auto_ip: + if self._needs_floating_ip(server, nat_destination): + server = self._add_auto_ip( + server, wait=wait, timeout=timeout, reuse=reuse + ) + return server + + def _needs_floating_ip(self, server, nat_destination): + """Figure out if auto_ip should add a floating ip to this server. + + If the server has a floating ip it does not need another one. + + If the server does not have a fixed ip address it does not need a + floating ip. + + If self.private then the server does not need a floating ip. + + If the cloud runs nova, and the server has a private address and not a + public address, then the server needs a floating ip. + + If the server has a fixed ip address and no floating ip address and the + cloud has a network from which floating IPs come that is connected via + a router to the network from which the fixed ip address came, + then the server needs a floating ip. 
+ + If the server has a fixed ip address and no floating ip address and the + cloud does not have a network from which floating ips come, or it has + one but that network is not connected to the network from which + the server's fixed ip address came via a router, then the + server does not need a floating ip. + """ + if not self._has_floating_ips(): + return False + + if server['addresses'] is None: + # fetch missing server details, e.g. because + # meta.add_server_interfaces() was not called + server = self.compute.get_server(server) + + if server['public_v4'] or any( + [ + any( + [ + address['OS-EXT-IPS:type'] == 'floating' + for address in addresses + ] + ) + for addresses in (server['addresses'] or {}).values() + ] + ): + return False + + if not server['private_v4'] and not any( + [ + any( + [ + address['OS-EXT-IPS:type'] == 'fixed' + for address in addresses + ] + ) + for addresses in (server['addresses'] or {}).values() + ] + ): + return False + + if self.private: + return False + + if not self.has_service('network'): + return True + + # No floating ip network - no FIPs + try: + self._get_floating_network_id() + except exceptions.SDKException: + return False + + (port_obj, fixed_ip_address) = self._nat_destination_port( + server, nat_destination=nat_destination + ) + + if not port_obj or not fixed_ip_address: + return False + + return True + + def _nat_destination_port( + self, server, fixed_address=None, nat_destination=None + ): + """Returns server port that is on a nat_destination network + + Find a port attached to the server which is on a network which + has a subnet which can be the destination of NAT. Such a network + is referred to in shade as a "nat_destination" network. So this + then is a function which returns a port on such a network that is + associated with the given server. + + :param server: Server dict. + :param fixed_address: Fixed ip address of the port + :param nat_destination: Name or ID of the network of the port. 
+ """ + ports = list(self.network.ports(device_id=server['id'])) + if not ports: + return (None, None) + + port = None + if not fixed_address: + if len(ports) > 1: + if nat_destination: + nat_network = self.network.find_network( + nat_destination, ignore_missing=True + ) + if not nat_network: + raise exceptions.SDKException( + f'NAT Destination {nat_destination} was ' + f'configured but not found on the cloud. Please ' + f'check your config and your cloud and try again.' + ) + else: + nat_network = self.get_nat_destination() + + if not nat_network: + raise exceptions.SDKException( + f'Multiple ports were found for server {server["id"]} ' + f'but none of the networks are a valid NAT ' + f'destination, so it is impossible to add a ' + f'floating IP. If you have a network that is a valid ' + f'destination for NAT and we could not find it, ' + f'please file a bug. But also configure the ' + f'nat_destination property of the networks list in ' + f'your clouds.yaml file. If you do not have a ' + f'clouds.yaml file, please make one - your setup ' + f'is complicated.' + ) + + maybe_ports = [] + for maybe_port in ports: + if maybe_port['network_id'] == nat_network['id']: + maybe_ports.append(maybe_port) + if not maybe_ports: + raise exceptions.SDKException( + f'No port on server {server["id"]} was found matching ' + f'your NAT destination network {nat_network["name"]}.' + f'Please check your config' + ) + ports = maybe_ports + + # Select the most recent available IPv4 address + # To do this, sort the ports in reverse order by the created_at + # field which is a string containing an ISO DateTime (which + # thankfully sort properly) This way the most recent port created, + # if there are more than one, will be the arbitrary port we + # select. 
+ for port in sorted( + ports, key=lambda p: p.get('created_at', 0), reverse=True + ): + for address in port.get('fixed_ips', list()): + try: + ip = ipaddress.ip_address(address['ip_address']) + except Exception: # noqa: S112 + # the address might be unset; ignore if so + continue + if ip.version == 4: + fixed_address = address['ip_address'] + return port, fixed_address + raise exceptions.SDKException( + "unable to find a free fixed IPv4 address for server " + "{}".format(server['id']) + ) + # unfortunately a port can have more than one fixed IP: + # we can't use the search_ports filtering for fixed_address as + # they are contained in a list. e.g. + # + # "fixed_ips": [ + # { + # "subnet_id": "008ba151-0b8c-4a67-98b5-0d2b87666062", + # "ip_address": "172.24.4.2" + # } + # ] + # + # Search fixed_address + for p in ports: + for fixed_ip in p['fixed_ips']: + if fixed_address == fixed_ip['ip_address']: + return (p, fixed_address) + return (None, None) + + def _has_floating_ips(self): + if not self._floating_ip_source: + return False + else: + return self._floating_ip_source in ('nova', 'neutron') + + def _use_neutron_floating(self): + return ( + self.has_service('network') + and self._floating_ip_source == 'neutron' + ) + + def _normalize_floating_ips(self, ips): + """Normalize the structure of floating IPs + + Unfortunately, not all the Neutron floating_ip attributes are available + with Nova and not all Nova floating_ip attributes are available with + Neutron. + This function extract attributes that are common to Nova and Neutron + floating IP resource. + If the whole structure is needed inside openstacksdk there are private + methods that returns "original" objects (e.g. + _neutron_allocate_floating_ip) + + :param list ips: A list of Neutron floating IPs. 
+ + :returns: + A list of normalized dicts with the following attributes:: + + [ + { + "id": "this-is-a-floating-ip-id", + "fixed_ip_address": "192.0.2.10", + "floating_ip_address": "198.51.100.10", + "network": "this-is-a-net-or-pool-id", + "attached": True, + "status": "ACTIVE", + }, + ..., + ] + + """ + return [self._normalize_floating_ip(ip) for ip in ips] + + def _normalize_floating_ip(self, ip): + # Copy incoming floating ip because of shared dicts in unittests + # Only import munch when we really need it + + location = self._get_current_location(project_id=ip.get('owner')) + # This copy is to keep things from getting epically weird in tests + ip = ip.copy() + + ret = utils.Munch(location=location) + + fixed_ip_address = ip.pop('fixed_ip_address', ip.pop('fixed_ip', None)) + floating_ip_address = ip.pop('floating_ip_address', ip.pop('ip', None)) + network_id = ip.pop( + 'floating_network_id', ip.pop('network', ip.pop('pool', None)) + ) + project_id = ip.pop('tenant_id', '') + project_id = ip.pop('project_id', project_id) + + instance_id = ip.pop('instance_id', None) + router_id = ip.pop('router_id', None) + id = ip.pop('id') + port_id = ip.pop('port_id', None) + created_at = ip.pop('created_at', None) + updated_at = ip.pop('updated_at', None) + # Note - description may not always be on the underlying cloud. + # Normalizing it here is easy - what do we do when people want to + # set a description? 
+ description = ip.pop('description', '') + revision_number = ip.pop('revision_number', None) + + if self._use_neutron_floating(): + attached = bool(port_id) + status = ip.pop('status', 'UNKNOWN') + else: + attached = bool(instance_id) + # In neutron's terms, Nova floating IPs are always ACTIVE + status = 'ACTIVE' + + ret = utils.Munch( + attached=attached, + fixed_ip_address=fixed_ip_address, + floating_ip_address=floating_ip_address, + id=id, + location=self._get_current_location(project_id=project_id), + network=network_id, + port=port_id, + router=router_id, + status=status, + created_at=created_at, + updated_at=updated_at, + description=description, + revision_number=revision_number, + properties=ip.copy(), + ) + # Backwards compat + if not self.strict_mode: + ret['port_id'] = port_id + ret['router_id'] = router_id + ret['project_id'] = project_id + ret['tenant_id'] = project_id + ret['floating_network_id'] = network_id + for key, val in ret['properties'].items(): + ret.setdefault(key, val) + + return ret + + # security groups + + def search_security_groups(self, name_or_id=None, filters=None): + # `filters` could be a dict or a jmespath (str) + groups = self.list_security_groups( + filters=filters if isinstance(filters, dict) else None + ) + return _utils._filter_list(groups, name_or_id, filters) + + def list_security_groups(self, filters=None): + """List all available security groups. + + :param filters: (optional) dict of filter conditions to push down + :returns: A list of security group + ``openstack.network.v2.security_group.SecurityGroup``. 
+ + """ + # Security groups not supported + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + if not filters: + filters = {} + + # Handle neutron security groups + if self._use_neutron_secgroups(): + # pass filters dict to the list to filter as much as possible on + # the server side + return list(self.network.security_groups(**filters)) + # Handle nova security groups + else: + data = proxy._json_response( + self.compute.get('/os-security-groups', params=filters) + ) + return self._normalize_secgroups( + self._get_and_munchify('security_groups', data) + ) + + def get_security_group(self, name_or_id, filters=None): + """Get a security group by name or ID. + + :param name_or_id: Name or ID of the security group. + :param filters: + A dictionary of meta data to use for further filtering. Elements + of this dictionary may, themselves, be dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A security group + ``openstack.network.v2.security_group.SecurityGroup`` + or None if no matching security group is found. + + """ + return _utils._get_entity(self, 'security_group', name_or_id, filters) + + def get_security_group_by_id(self, id): + """Get a security group by ID + + :param id: ID of the security group. + :returns: A security group + ``openstack.network.v2.security_group.SecurityGroup``. 
+ """ + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + error_message = f"Error getting security group with ID {id}" + if self._use_neutron_secgroups(): + return self.network.get_security_group(id) + else: + data = proxy._json_response( + self.compute.get(f'/os-security-groups/{id}'), + error_message=error_message, + ) + return self._normalize_secgroup( + self._get_and_munchify('security_group', data) + ) + + def create_security_group( + self, name, description, project_id=None, stateful=None + ): + """Create a new security group + + :param string name: A name for the security group. + :param string description: Describes the security group. + :param string project_id: + Specify the project ID this security group will be created + on (admin-only). + :param string stateful: Whether the security group is stateful or not. + + :returns: A ``openstack.network.v2.security_group.SecurityGroup`` + representing the new security group. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + :raises: OpenStackCloudUnavailableFeature if security groups are + not supported on this cloud. 
+ """ + + # Security groups not supported + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + security_group_json = {'name': name, 'description': description} + if stateful is not None: + security_group_json['stateful'] = stateful + if project_id is not None: + security_group_json['tenant_id'] = project_id + if self._use_neutron_secgroups(): + return self.network.create_security_group(**security_group_json) + else: + data = proxy._json_response( + self.compute.post( + '/os-security-groups', + json={'security_group': security_group_json}, + ) + ) + return self._normalize_secgroup( + self._get_and_munchify('security_group', data) + ) + + def delete_security_group(self, name_or_id): + """Delete a security group + + :param string name_or_id: The name or unique ID of the security group. + + :returns: True if delete succeeded, False otherwise. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + :raises: OpenStackCloudUnavailableFeature if security groups are + not supported on this cloud. + """ + # Security groups not supported + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + # TODO(mordred): Let's come back and stop doing a GET before we do + # the delete. 
+ secgroup = self.get_security_group(name_or_id) + if secgroup is None: + self.log.debug( + 'Security group %s not found for deleting', name_or_id + ) + return False + + if self._use_neutron_secgroups(): + self.network.delete_security_group( + secgroup['id'], ignore_missing=False + ) + return True + + else: + proxy._json_response( + self.compute.delete( + '/os-security-groups/{id}'.format(id=secgroup['id']) + ) + ) + return True + + @_utils.valid_kwargs('name', 'description', 'stateful') + def update_security_group(self, name_or_id, **kwargs): + """Update a security group + + :param string name_or_id: Name or ID of the security group to update. + :param string name: New name for the security group. + :param string description: New description for the security group. + + :returns: A ``openstack.network.v2.security_group.SecurityGroup`` + describing the updated security group. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + # Security groups not supported + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + group = self.get_security_group(name_or_id) + + if group is None: + raise exceptions.SDKException( + f"Security group {name_or_id} not found." 
+ ) + + if self._use_neutron_secgroups(): + return self.network.update_security_group(group['id'], **kwargs) + else: + for key in ('name', 'description'): + kwargs.setdefault(key, group[key]) + data = proxy._json_response( + self.compute.put( + '/os-security-groups/{id}'.format(id=group['id']), + json={'security_group': kwargs}, + ) + ) + return self._normalize_secgroup( + self._get_and_munchify('security_group', data) + ) + + def create_security_group_rule( + self, + secgroup_name_or_id, + port_range_min=None, + port_range_max=None, + protocol=None, + remote_ip_prefix=None, + remote_group_id=None, + remote_address_group_id=None, + direction='ingress', + ethertype='IPv4', + project_id=None, + description=None, + ): + """Create a new security group rule + + :param string secgroup_name_or_id: + The security group name or ID to associate with this security + group rule. If a non-unique group name is given, an exception + is raised. + :param int port_range_min: + The minimum port number in the range that is matched by the + security group rule. If the protocol is TCP or UDP, this value + must be less than or equal to the port_range_max attribute value. + If nova is used by the cloud provider for security groups, then + a value of None will be transformed to -1. + :param int port_range_max: + The maximum port number in the range that is matched by the + security group rule. The port_range_min attribute constrains the + port_range_max attribute. If nova is used by the cloud provider + for security groups, then a value of None will be transformed + to -1. + :param string protocol: + The protocol that is matched by the security group rule. Valid + values are None, tcp, udp, and icmp. + :param string remote_ip_prefix: + The remote IP prefix to be associated with this security group + rule. This attribute matches the specified IP prefix as the + source IP address of the IP packet. 
+ :param string remote_group_id: + The remote group ID to be associated with this security group + rule. + :param string remote_address_group_id: + The remote address group ID to be associated with this security + group rule. + :param string direction: + Ingress or egress: The direction in which the security group + rule is applied. For a compute instance, an ingress security + group rule is applied to incoming (ingress) traffic for that + instance. An egress rule is applied to traffic leaving the + instance. + :param string ethertype: + Must be IPv4 or IPv6, and addresses represented in CIDR must + match the ingress or egress rules. + :param string project_id: + Specify the project ID this security group will be created + on (admin-only). + :param string description: + Description of the rule, max 255 characters. + + :returns: A ``openstack.network.v2.security_group.SecurityGroup`` + representing the new security group rule. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + # Security groups not supported + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + secgroup = self.get_security_group(secgroup_name_or_id) + if not secgroup: + raise exceptions.SDKException( + f"Security group {secgroup_name_or_id} not found." + ) + + if self._use_neutron_secgroups(): + # NOTE: Nova accepts -1 port numbers, but Neutron accepts None + # as the equivalent value. 
+ rule_def = { + 'security_group_id': secgroup['id'], + 'port_range_min': ( + None if port_range_min == -1 else port_range_min + ), + 'port_range_max': ( + None if port_range_max == -1 else port_range_max + ), + 'protocol': protocol, + 'remote_ip_prefix': remote_ip_prefix, + 'remote_group_id': remote_group_id, + 'remote_address_group_id': remote_address_group_id, + 'direction': direction, + 'ethertype': ethertype, + } + if project_id is not None: + rule_def['tenant_id'] = project_id + if description is not None: + rule_def["description"] = description + return self.network.create_security_group_rule(**rule_def) + else: + # NOTE: Neutron accepts None for protocol. Nova does not. + if protocol is None: + raise exceptions.SDKException('Protocol must be specified') + + if direction == 'egress': + self.log.debug( + 'Rule creation failed: Nova does not support egress rules' + ) + raise exceptions.SDKException('No support for egress rules') + + # NOTE: Neutron accepts None for ports, but Nova requires -1 + # as the equivalent value for ICMP. + # + # For TCP/UDP, if both are None, Neutron allows this and Nova + # represents this as all ports (1-65535). Nova does not accept + # None values, so to hide this difference, we will automatically + # convert to the full port range. If only a single port value is + # specified, it will error as normal. 
+ if protocol == 'icmp': + if port_range_min is None: + port_range_min = -1 + if port_range_max is None: + port_range_max = -1 + elif protocol in ['tcp', 'udp']: + if port_range_min is None and port_range_max is None: + port_range_min = 1 + port_range_max = 65535 + + security_group_rule_dict = dict( + security_group_rule=dict( + parent_group_id=secgroup['id'], + ip_protocol=protocol, + from_port=port_range_min, + to_port=port_range_max, + cidr=remote_ip_prefix, + group_id=remote_group_id, + ) + ) + if project_id is not None: + security_group_rule_dict['security_group_rule'][ + 'tenant_id' + ] = project_id + data = proxy._json_response( + self.compute.post( + '/os-security-group-rules', json=security_group_rule_dict + ) + ) + return self._normalize_secgroup_rule( + self._get_and_munchify('security_group_rule', data) + ) + + def delete_security_group_rule(self, rule_id): + """Delete a security group rule + + :param string rule_id: The unique ID of the security group rule. + + :returns: True if delete succeeded, False otherwise. + + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + :raises: OpenStackCloudUnavailableFeature if security groups are + not supported on this cloud. 
+ """ + # Security groups not supported + if not self._has_secgroups(): + raise exc.OpenStackCloudUnavailableFeature( + "Unavailable feature: security groups" + ) + + if self._use_neutron_secgroups(): + self.network.delete_security_group_rule( + rule_id, ignore_missing=False + ) + return True + + else: + try: + exceptions.raise_from_response( + self.compute.delete(f'/os-security-group-rules/{rule_id}') + ) + except exceptions.NotFoundException: + return False + + return True + + def _has_secgroups(self): + if not self.secgroup_source: + return False + else: + return self.secgroup_source.lower() in ('nova', 'neutron') + + def _use_neutron_secgroups(self): + return ( + self.has_service('network') and self.secgroup_source == 'neutron' + ) + + def _normalize_secgroups(self, groups): + """Normalize the structure of security groups + + This makes security group dicts, as returned from nova, look like the + security group dicts as returned from neutron. This does not make them + look exactly the same, but it's pretty close. + + :param list groups: A list of security group dicts. + + :returns: A list of normalized dicts. 
+ """ + ret = [] + for group in groups: + ret.append(self._normalize_secgroup(group)) + return ret + + # TODO(stephenfin): Remove this once we get rid of support for nova + # secgroups + def _normalize_secgroup(self, group): + ret = utils.Munch() + # Copy incoming group because of shared dicts in unittests + group = group.copy() + + # Discard noise + self._remove_novaclient_artifacts(group) + + rules = self._normalize_secgroup_rules( + group.pop('security_group_rules', group.pop('rules', [])) + ) + project_id = group.pop('tenant_id', '') + project_id = group.pop('project_id', project_id) + + ret['location'] = self._get_current_location(project_id=project_id) + ret['id'] = group.pop('id') + ret['name'] = group.pop('name') + ret['security_group_rules'] = rules + ret['description'] = group.pop('description') + ret['properties'] = group + + if self._use_neutron_secgroups(): + ret['stateful'] = group.pop('stateful', True) + + # Backwards compat with Neutron + if not self.strict_mode: + ret['tenant_id'] = project_id + ret['project_id'] = project_id + for key, val in ret['properties'].items(): + ret.setdefault(key, val) + + return ret + + # TODO(stephenfin): Remove this once we get rid of support for nova + # secgroups + def _normalize_secgroup_rules(self, rules): + """Normalize the structure of nova security group rules + + Note that nova uses -1 for non-specific port values, but neutron + represents these with None. + + :param list rules: A list of security group rule dicts. + + :returns: A list of normalized dicts. 
+ """ + ret = [] + for rule in rules: + ret.append(self._normalize_secgroup_rule(rule)) + return ret + + # TODO(stephenfin): Remove this once we get rid of support for nova + # secgroups + def _normalize_secgroup_rule(self, rule): + ret = utils.Munch() + # Copy incoming rule because of shared dicts in unittests + rule = rule.copy() + + ret['id'] = rule.pop('id') + ret['direction'] = rule.pop('direction', 'ingress') + ret['ethertype'] = rule.pop('ethertype', 'IPv4') + port_range_min = rule.get( + 'port_range_min', rule.pop('from_port', None) + ) + if port_range_min == -1: + port_range_min = None + if port_range_min is not None: + port_range_min = int(port_range_min) + ret['port_range_min'] = port_range_min + port_range_max = rule.pop('port_range_max', rule.pop('to_port', None)) + if port_range_max == -1: + port_range_max = None + if port_range_min is not None: + port_range_min = int(port_range_min) + ret['port_range_max'] = port_range_max + ret['protocol'] = rule.pop('protocol', rule.pop('ip_protocol', None)) + ret['remote_ip_prefix'] = rule.pop( + 'remote_ip_prefix', rule.pop('ip_range', {}).get('cidr', None) + ) + ret['security_group_id'] = rule.pop( + 'security_group_id', rule.pop('parent_group_id', None) + ) + ret['remote_group_id'] = rule.pop('remote_group_id', None) + project_id = rule.pop('tenant_id', '') + project_id = rule.pop('project_id', project_id) + ret['location'] = self._get_current_location(project_id=project_id) + ret['properties'] = rule + + # Backwards compat with Neutron + if not self.strict_mode: + ret['tenant_id'] = project_id + ret['project_id'] = project_id + for key, val in ret['properties'].items(): + ret.setdefault(key, val) + return ret + + def _remove_novaclient_artifacts(self, item): + # Remove novaclient artifacts + item.pop('links', None) + item.pop('NAME_ATTR', None) + item.pop('HUMAN_ID', None) + item.pop('human_id', None) + item.pop('request_ids', None) + item.pop('x_openstack_request_ids', None) diff --git 
a/openstack/cloud/_object_store.py b/openstack/cloud/_object_store.py new file mode 100644 index 0000000000..350f257757 --- /dev/null +++ b/openstack/cloud/_object_store.py @@ -0,0 +1,518 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import concurrent.futures +import urllib.parse +import warnings + +import keystoneauth1.exceptions + +from openstack.cloud import _utils +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack import warnings as os_warnings + + +OBJECT_CONTAINER_ACLS = { + 'public': '.r:*,.rlistings', + 'private': '', +} + + +class ObjectStoreCloudMixin(openstackcloud._OpenStackCloudMixin): + def list_containers(self, full_listing=None, prefix=None): + """List containers. + + :param full_listing: Ignored. Present for backwards compat + :param prefix: Only objects with this prefix will be returned. + (optional) + :returns: A list of object store ``Container`` objects. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + if full_listing is not None: + warnings.warn( + "The 'full_listing' field is unnecessary and will be removed " + "in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return list(self.object_store.containers(prefix=prefix)) + + def search_containers(self, name=None, filters=None): + """Search containers. + + :param string name: Container name. + :param filters: A dict containing additional filters to use. 
+ OR + A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of object store ``Container`` objects matching the + search criteria. + :raises: :class:`~openstack.exceptions.SDKException`: If something goes + wrong during the OpenStack API call. + """ + containers = self.list_containers() + return _utils._filter_list(containers, name, filters) + + # TODO(stephenfin): Remove 'skip_cache' as it no longer does anything + def get_container(self, name, skip_cache=False): + """Get metadata about a container. + + :param str name: + Name of the container to get metadata for. + :param bool skip_cache: Ignored. Present for backwards compatibility. + :returns: An object store ``Container`` object if found, else None. + """ + try: + return self.object_store.get_container_metadata(name) + except exceptions.HttpException as ex: + if ex.response.status_code == 404: + return None + raise + + def create_container(self, name, public=False): + """Create an object-store container. + + :param str name: Name of the container to create. + :param bool public: Whether to set this container to be public. + Defaults to ``False``. + :returns: The created object store ``Container`` object. + """ + container = self.get_container(name) + if container: + return container + attrs = dict(name=name) + if public: + attrs['read_ACL'] = OBJECT_CONTAINER_ACLS['public'] + container = self.object_store.create_container(**attrs) + return self.get_container(name, skip_cache=True) + + def delete_container(self, name): + """Delete an object-store container. + + :param str name: Name of the container to delete. + """ + try: + self.object_store.delete_container(name, ignore_missing=False) + return True + except exceptions.NotFoundException: + return False + except exceptions.ConflictException: + raise exceptions.SDKException( + f'Attempt to delete container {name} failed. The ' + f'container is not empty. 
Please delete the objects ' + f'inside it before deleting the container' + ) + + def update_container(self, name, headers): + """Update the metadata in a container. + + :param str name: Name of the container to update. + :param dict headers: Key/Value headers to set on the container. + """ + self.object_store.set_container_metadata( + name, refresh=False, **headers + ) + + def set_container_access(self, name, access, refresh=False): + """Set the access control list on a container. + + :param str name: Name of the container. + :param str access: ACL string to set on the container. Can also be + ``public`` or ``private`` which will be translated into appropriate + ACL strings. + :param refresh: Flag to trigger refresh of the container properties + """ + if access not in OBJECT_CONTAINER_ACLS: + raise exceptions.SDKException( + f"Invalid container access specified: {access}. " + f"Must be one of {list(OBJECT_CONTAINER_ACLS.keys())}" + ) + return self.object_store.set_container_metadata( + name, read_ACL=OBJECT_CONTAINER_ACLS[access], refresh=refresh + ) + + def get_container_access(self, name): + """Get the control list from a container. + + :param str name: Name of the container. + :returns: The contol list for the container. + :raises: :class:`~openstack.exceptions.SDKException` if the container + was not found or container access could not be determined. + """ + container = self.get_container(name, skip_cache=True) + if not container: + raise exceptions.SDKException(f"Container not found: {name}") + acl = container.read_ACL or '' + for key, value in OBJECT_CONTAINER_ACLS.items(): + # Convert to string for the comparison because swiftclient + # returns byte values as bytes sometimes and apparently == + # on bytes doesn't work like you'd think + if str(acl) == str(value): + return key + raise exceptions.SDKException( + f"Could not determine container access for ACL: {acl}." 
+ ) + + def get_object_capabilities(self): + """Get infomation about the object-storage service + + The object-storage service publishes a set of capabilities that + include metadata about maximum values and thresholds. + + :returns: An object store ``Info`` object. + """ + return self.object_store.get_info() + + def get_object_segment_size(self, segment_size): + """Get a segment size that will work given capabilities. + + :param segment_size: + :returns: A segment size. + """ + return self.object_store.get_object_segment_size(segment_size) + + def is_object_stale( + self, container, name, filename, file_md5=None, file_sha256=None + ): + """Check to see if an object matches the hashes of a file. + + :param container: Name of the container. + :param name: Name of the object. + :param filename: Path to the file. + :param file_md5: Pre-calculated md5 of the file contents. Defaults to + None which means calculate locally. + :param file_sha256: Pre-calculated sha256 of the file contents. + Defaults to None which means calculate locally. + """ + return self.object_store.is_object_stale( + container, + name, + filename, + file_md5=file_md5, + file_sha256=file_sha256, + ) + + def create_directory_marker_object(self, container, name, **headers): + """Create a zero-byte directory marker object + + .. note:: + + This method is not needed in most cases. Modern swift does not + require directory marker objects. However, some swift installs may + need these. + + When using swift Static Web and Web Listings to serve static content + one may need to create a zero-byte object to represent each + "directory". Doing so allows Web Listings to generate an index of the + objects inside of it, and allows Static Web to render index.html + "files" that are "inside" the directory. + + :param container: The name of the container. + :param name: Name for the directory marker object within the container. 
+ :param headers: These will be passed through to the object creation + API as HTTP Headers. + :returns: The created object store ``Object`` object. + """ + headers['content-type'] = 'application/directory' + + return self.create_object( + container, name, data='', generate_checksums=False, **headers + ) + + def create_object( + self, + container, + name, + filename=None, + md5=None, + sha256=None, + segment_size=None, + use_slo=True, + metadata=None, + generate_checksums=None, + data=None, + **headers, + ): + """Create a file object. + + Automatically uses large-object segments if needed. + + :param container: The name of the container to store the file in. + This container will be created if it does not exist already. + :param name: Name for the object within the container. + :param filename: The path to the local file whose contents will be + uploaded. Mutually exclusive with data. + :param data: The content to upload to the object. Mutually exclusive + with filename. + :param md5: A hexadecimal md5 of the file. (Optional), if it is known + and can be passed here, it will save repeating the expensive md5 + process. It is assumed to be accurate. + :param sha256: A hexadecimal sha256 of the file. (Optional) See md5. + :param segment_size: Break the uploaded object into segments of this + many bytes. (Optional) Shade will attempt to discover the maximum + value for this from the server if it is not specified, or will use + a reasonable default. + :param headers: These will be passed through to the object creation + API as HTTP Headers. + :param use_slo: If the object is large enough to need to be a Large + Object, use a static rather than dynamic object. Static Objects + will delete segment objects when the manifest object is deleted. + (optional, defaults to True) + :param generate_checksums: Whether to generate checksums on the client + side that get added to headers for later prevention of double + uploads of identical data. 
(optional, defaults to True) + :param metadata: This dict will get changed into headers that set + metadata of the object + + :returns: The created object store ``Object`` object. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + return self.object_store.create_object( + container, + name, + filename=filename, + data=data, + md5=md5, + sha256=sha256, + use_slo=use_slo, + generate_checksums=generate_checksums, + metadata=metadata, + **headers, + ) + + def update_object(self, container, name, metadata=None, **headers): + """Update the metadata of an object + + :param container: The name of the container the object is in + :param name: Name for the object within the container. + :param metadata: This dict will get changed into headers that set + metadata of the object + :param headers: These will be passed through to the object update + API as HTTP Headers. + + :returns: None + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + meta = metadata.copy() or {} + meta.update(**headers) + self.object_store.set_object_metadata(name, container, **meta) + + def list_objects(self, container, full_listing=True, prefix=None): + """List objects. + + :param container: Name of the container to list objects in. + :param full_listing: Ignored. Present for backwards compat + :param prefix: Only objects with this prefix will be returned. + (optional) + + :returns: A list of object store ``Object`` objects. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + return list( + self.object_store.objects(container=container, prefix=prefix) + ) + + def search_objects(self, container, name=None, filters=None): + """Search objects. + + :param string name: Object name. + :param filters: A dict containing additional filters to use. + OR + A string containing a jmespath expression for further filtering. 
+ Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + + :returns: A list of object store ``Object`` objects matching the + search criteria. + :raises: :class:`~openstack.exceptions.SDKException`: If something goes + wrong during the OpenStack API call. + """ + objects = self.list_objects(container) + return _utils._filter_list(objects, name, filters) + + def delete_object(self, container, name, meta=None): + """Delete an object from a container. + + :param string container: Name of the container holding the object. + :param string name: Name of the object to delete. + :param dict meta: Metadata for the object in question. (optional, will + be fetched if not provided) + + :returns: True if delete succeeded, False if the object was not found. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + try: + self.object_store.delete_object( + name, + ignore_missing=False, + container=container, + ) + return True + except exceptions.SDKException: + return False + + def delete_autocreated_image_objects( + self, + container=None, + segment_prefix=None, + ): + """Delete all objects autocreated for image uploads. + + This method should generally not be needed, as shade should clean up + the objects it uses for object-based image creation. If something + goes wrong and it is found that there are leaked objects, this method + can be used to delete any objects that shade has created on the user's + behalf in service of image uploads. + + :param str container: Name of the container. Defaults to 'images'. + :param str segment_prefix: Prefix for the image segment names to + delete. If not given, all image upload segments present are + deleted. + :returns: True if deletion was successful, else False. + """ + return self.object_store._delete_autocreated_image_objects( + container, segment_prefix=segment_prefix + ) + + def get_object_metadata(self, container, name): + """Get object metadata. 
+ + :param container: + :param name: + :returns: The object metadata. + """ + return self.object_store.get_object_metadata(name, container).metadata + + def get_object_raw(self, container, obj, query_string=None, stream=False): + """Get a raw response object for an object. + + :param string container: Name of the container. + :param string obj: Name of the object. + :param string query_string: Query args for uri. (delimiter, prefix, + etc.) + :param bool stream: Whether to stream the response or not. + + :returns: A `requests.Response` + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + endpoint = self._get_object_endpoint(container, obj, query_string) + return self.object_store.get(endpoint, stream=stream) + + def _get_object_endpoint(self, container, obj=None, query_string=None): + endpoint = urllib.parse.quote(container) + if obj: + endpoint = f'{endpoint}/{urllib.parse.quote(obj)}' + if query_string: + endpoint = f'{endpoint}?{query_string}' + return endpoint + + def stream_object( + self, + container, + obj, + query_string=None, + resp_chunk_size=1024, + ): + """Download the content via a streaming iterator. + + :param string container: Name of the container. + :param string obj: Name of the object. + :param string query_string: Query args for uri. (delimiter, prefix, + etc.) + :param int resp_chunk_size: Chunk size of data to read. Only used if + the results are + + :returns: An iterator over the content or None if the object is not + found. + :raises: :class:`~openstack.exceptions.SDKException` on operation + error. + """ + try: + yield from self.object_store.stream_object( + obj, container, chunk_size=resp_chunk_size + ) + except exceptions.NotFoundException: + return + + def get_object( + self, + container, + obj, + query_string=None, + resp_chunk_size=1024, + outfile=None, + stream=False, + ): + """Get the headers and body of an object + + :param string container: Name of the container. 
    def _wait_for_futures(self, futures, raise_on_error=True):
        """Collect results or failures from a list of running future tasks.

        :param futures: An iterable of :class:`concurrent.futures.Future`
            objects to wait on.
        :param bool raise_on_error: If True, log and re-raise the first
            failure encountered; if False, log at debug level and collect
            the failed task's response so the caller can retry it.
        :returns: Tuple ``(results, retries)`` — successful responses, and
            responses whose tasks failed and may be retried.
        """
        results = []
        retries = []

        # Check on each result as its thread finishes
        for completed in concurrent.futures.as_completed(futures):
            try:
                result = completed.result()
                # Turns an error HTTP response into an HttpException.
                exceptions.raise_from_response(result)
                results.append(result)
            except (
                keystoneauth1.exceptions.RetriableConnectionFailure,
                exceptions.HttpException,
            ) as e:
                error_text = f"Exception processing async task: {e!s}"
                if raise_on_error:
                    self.log.exception(error_text)
                    raise
                else:
                    self.log.debug(error_text)
                    # If we get an exception, put the result into a list so we
                    # can try again
                    # NOTE(review): if the failure came from
                    # completed.result() itself (RetriableConnectionFailure),
                    # calling .result() again here re-raises that exception
                    # uncaught — TODO confirm this path only expects
                    # HttpException raised by raise_from_response, in which
                    # case the second .result() call returns the response.
                    retries.append(completed.result())
        return results, retries
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.cloud import _utils +from openstack.cloud import openstackcloud +from openstack import exceptions +from openstack.orchestration.util import event_utils + + +class OrchestrationCloudMixin(openstackcloud._OpenStackCloudMixin): + def get_template_contents( + self, + template_file=None, + template_url=None, + template_object=None, + files=None, + ): + return self.orchestration.get_template_contents( + template_file=template_file, + template_url=template_url, + template_object=template_object, + files=files, + ) + + def create_stack( + self, + name, + tags=None, + template_file=None, + template_url=None, + template_object=None, + files=None, + rollback=True, + wait=False, + timeout=3600, + environment_files=None, + **parameters, + ): + """Create a stack. + + :param string name: Name of the stack. + :param tags: List of tag(s) of the stack. (optional) + :param string template_file: Path to the template. + :param string template_url: URL of template. + :param string template_object: URL to retrieve template object. + :param dict files: dict of additional file content to include. + :param boolean rollback: Enable rollback on create failure. + :param boolean wait: Whether to wait for the delete to finish. + :param int timeout: Stack create timeout in seconds. + :param environment_files: Paths to environment files to apply. 
+ + Other arguments will be passed as stack parameters which will take + precedence over any parameters specified in the environments. + + Only one of template_file, template_url, template_object should be + specified. + + :returns: a dict containing the stack description + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + params = dict( + tags=tags, + is_rollback_disabled=not rollback, + timeout_mins=timeout // 60, + parameters=parameters, + ) + params.update( + self.orchestration.read_env_and_templates( + template_file=template_file, + template_url=template_url, + template_object=template_object, + files=files, + environment_files=environment_files, + ) + ) + self.orchestration.create_stack(name=name, **params) + if wait: + event_utils.poll_for_events(self, stack_name=name, action='CREATE') + return self.get_stack(name) + + def update_stack( + self, + name_or_id, + template_file=None, + template_url=None, + template_object=None, + files=None, + rollback=True, + tags=None, + wait=False, + timeout=3600, + environment_files=None, + **parameters, + ): + """Update a stack. + + :param string name_or_id: Name or ID of the stack to update. + :param string template_file: Path to the template. + :param string template_url: URL of template. + :param string template_object: URL to retrieve template object. + :param dict files: dict of additional file content to include. + :param boolean rollback: Enable rollback on update failure. + :param boolean wait: Whether to wait for the delete to finish. + :param int timeout: Stack update timeout in seconds. + :param environment_files: Paths to environment files to apply. + + Other arguments will be passed as stack parameters which will take + precedence over any parameters specified in the environments. + + Only one of template_file, template_url, template_object should be + specified. 
+ + :returns: a dict containing the stack description + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API calls + """ + params = dict( + tags=tags, + is_rollback_disabled=not rollback, + timeout_mins=timeout // 60, + parameters=parameters, + ) + params.update( + self.orchestration.read_env_and_templates( + template_file=template_file, + template_url=template_url, + template_object=template_object, + files=files, + environment_files=environment_files, + ) + ) + if wait: + # find the last event to use as the marker + events = event_utils.get_events( + self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1} + ) + marker = events[0].id if events else None + + # Not to cause update of ID field pass stack as dict + self.orchestration.update_stack(stack={'id': name_or_id}, **params) + + if wait: + event_utils.poll_for_events( + self, name_or_id, action='UPDATE', marker=marker + ) + return self.get_stack(name_or_id) + + def delete_stack(self, name_or_id, wait=False): + """Delete a stack + + :param string name_or_id: Stack name or ID. + :param boolean wait: Whether to wait for the delete to finish + + :returns: True if delete succeeded, False if the stack was not found. 
+ :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call + """ + stack = self.get_stack(name_or_id, resolve_outputs=False) + if stack is None: + self.log.debug("Stack %s not found for deleting", name_or_id) + return False + + if wait: + # find the last event to use as the marker + events = event_utils.get_events( + self, name_or_id, event_args={'sort_dir': 'desc', 'limit': 1} + ) + marker = events[0].id if events else None + + self.orchestration.delete_stack(stack) + + if wait: + try: + event_utils.poll_for_events( + self, stack_name=name_or_id, action='DELETE', marker=marker + ) + except exceptions.HttpException: + pass + stack = self.get_stack(name_or_id, resolve_outputs=False) + if stack and stack['stack_status'] == 'DELETE_FAILED': + raise exceptions.SDKException( + "Failed to delete stack {id}: {reason}".format( + id=name_or_id, reason=stack['stack_status_reason'] + ) + ) + + return True + + def search_stacks(self, name_or_id=None, filters=None): + """Search stacks. + + :param name_or_id: Name or ID of the desired stack. + :param filters: a dict containing additional filters to use. e.g. + {'stack_status': 'CREATE_COMPLETE'} + + :returns: a list of ``openstack.orchestration.v1.stack.Stack`` + containing the stack description. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. + """ + stacks = self.list_stacks() + return _utils._filter_list(stacks, name_or_id, filters) + + def list_stacks(self, **query): + """List all stacks. + + :param dict query: Query parameters to limit stacks. + + :returns: a list of :class:`openstack.orchestration.v1.stack.Stack` + objects containing the stack description. + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call. 
+ """ + return list(self.orchestration.stacks(**query)) + + def get_stack(self, name_or_id, filters=None, resolve_outputs=True): + """Get exactly one stack. + + :param name_or_id: Name or ID of the desired stack. + :param filters: a dict containing additional filters to use. e.g. + {'stack_status': 'CREATE_COMPLETE'} + :param resolve_outputs: If True, then outputs for this + stack will be resolved + + :returns: a :class:`openstack.orchestration.v1.stack.Stack` + containing the stack description + :raises: :class:`~openstack.exceptions.SDKException` if something goes + wrong during the OpenStack API call or if multiple matches are + found. + """ + + def _search_one_stack(name_or_id=None, filters=None): + # stack names are mandatory and enforced unique in the project + # so a StackGet can always be used for name or ID. + try: + stack = self.orchestration.find_stack( + name_or_id, + ignore_missing=False, + resolve_outputs=resolve_outputs, + ) + if stack.status == 'DELETE_COMPLETE': + return [] + except exceptions.NotFoundException: + return [] + return _utils._filter_list([stack], name_or_id, filters) + + return _utils._get_entity(self, _search_one_stack, name_or_id, filters) diff --git a/openstack/cloud/_shared_file_system.py b/openstack/cloud/_shared_file_system.py new file mode 100644 index 0000000000..7f552d46d7 --- /dev/null +++ b/openstack/cloud/_shared_file_system.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack.cloud import openstackcloud + + +class SharedFileSystemCloudMixin(openstackcloud._OpenStackCloudMixin): + def list_share_availability_zones(self): + """List all availability zones for the Shared File Systems service. + + :returns: A list of Shared File Systems Availability Zones. + """ + return list(self.share.availability_zones()) diff --git a/openstack/cloud/_utils.py b/openstack/cloud/_utils.py new file mode 100644 index 0000000000..38f2e075a4 --- /dev/null +++ b/openstack/cloud/_utils.py @@ -0,0 +1,501 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import contextlib +import fnmatch +import inspect +import ipaddress +import re +import socket +import uuid +import warnings + +from decorator import decorator +import jmespath +import psutil + +from openstack import _log +from openstack import exceptions +from openstack import warnings as os_warnings + + +def _dictify_resource(resource): + if isinstance(resource, list): + return [_dictify_resource(r) for r in resource] + else: + if hasattr(resource, 'toDict'): + return resource.toDict() + else: + return resource + + +def _filter_list(data, name_or_id, filters): + """Filter a list by name/ID and arbitrary meta data. + + :param list data: The list of dictionary data to filter. It is expected + that each dictionary contains an 'id' and 'name' key if a value for + name_or_id is given. 
+ :param string name_or_id: The name or ID of the entity being filtered. Can + be a glob pattern, such as 'nb01*'. + :param filters: A dictionary of meta data to use for further filtering. + Elements of this dictionary may, themselves, be dictionaries. Example:: + + {'last_name': 'Smith', 'other': {'gender': 'Female'}} + + OR + + A string containing a jmespath expression for further filtering. + Invalid filters will be ignored. + """ + # The logger is openstack.cloud.fmmatch to allow a user/operator to + # configure logging not to communicate about fnmatch misses + # (they shouldn't be too spammy, but one never knows) + log = _log.setup_logging('openstack.fnmatch') + if name_or_id: + # name_or_id might already be unicode + name_or_id = str(name_or_id) + identifier_matches = [] + bad_pattern = False + try: + fn_reg = re.compile(fnmatch.translate(name_or_id)) + except re.error: + # If the fnmatch re doesn't compile, then we don't care, + # but log it in case the user DID pass a pattern but did + # it poorly and wants to know what went wrong with their + # search + fn_reg = None + for e in data: + e_id = str(e.get('id', None)) + e_name = str(e.get('name', None)) + + if (e_id and e_id == name_or_id) or ( + e_name and e_name == name_or_id + ): + identifier_matches.append(e) + else: + # Only try fnmatch if we don't match exactly + if not fn_reg: + # If we don't have a pattern, skip this, but set the flag + # so that we log the bad pattern + bad_pattern = True + continue + if (e_id and fn_reg.match(e_id)) or ( + e_name and fn_reg.match(e_name) + ): + identifier_matches.append(e) + if not identifier_matches and bad_pattern: + log.debug("Bad pattern passed to fnmatch", exc_info=True) + data = identifier_matches + + if not filters: + return data + + if isinstance(filters, str): + warnings.warn( + 'Support for jmespath-style filters is deprecated and will be ' + 'removed in a future release. 
Consider using dictionary-style ' + 'filters instead.', + os_warnings.RemovedInSDK60Warning, + ) + return jmespath.search(filters, data) + + def _dict_filter(f, d): + if not d: + return False + for key in f.keys(): + if key not in d: + log.warning( + "Invalid filter: %s is not an attribute of %s.%s", + key, + e.__class__.__module__, + e.__class__.__qualname__, + ) + # we intentionally skip this since the user was trying to + # filter on _something_, but we don't know what that + # _something_ was + raise AttributeError(key) + if isinstance(f[key], dict): + if not _dict_filter(f[key], d.get(key, None)): + return False + elif d.get(key, None) != f[key]: + return False + return True + + filtered = [] + for e in data: + if _dict_filter(filters, e): + filtered.append(e) + return filtered + + +def _get_entity(cloud, resource, name_or_id, filters, **kwargs): + """Return a single entity from the list returned by a given method. + + :param object cloud: The controller class (Example: the main OpenStackCloud + object). + :param string or callable resource: The string that identifies the resource + to use to lookup the get_<>_by_id or search_s methods + (Example: network) or a callable to invoke. + :param string name_or_id: The name or ID of the entity being filtered or an + object or dict. If this is an object/dict with an 'id' attr/key, we + return it and bypass resource lookup. + :param filters: A dictionary of meta data to use for further filtering. + OR A string containing a jmespath expression for further filtering. + Example:: "[?last_name==`Smith`] | [?other.gender]==`Female`]" + """ + + # Sometimes in the control flow of openstacksdk, we already have an object + # fetched. Rather than then needing to pull the name or id out of that + # object, pass it in here and rely on caching to prevent us from making + # an additional call, it's simple enough to test to see if we got an + # object and just short-circuit return it. 
+ + if hasattr(name_or_id, 'id') or ( + isinstance(name_or_id, dict) and 'id' in name_or_id + ): + return name_or_id + + # If a uuid is passed short-circuit it calling the + # get__by_id method + if getattr(cloud, 'use_direct_get', False) and _is_uuid_like(name_or_id): + get_resource = getattr(cloud, f'get_{resource}_by_id', None) + if get_resource: + return get_resource(name_or_id) + + search = ( + resource + if callable(resource) + else getattr(cloud, f'search_{resource}s', None) + ) + if search: + entities = search(name_or_id, filters, **kwargs) + if entities: + if len(entities) > 1: + raise exceptions.SDKException( + f"Multiple matches found for {name_or_id}" + ) + return entities[0] + return None + + +def localhost_supports_ipv6(): + """Determine whether the local host supports IPv6 + + We look for the all ip addresses configured to this node, and assume that + if any of these is IPv6 address (but not loopback or link local), this host + has IPv6 connectivity. + """ + + for ifname, if_addrs in psutil.net_if_addrs().items(): + for if_addr in if_addrs: + if if_addr.family != socket.AF_INET6: + continue + addr = ipaddress.ip_address(if_addr.address) + if not addr.is_link_local and not addr.is_loopback: + return True + return False + + +def valid_kwargs(*valid_args): + # This decorator checks if argument passed as **kwargs to a function are + # present in valid_args. + # + # Typically, valid_kwargs is used when we want to distinguish between + # None and omitted arguments and we still want to validate the argument + # list. + # + # Example usage: + # + # @valid_kwargs('opt_arg1', 'opt_arg2') + # def my_func(self, mandatory_arg1, mandatory_arg2, **kwargs): + # ... 
+ # + @decorator + def func_wrapper(func, *args, **kwargs): + argspec = inspect.getfullargspec(func) + for k in kwargs: + if k not in argspec.args[1:] and k not in valid_args: + raise TypeError( + f"{inspect.stack()[1][3]}() got an unexpected keyword " + f"argument '{k}'" + ) + return func(*args, **kwargs) + + return func_wrapper + + +@contextlib.contextmanager +def openstacksdk_exceptions(error_message=None): + """Context manager for dealing with openstack exceptions. + + :param string error_message: String to use for the exception message + content on non-SDKException exception. + + Useful for avoiding wrapping SDKException exceptions + within themselves. Code called from within the context may throw such + exceptions without having to catch and reraise them. + + Non-SDKException exceptions thrown within the context will + be wrapped and the exception message will be appended to the given + error message. + """ + try: + yield + except exceptions.SDKException: + raise + except Exception as e: + if error_message is None: + error_message = str(e) + raise exceptions.SDKException(error_message) + + +def safe_dict_min(key, data): + """Safely find the minimum for a given key in a list of dict objects. + + This will find the minimum integer value for specific dictionary key + across a list of dictionaries. The values for the given key MUST be + integers, or string representations of an integer. + + The dictionary key does not have to be present in all (or any) + of the elements/dicts within the data set. + + :param string key: The dictionary key to search for the minimum value. + :param list data: List of dicts to use for the data set. + + :returns: None if the field was not found in any elements, or + the minimum value for the field otherwise. + """ + min_value = None + for d in data: + if (key in d) and (d[key] is not None): + try: + val = int(d[key]) + except ValueError: + raise exceptions.SDKException( + "Search for minimum value failed. 
" + f"Value for {key} is not an integer: {d[key]}" + ) + if (min_value is None) or (val < min_value): + min_value = val + return min_value + + +def safe_dict_max(key, data): + """Safely find the maximum for a given key in a list of dict objects. + + This will find the maximum integer value for specific dictionary key + across a list of dictionaries. The values for the given key MUST be + integers, or string representations of an integer. + + The dictionary key does not have to be present in all (or any) + of the elements/dicts within the data set. + + :param string key: The dictionary key to search for the maximum value. + :param list data: List of dicts to use for the data set. + + :returns: None if the field was not found in any elements, or + the maximum value for the field otherwise. + """ + max_value = None + for d in data: + if (key in d) and (d[key] is not None): + try: + val = int(d[key]) + except ValueError: + raise exceptions.SDKException( + "Search for maximum value failed. " + f"Value for {key} is not an integer: {d[key]}" + ) + if (max_value is None) or (val > max_value): + max_value = val + return max_value + + +def parse_range(value): + """Parse a numerical range string. + + Breakdown a range expression into its operater and numerical parts. + This expression must be a string. Valid values must be an integer string, + optionally preceeded by one of the following operators:: + + - "<" : Less than + - ">" : Greater than + - "<=" : Less than or equal to + - ">=" : Greater than or equal to + + Some examples of valid values and function return values:: + + - "1024" : returns (None, 1024) + - "<5" : returns ("<", 5) + - ">=100" : returns (">=", 100) + + :param string value: The range expression to be parsed. + + :returns: A tuple with the operator string (or None if no operator + was given) and the integer value. None is returned if parsing failed. 
+ """ + if value is None: + return None + + range_exp = re.match(r'(<|>|<=|>=){0,1}(\d+)$', value) + if range_exp is None: + return None + + op = range_exp.group(1) + num = int(range_exp.group(2)) + return (op, num) + + +def range_filter(data, key, range_exp): + """Filter a list by a single range expression. + + :param list data: List of dictionaries to be searched. + :param string key: Key name to search within the data set. + :param string range_exp: The expression describing the range of values. + + :returns: A list subset of the original data set. + :raises: :class:`~openstack.exceptions.SDKException` on invalid range + expressions. + """ + filtered = [] + range_exp = str(range_exp).upper() + + if range_exp == "MIN": + key_min = safe_dict_min(key, data) + if key_min is None: + return [] + for d in data: + if int(d[key]) == key_min: + filtered.append(d) + return filtered + elif range_exp == "MAX": + key_max = safe_dict_max(key, data) + if key_max is None: + return [] + for d in data: + if int(d[key]) == key_max: + filtered.append(d) + return filtered + + # Not looking for a min or max, so a range or exact value must + # have been supplied. + val_range = parse_range(range_exp) + + # If parsing the range fails, it must be a bad value. + if val_range is None: + raise exceptions.SDKException(f"Invalid range value: {range_exp}") + + op = val_range[0] + if op: + # Range matching + for d in data: + d_val = int(d[key]) + if op == '<': + if d_val < val_range[1]: + filtered.append(d) + elif op == '>': + if d_val > val_range[1]: + filtered.append(d) + elif op == '<=': + if d_val <= val_range[1]: + filtered.append(d) + elif op == '>=': + if d_val >= val_range[1]: + filtered.append(d) + return filtered + else: + # Exact number match + for d in data: + if int(d[key]) == val_range[1]: + filtered.append(d) + return filtered + + +def generate_patches_from_kwargs(operation, **kwargs): + """Given a set of parameters, returns a list with the + valid patch values. 
+ + :param string operation: The operation to perform. + :param list kwargs: Dict of parameters. + + :returns: A list with the right patch values. + """ + patches = [] + for k, v in kwargs.items(): + patch = {'op': operation, 'value': v, 'path': f'/{k}'} + patches.append(patch) + return sorted(patches) + + +class FileSegment: + """File-like object to pass to requests.""" + + def __init__(self, filename, offset, length): + self.filename = filename + self.offset = offset + self.length = length + self.pos = 0 + self._file = open(filename, 'rb') + self.seek(0) + + def tell(self): + return self._file.tell() - self.offset + + def seek(self, offset, whence=0): + if whence == 0: + self._file.seek(self.offset + offset, whence) + elif whence == 1: + self._file.seek(offset, whence) + elif whence == 2: + self._file.seek(self.offset + self.length - offset, 0) + + def read(self, size=-1): + remaining = self.length - self.pos + if remaining <= 0: + return b'' + + to_read = remaining if size < 0 else min(size, remaining) + chunk = self._file.read(to_read) + self.pos += len(chunk) + + return chunk + + def reset(self): + self._file.seek(self.offset, 0) + + +def _format_uuid_string(string): + return ( + string.replace('urn:', '') + .replace('uuid:', '') + .strip('{}') + .replace('-', '') + .lower() + ) + + +def _is_uuid_like(val): + """Returns validation of a value as a UUID. + + :param val: Value to verify + :type val: string + :returns: bool + + .. versionchanged:: 1.1.1 + Support non-lowercase UUIDs. 
+ """ + try: + return str(uuid.UUID(val)).replace('-', '') == _format_uuid_string(val) + except (TypeError, ValueError, AttributeError): + return False diff --git a/openstack/telemetry/alarm/v2/__init__.py b/openstack/cloud/cmd/__init__.py similarity index 100% rename from openstack/telemetry/alarm/v2/__init__.py rename to openstack/cloud/cmd/__init__.py diff --git a/openstack/cloud/cmd/inventory.py b/openstack/cloud/cmd/inventory.py new file mode 100644 index 0000000000..06e027cba4 --- /dev/null +++ b/openstack/cloud/cmd/inventory.py @@ -0,0 +1,83 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import argparse +import json +import sys + +import yaml + +import openstack.cloud +import openstack.cloud.inventory +from openstack import exceptions + + +def output_format_dict(data, use_yaml): + if use_yaml: + return yaml.safe_dump(data, default_flow_style=False) + else: + return json.dumps(data, sort_keys=True, indent=2) + + +def parse_args(): + parser = argparse.ArgumentParser(description='OpenStack Inventory Module') + parser.add_argument( + '--refresh', action='store_true', help='Refresh cached information' + ) + group = parser.add_mutually_exclusive_group(required=True) + group.add_argument( + '--list', action='store_true', help='List active servers' + ) + group.add_argument('--host', help='List details about the specific host') + parser.add_argument( + '--private', + action='store_true', + default=False, + help='Use private IPs for interface_ip', + ) + parser.add_argument( + '--cloud', default=None, help='Return data for one cloud only' + ) + parser.add_argument( + '--yaml', + action='store_true', + default=False, + help='Output data in nicely readable yaml', + ) + parser.add_argument( + '--debug', + action='store_true', + default=False, + help='Enable debug output', + ) + return parser.parse_args() + + +def main(): + args = parse_args() + try: + openstack.enable_logging(debug=args.debug) + inventory = openstack.cloud.inventory.OpenStackInventory( + refresh=args.refresh, private=args.private, cloud=args.cloud + ) + if args.list: + output = inventory.list_hosts() + elif args.host: + output = inventory.get_host(args.host) + print(output_format_dict(output, args.yaml)) + except exceptions.SDKException as e: + sys.stderr.write(e.message + '\n') + sys.exit(1) + sys.exit(0) diff --git a/openstack/cloud/exc.py b/openstack/cloud/exc.py new file mode 100644 index 0000000000..986a1873c9 --- /dev/null +++ b/openstack/cloud/exc.py @@ -0,0 +1,43 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack import exceptions + +OpenStackCloudException = exceptions.SDKException + + +class OpenStackCloudUnavailableExtension(OpenStackCloudException): + pass + + +class OpenStackCloudUnavailableFeature(OpenStackCloudException): + pass + + +# Backwards compat. These are deprecated and should not be used in new code. +class OpenStackCloudCreateException(OpenStackCloudException): + def __init__(self, resource, resource_id, extra_data=None, **kwargs): + super().__init__( + message=f"Error creating {resource}: {resource_id}", + extra_data=extra_data, + **kwargs, + ) + self.resource_id = resource_id + + +OpenStackCloudTimeout = exceptions.ResourceTimeout +OpenStackCloudHTTPError = exceptions.HttpException +OpenStackCloudBadRequest = exceptions.BadRequestException +OpenStackCloudURINotFound = exceptions.NotFoundException +OpenStackCloudResourceNotFound = OpenStackCloudURINotFound diff --git a/openstack/cloud/inventory.py b/openstack/cloud/inventory.py new file mode 100644 index 0000000000..2c4909a6b0 --- /dev/null +++ b/openstack/cloud/inventory.py @@ -0,0 +1,93 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import functools + +from openstack.cloud import _utils +from openstack.config import loader +from openstack import connection +from openstack import exceptions + +__all__ = ['OpenStackInventory'] + + +class OpenStackInventory: + # Put this here so the capability can be detected with hasattr on the class + extra_config = None + + def __init__( + self, + config_files=None, + refresh=False, + private=False, + config_key=None, + config_defaults=None, + cloud=None, + use_direct_get=False, + ): + if config_files is None: + config_files = [] + config = loader.OpenStackConfig( + config_files=loader.CONFIG_FILES + config_files + ) + self.extra_config = config.get_extra_config( + config_key, config_defaults + ) + + if cloud is None: + self.clouds = [ + connection.Connection(config=cloud_region) + for cloud_region in config.get_all() + ] + else: + self.clouds = [connection.Connection(config=config.get_one(cloud))] + + if private: + for cloud in self.clouds: + cloud.private = True + + # Handle manual invalidation of entire persistent cache + if refresh: + for cloud in self.clouds: + cloud._cache.invalidate() + + def list_hosts( + self, expand=True, fail_on_cloud_config=True, all_projects=False + ): + hostvars = [] + + for cloud in self.clouds: + try: + # Cycle on servers + for server in cloud.list_servers( + detailed=expand, all_projects=all_projects + ): + hostvars.append(server) + except exceptions.SDKException: + # Don't fail on one particular cloud as others may work + if fail_on_cloud_config: + raise + + return hostvars + + def search_hosts(self, 
name_or_id=None, filters=None, expand=True): + hosts = self.list_hosts(expand=expand) + return _utils._filter_list(hosts, name_or_id, filters) + + def get_host(self, name_or_id, filters=None, expand=True): + if expand: + func = self.search_hosts + else: + func = functools.partial(self.search_hosts, expand=False) + return _utils._get_entity(self, func, name_or_id, filters) diff --git a/openstack/cloud/meta.py b/openstack/cloud/meta.py new file mode 100644 index 0000000000..2080e35723 --- /dev/null +++ b/openstack/cloud/meta.py @@ -0,0 +1,632 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import ipaddress +import socket + +from openstack import _log +from openstack import exceptions +from openstack import utils + + +NON_CALLABLES = (str, bool, dict, int, float, list, type(None)) + + +def find_nova_interfaces( + addresses, ext_tag=None, key_name=None, version=4, mac_addr=None +): + ret = [] + for k, v in iter(addresses.items()): + if key_name is not None and k != key_name: + # key_name is specified and it doesn't match the current network. + # Continue with the next one + continue + + for interface_spec in v: + if ext_tag is not None: + if 'OS-EXT-IPS:type' not in interface_spec: + # ext_tag is specified, but this interface has no tag + # We could actually return right away as this means that + # this cloud doesn't support OS-EXT-IPS. 
Nevertheless, + # it would be better to perform an explicit check + # but this needs cloud to be passed to this function. + continue + elif interface_spec['OS-EXT-IPS:type'] != ext_tag: + # Type doesn't match, continue with next one + continue + + if mac_addr is not None: + if 'OS-EXT-IPS-MAC:mac_addr' not in interface_spec: + # mac_addr is specified, but this interface has no mac_addr + # We could actually return right away as this means that + # this cloud doesn't support OS-EXT-IPS-MAC. Nevertheless, + # it would be better to perform an explicit check + # but this needs cloud to be passed to this function. + continue + elif interface_spec['OS-EXT-IPS-MAC:mac_addr'] != mac_addr: + # MAC doesn't match, continue with next one + continue + + if interface_spec['version'] == version: + ret.append(interface_spec) + return ret + + +def find_nova_addresses( + addresses, ext_tag=None, key_name=None, version=4, mac_addr=None +): + interfaces = find_nova_interfaces( + addresses, ext_tag, key_name, version, mac_addr + ) + floating_addrs = [] + fixed_addrs = [] + for i in interfaces: + if i.get('OS-EXT-IPS:type') == 'floating': + floating_addrs.append(i['addr']) + else: + fixed_addrs.append(i['addr']) + return floating_addrs + fixed_addrs + + +def get_server_ip(server, public=False, cloud_public=True, **kwargs): + """Get an IP from the Nova addresses dict + + :param server: The server to pull the address from + :param public: Whether the address we're looking for should be considered + 'public' and therefore reachabiliity tests should be + used. (defaults to False) + :param cloud_public: Whether the cloud has been configured to use private + IPs from servers as the interface_ip. 
This inverts the + public reachability logic, as in this case it's the + private ip we expect shade to be able to reach + """ + addrs = find_nova_addresses(server['addresses'], **kwargs) + return find_best_address(addrs, public=public, cloud_public=cloud_public) + + +def get_server_private_ip(server, cloud=None): + """Find the private IP address + + If Neutron is available, search for a port on a network where + `router:external` is False and `shared` is False. This combination + indicates a private network with private IP addresses. This port should + have the private IP. + + If Neutron is not available, or something goes wrong communicating with it, + as a fallback, try the list of addresses associated with the server dict, + looking for an IP type tagged as 'fixed' in the network named 'private'. + + Last resort, ignore the IP type and just look for an IP on the 'private' + network (e.g., Rackspace). + """ + if cloud and not cloud.use_internal_network(): + return None + + # Try to get a floating IP interface. If we have one then return the + # private IP address associated with that floating IP for consistency. + fip_ints = find_nova_interfaces(server['addresses'], ext_tag='floating') + fip_mac = None + if fip_ints: + fip_mac = fip_ints[0].get('OS-EXT-IPS-MAC:mac_addr') + + # Short circuit the ports/networks search below with a heavily cached + # and possibly pre-configured network name + if cloud: + int_nets = cloud.get_internal_ipv4_networks() + for int_net in int_nets: + int_ip = get_server_ip( + server, + key_name=int_net['name'], + ext_tag='fixed', + cloud_public=not cloud.private, + mac_addr=fip_mac, + ) + if int_ip is not None: + return int_ip + # Try a second time without the fixed tag. This is for old nova-network + # results that do not have the fixed/floating tag. 
+ for int_net in int_nets: + int_ip = get_server_ip( + server, + key_name=int_net['name'], + cloud_public=not cloud.private, + mac_addr=fip_mac, + ) + if int_ip is not None: + return int_ip + + ip = get_server_ip( + server, ext_tag='fixed', key_name='private', mac_addr=fip_mac + ) + if ip: + return ip + + # Last resort, and Rackspace + return get_server_ip(server, key_name='private') + + +def get_server_external_ipv4(cloud, server): + """Find an externally routable IP for the server. + + There are 5 different scenarios we have to account for: + + * Cloud has externally routable IP from neutron but neutron APIs don't + work (only info available is in nova server record) (rackspace) + * Cloud has externally routable IP from neutron (runabove, ovh) + * Cloud has externally routable IP from neutron AND supports optional + private tenant networks (vexxhost, unitedstack) + * Cloud only has private tenant network provided by neutron and requires + floating-ip for external routing (dreamhost, hp) + * Cloud only has private tenant network provided by nova-network and + requires floating-ip for external routing (auro) + + :param cloud: the cloud we're working with + :param server: the server dict from which we want to get an IPv4 address + :return: a string containing the IPv4 address or None + """ + + if not cloud.use_external_network(): + return None + + if server['accessIPv4']: + return server['accessIPv4'] + + # Short circuit the ports/networks search below with a heavily cached + # and possibly pre-configured network name + ext_nets = cloud.get_external_ipv4_networks() + for ext_net in ext_nets: + ext_ip = get_server_ip( + server, + key_name=ext_net['name'], + public=True, + cloud_public=not cloud.private, + ) + if ext_ip is not None: + return ext_ip + + # Try to get a floating IP address + # Much as I might find floating IPs annoying, if it has one, that's + # almost certainly the one that wants to be used + ext_ip = get_server_ip( + server, ext_tag='floating', 
public=True, cloud_public=not cloud.private + ) + if ext_ip is not None: + return ext_ip + + # The cloud doesn't support Neutron or Neutron can't be contacted. The + # server might have fixed addresses that are reachable from outside the + # cloud (e.g. Rax) or have plain ol' floating IPs + + # Try to get an address from a network named 'public' + ext_ip = get_server_ip( + server, key_name='public', public=True, cloud_public=not cloud.private + ) + if ext_ip is not None: + return ext_ip + + # Nothing else works, try to find a globally routable IP address + for interfaces in server['addresses'].values(): + for interface in interfaces: + try: + ip = ipaddress.ip_address(interface['addr']) + except Exception: # noqa: S112 + # Skip any error, we're looking for a working ip - if the + # cloud returns garbage, it wouldn't be the first weird thing + # but it still doesn't meet the requirement of "be a working + # ip address" + continue + if ip.version == 4 and not ip.is_private: + return str(ip) + + return None + + +def find_best_address(addresses, public=False, cloud_public=True): + do_check = public == cloud_public + if not addresses: + return None + if len(addresses) == 1: + return addresses[0] + if len(addresses) > 1 and do_check: + # We only want to do this check if the address is supposed to be + # reachable. 
Otherwise we're just debug log spamming on every listing + # of private ip addresses + for address in addresses: + try: + for count in utils.iterate_timeout( + 5, f"Timeout waiting for {address}", wait=0.1 + ): + # Return the first one that is reachable + try: + for res in socket.getaddrinfo( + address, + 22, + socket.AF_UNSPEC, + socket.SOCK_STREAM, + 0, + ): + family, socktype, proto, _, sa = res + connect_socket = socket.socket( + family, socktype, proto + ) + connect_socket.settimeout(1) + connect_socket.connect(sa) + return address + except OSError: + # Sometimes a "no route to address" type error + # will fail fast, but can often come alive + # when retried. + continue + except Exception: # noqa: S110 + # This is best effort. Ignore any errors. + pass + + # Give up and return the first - none work as far as we can tell + if do_check: + log = _log.setup_logging('openstack') + log.debug( + f"The cloud returned multiple addresses ({addresses}) and we " + f"could not connect to port 22 on either. That might be what you " + f"wanted, but we have no clue what's going on, so we picked the " + f"first one {addresses[0]}" + ) + return addresses[0] + + +def get_server_external_ipv6(server): + """Get an IPv6 address reachable from outside the cloud. + + This function assumes that if a server has an IPv6 address, that address + is reachable from outside the cloud. + + :param server: the server from which we want to get an IPv6 address + :return: a string containing the IPv6 address or None + """ + # Don't return ipv6 interfaces if forcing IPv4 + if server['accessIPv6']: + return server['accessIPv6'] + addresses = find_nova_addresses(addresses=server['addresses'], version=6) + return find_best_address(addresses, public=True) + + +def get_server_default_ip(cloud, server): + """Get the configured 'default' address + + It is possible in clouds.yaml to configure for a cloud a network that + is the 'default_interface'. 
This is the network that should be used + to talk to instances on the network. + + :param cloud: the cloud we're working with + :param server: the server dict from which we want to get the default + IPv4 address + :return: a string containing the IPv4 address or None + """ + ext_net = cloud.get_default_network() + if ext_net: + if cloud._local_ipv6 and not cloud.force_ipv4: + # try 6 first, fall back to four + versions = [6, 4] + else: + versions = [4] + for version in versions: + ext_ip = get_server_ip( + server, + key_name=ext_net['name'], + version=version, + public=True, + cloud_public=not cloud.private, + ) + if ext_ip is not None: + return ext_ip + return None + + +def _get_interface_ip(cloud, server): + """Get the interface IP for the server + + Interface IP is the IP that should be used for communicating with the + server. It is: + - the IP on the configured default_interface network + - if cloud.private, the private ip if it exists + - if the server has a public ip, the public ip + """ + default_ip = get_server_default_ip(cloud, server) + if default_ip: + return default_ip + + if cloud.private and server['private_v4']: + return server['private_v4'] + + if server['public_v6'] and cloud._local_ipv6 and not cloud.force_ipv4: + return server['public_v6'] + else: + return server['public_v4'] + + +def get_groups_from_server(cloud, server, server_vars): + groups = [] + + # NOTE(efried): This is hardcoded to 'compute' because this method is only + # used from ComputeCloudMixin. 
+ region = cloud.config.get_region_name('compute') + cloud_name = cloud.name + + # Create a group for the cloud + groups.append(cloud_name) + + # Create a group on region + groups.append(region) + + # And one by cloud_region + groups.append(f"{cloud_name}_{region}") + + # Check if group metadata key in servers' metadata + group = server['metadata'].get('group') + if group: + groups.append(group) + + for extra_group in server['metadata'].get('groups', '').split(','): + if extra_group: + groups.append(extra_group) + + groups.append('instance-{}'.format(server['id'])) + + for key in ('flavor', 'image'): + if 'name' in server_vars[key]: + groups.append('{}-{}'.format(key, server_vars[key]['name'])) + + for key, value in iter(server['metadata'].items()): + groups.append(f'meta-{key}_{value}') + + az = server_vars.get('az', None) + if az: + # Make groups for az, region_az and cloud_region_az + groups.append(az) + groups.append(f'{region}_{az}') + groups.append(f'{cloud.name}_{region}_{az}') + return groups + + +def expand_server_vars(cloud, server): + """Backwards compatibility function.""" + return add_server_interfaces(cloud, server) + + +def _make_address_dict(fip, port): + address = dict(version=4, addr=fip['floating_ip_address']) + address['OS-EXT-IPS:type'] = 'floating' + address['OS-EXT-IPS-MAC:mac_addr'] = port['mac_address'] + return address + + +def _get_supplemental_addresses(cloud, server): + fixed_ip_mapping = {} + for name, network in server['addresses'].items(): + for address in network: + if address['version'] == 6: + continue + if address.get('OS-EXT-IPS:type') == 'floating': + # We have a floating IP that nova knows about, do nothing + return server['addresses'] + fixed_ip_mapping[address['addr']] = name + try: + # Don't bother doing this before the server is active, it's a waste + # of an API call while polling for a server to come up + if ( + cloud.has_service('network') + and cloud._has_floating_ips() + and server['status'] == 'ACTIVE' + ): + for 
port in cloud.search_ports( + filters=dict(device_id=server['id']) + ): + # This SHOULD return one and only one FIP - but doing it as a + # search/list lets the logic work regardless + for fip in cloud.search_floating_ips( + filters=dict(port_id=port['id']) + ): + fixed_net = fixed_ip_mapping.get(fip['fixed_ip_address']) + if fixed_net is None: + log = _log.setup_logging('openstack') + log.debug( + "The cloud returned floating ip %(fip)s attached " + "to server %(server)s but the fixed ip associated " + "with the floating ip in the neutron listing " + "does not exist in the nova listing. Something " + "is exceptionally broken.", + dict(fip=fip['id'], server=server['id']), + ) + else: + server['addresses'][fixed_net].append( + _make_address_dict(fip, port) + ) + except exceptions.SDKException: + # If something goes wrong with a cloud call, that's cool - this is + # an attempt to provide additional data and should not block forward + # progress + pass + return server['addresses'] + + +def add_server_interfaces(cloud, server): + """Add network interface information to server. + + Query the cloud as necessary to add information to the server record + about the network information needed to interface with the server. + + Ensures that public_v4, public_v6, private_v4, private_v6, interface_ip, + accessIPv4 and accessIPv6 are always set. + """ + # First, add an IP address. Set it to '' rather than None if it does + # not exist to remain consistent with the pre-existing missing values + server['addresses'] = _get_supplemental_addresses(cloud, server) + server['public_v4'] = get_server_external_ipv4(cloud, server) or '' + # If we're forcing IPv4, then don't report IPv6 interfaces which + # are likely to be unconfigured. 
+ if cloud.force_ipv4: + server['public_v6'] = '' + else: + server['public_v6'] = get_server_external_ipv6(server) or '' + server['private_v4'] = get_server_private_ip(server, cloud) or '' + server['interface_ip'] = _get_interface_ip(cloud, server) or '' + + # Some clouds do not set these, but they're a regular part of the Nova + # server record. Since we know them, go ahead and set them. In the case + # where they were set previous, we use the values, so this will not break + # clouds that provide the information + if cloud.private and server.private_v4: + server['access_ipv4'] = server['private_v4'] + else: + server['access_ipv4'] = server['public_v4'] + server['access_ipv6'] = server['public_v6'] + + return server + + +def expand_server_security_groups(cloud, server): + try: + groups = cloud.list_server_security_groups(server) + except exceptions.SDKException: + groups = [] + server['security_groups'] = groups or [] + + +def get_hostvars_from_server(cloud, server, mounts=None): + """Expand additional server information useful for ansible inventory. + + Variables in this function may make additional cloud queries to flesh out + possibly interesting info, making it more expensive to call than + expand_server_vars if caching is not set up. If caching is set up, + the extra cost should be minimal. + """ + server_vars = obj_to_munch(add_server_interfaces(cloud, server)) + + flavor_id = server['flavor'].get('id') + if flavor_id: + # In newer nova, the flavor record can be kept around for flavors + # that no longer exist. The id and name are not there. + flavor_name = cloud.get_flavor_name(flavor_id) + if flavor_name: + server_vars['flavor']['name'] = flavor_name + elif 'original_name' in server['flavor']: + # Users might be have code still expecting name. That name is in + # original_name. 
+ server_vars['flavor']['name'] = server['flavor']['original_name'] + + expand_server_security_groups(cloud, server) + + # OpenStack can return image as a string when you've booted from volume + if str(server['image']) == server['image']: + image_id = server['image'] + server_vars['image'] = dict(id=image_id) + else: + image_id = server['image'].get('id', None) + if image_id: + image_name = cloud.get_image_name(image_id) + if image_name: + server_vars['image']['name'] = image_name + + # During the switch to returning sdk resource objects we need temporarily + # to force convertion to dict. This will be dropped soon. + if hasattr(server_vars['image'], 'to_dict'): + server_vars['image'] = server_vars['image'].to_dict(computed=False) + + volumes = [] + if cloud.has_service('volume'): + try: + for volume in cloud.get_volumes(server): + # Make things easier to consume elsewhere + volume['device'] = volume['attachments'][0]['device'] + volumes.append(volume) + except exceptions.SDKException: + pass + server_vars['volumes'] = volumes + if mounts: + for mount in mounts: + for vol in server_vars['volumes']: + if vol['display_name'] == mount['display_name']: + if 'mount' in mount: + vol['mount'] = mount['mount'] + + return server_vars + + +def obj_to_munch(obj): + """Turn an object with attributes into a dict suitable for serializing. + + Some of the things that are returned in OpenStack are objects with + attributes. That's awesome - except when you want to expose them as JSON + structures. We use this as the basis of get_hostvars_from_server above so + that we can just have a plain dict of all of the values that exist in the + nova metadata for a server. 
+ """ + if obj is None: + return None + elif isinstance(obj, utils.Munch) or hasattr(obj, 'mock_add_spec'): + # If we obj_to_munch twice, don't fail, just return the munch + # Also, don't try to modify Mock objects - that way lies madness + return obj + elif isinstance(obj, dict): + # The new request-id tracking spec: + # https://specs.openstack.org/openstack/nova-specs/specs/juno/approved/log-request-id-mappings.html + # adds a request-ids attribute to returned objects. It does this even + # with dicts, which now become dict subclasses. So we want to convert + # the dict we get, but we also want it to fall through to object + # attribute processing so that we can also get the request_ids + # data into our resulting object. + instance = utils.Munch(obj) + else: + instance = utils.Munch() + + for key in dir(obj): + try: + value = getattr(obj, key) + # some attributes can be defined as a @property, so we can't assure + # to have a valid value + # e.g. id in python-novaclient/tree/novaclient/v2/quotas.py + except AttributeError: + continue + if isinstance(value, NON_CALLABLES) and not key.startswith('_'): + instance[key] = value + return instance + + +obj_to_dict = obj_to_munch + + +def obj_list_to_munch(obj_list): + """Enumerate through lists of objects and return lists of dictonaries. + + Some of the objects returned in OpenStack are actually lists of objects, + and in order to expose the data structures as JSON, we need to facilitate + the conversion to lists of dictonaries. + """ + return [obj_to_munch(obj) for obj in obj_list] + + +obj_list_to_dict = obj_list_to_munch + + +def get_and_munchify(key, data): + """Get the value associated to key and convert it. 
+ + The value will be converted in a Munch object or a list of Munch objects + based on the type + """ + result = data.get(key, []) if key else data + if isinstance(result, list): + return obj_list_to_munch(result) + elif isinstance(result, dict): + return obj_to_munch(result) + return result diff --git a/openstack/cloud/openstackcloud.py b/openstack/cloud/openstackcloud.py new file mode 100644 index 0000000000..7fac057f0d --- /dev/null +++ b/openstack/cloud/openstackcloud.py @@ -0,0 +1,812 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import atexit +import concurrent.futures +import copy +import functools +import queue +import types +import typing as ty +import warnings +import weakref + +import dogpile.cache +import keystoneauth1.exceptions +from keystoneauth1.identity import base as ks_plugin_base +import requests.models +import typing_extensions as ty_ext +import urllib3.exceptions + +from openstack import _log +from openstack import _services_mixin +from openstack.cloud import _utils +from openstack.cloud import meta +from openstack import config as cloud_config +from openstack.config import cloud_region +from openstack import exceptions +from openstack import proxy +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + from dogpile.cache import region as cache_region + from keystoneauth1.access import service_catalog as ks_service_catalog + from keystoneauth1 import session as ks_session + from oslo_config import cfg + + from openstack import service_description + + +class _OpenStackCloudMixin(_services_mixin.ServicesMixin): + """Represent a connection to an OpenStack Cloud. + + OpenStackCloud is the entry point for all cloud operations, regardless + of which OpenStack service those operations may ultimately come from. + The operations on an OpenStackCloud are resource oriented rather than + REST API operation oriented. For instance, one will request a Floating IP + and that Floating IP will be actualized either via neutron or via nova + depending on how this particular cloud has decided to arrange itself. 
+ """ + + _OBJECT_MD5_KEY = 'x-sdk-md5' + _OBJECT_SHA256_KEY = 'x-sdk-sha256' + _OBJECT_AUTOCREATE_KEY = 'x-sdk-autocreated' + _OBJECT_AUTOCREATE_CONTAINER = 'images' + + # NOTE(shade) shade keys were x-object-meta-x-shade-md5 - we need to check + # those in freshness checks so that a shade->sdk transition + # doesn't result in a re-upload + _SHADE_OBJECT_MD5_KEY = 'x-object-meta-x-shade-md5' + _SHADE_OBJECT_SHA256_KEY = 'x-object-meta-x-shade-sha256' + _SHADE_OBJECT_AUTOCREATE_KEY = 'x-object-meta-x-shade-autocreated' + + config: cloud_region.CloudRegion + + cache_enabled: bool + _cache_expirations: dict[str, int] + _cache: 'cache_region.CacheRegion' + + verify: bool | str | None + cert: str | tuple[str, str] | None + + def __init__( + self, + cloud: str | None = None, + config: cloud_region.CloudRegion | None = None, + session: ty.Optional['ks_session.Session'] = None, + app_name: str | None = None, + app_version: str | None = None, + extra_services: list['service_description.ServiceDescription'] + | None = None, + strict: bool = False, + use_direct_get: bool | None = None, + task_manager: ty.Any = None, + rate_limit: float | dict[str, float] | None = None, + oslo_conf: ty.Optional['cfg.ConfigOpts'] = None, + service_types: list[str] | None = None, + global_request_id: str | None = None, + strict_proxies: bool = False, + pool_executor: concurrent.futures.Executor | None = None, + **kwargs: ty.Any, + ) -> None: + """Create a connection to a cloud. + + A connection needs information about how to connect, how to + authenticate and how to select the appropriate services to use. + + The recommended way to provide this information is by referencing + a named cloud config from an existing `clouds.yaml` file. The cloud + name ``envvars`` may be used to consume a cloud configured via ``OS_`` + environment variables. 
+ + A pre-existing :class:`~openstack.config.cloud_region.CloudRegion` + object can be passed in lieu of a cloud name, for cases where the user + already has a fully formed CloudRegion and just wants to use it. + + Similarly, if for some reason the user already has a + :class:`~keystoneauth1.session.Session` and wants to use it, it may be + passed in. + + :param str cloud: Name of the cloud from config to use. + :param config: CloudRegion object representing the config for the + region of the cloud in question. + :type config: :class:`~openstack.config.cloud_region.CloudRegion` + :param session: A session object compatible with + :class:`~keystoneauth1.session.Session`. + :type session: :class:`~keystoneauth1.session.Session` + :param str app_name: Name of the application to be added to User Agent. + :param str app_version: Version of the application to be added to + User Agent. + :param extra_services: List of + :class:`~openstack.service_description.ServiceDescription` + objects describing services that openstacksdk otherwise does not + know about. + :param bool use_direct_get: + For get methods, make specific REST calls for server-side + filtering instead of making list calls and filtering client-side. + Default false. + :param task_manager: + Ignored. Exists for backwards compat during transition. Rate limit + parameters should be passed directly to the `rate_limit` parameter. + :param rate_limit: + Client-side rate limit, expressed in calls per second. The + parameter can either be a single float, or it can be a dict with + keys as service-type and values as floats expressing the calls + per second for that service. Defaults to None, which means no + rate-limiting is performed. + :param oslo_conf: An oslo.config CONF object. + :type oslo_conf: :class:`~oslo_config.cfg.ConfigOpts` + An oslo.config ``CONF`` object that has been populated with + ``keystoneauth1.loading.register_adapter_conf_options`` in + groups named by the OpenStack service's project name. 
+ :param service_types: + A list/set of service types this Connection should support. All + other service types will be disabled (will error if used). + **Currently only supported in conjunction with the ``oslo_conf`` + kwarg.** + :param global_request_id: A Request-id to send with all interactions. + :param strict_proxies: + If True, check proxies on creation and raise + ServiceDiscoveryException if the service is unavailable. + :type strict_proxies: bool + Throw an ``openstack.exceptions.ServiceDiscoveryException`` if the + endpoint for a given service doesn't work. This is useful for + OpenStack services using sdk to talk to other OpenStack services + where it can be expected that the deployer config is correct and + errors should be reported immediately. + Default false. + :param pool_executor: + :type pool_executor: :class:`~futurist.Executor` + A futurist ``Executor`` object to be used for concurrent background + activities. Defaults to None in which case a ThreadPoolExecutor + will be created if needed. + :param kwargs: If a config is not provided, the rest of the parameters + provided are assumed to be arguments to be passed to the + CloudRegion constructor. 
+ """ + super().__init__() + + if use_direct_get is not None: + warnings.warn( + "The 'use_direct_get' argument is deprecated for removal", + os_warnings.RemovedInSDK50Warning, + ) + + self._extra_services = {} + self._strict_proxies = strict_proxies + if extra_services: + for service in extra_services: + self._extra_services[service.service_type] = service + + if config: + self.config = config + else: + if oslo_conf: + self.config = cloud_region.from_conf( + oslo_conf, + session=session, + app_name=app_name, + app_version=app_version, + service_types=service_types, + ) + elif session: + self.config = cloud_region.from_session( + session=session, + app_name=app_name, + app_version=app_version, + load_yaml_config=False, + load_envvars=False, + rate_limit=rate_limit, + **kwargs, + ) + else: + self.config = cloud_config.get_cloud_region( + cloud=cloud, + app_name=app_name, + app_version=app_version, + load_yaml_config=cloud is not None, + load_envvars=cloud is not None, + rate_limit=rate_limit, + **kwargs, + ) + + self._session: ks_session.Session | None = None + self._proxies: dict[str, proxy.Proxy] = {} + self.__pool_executor = pool_executor + self._global_request_id = global_request_id + self.use_direct_get = use_direct_get or False + self.strict_mode = strict + + self.log = _log.setup_logging('openstack') + + self.name = self.config.name + self.auth = self.config.get_auth_args() + self.default_interface = self.config.get_interface() + self.force_ipv4 = self.config.force_ipv4 + + self.verify, self.cert = self.config.get_requests_verify_args() + + # Turn off urllib3 warnings about insecure certs if we have + # explicitly configured requests to tell it we do not want + # cert verification + if not self.verify: + self.log.debug( + "Turning off Insecure SSL warnings since verify=False" + ) + warnings.filterwarnings( + 'ignore', category=urllib3.exceptions.InsecureRequestWarning + ) + + self._disable_warnings: dict[str, bool] = {} + + cache_expiration_time = 
int(self.config.get_cache_expiration_time()) + cache_class = self.config.get_cache_class() + cache_arguments = self.config.get_cache_arguments() + + self._cache_expirations = dict() + + if cache_class != 'dogpile.cache.null': + self.cache_enabled = True + else: + self.cache_enabled = False + + # Uncoditionally create cache even with a "null" backend + self._cache = self._make_cache( + cache_class, cache_expiration_time, cache_arguments + ) + expirations = self.config.get_cache_expirations() + for expire_key in expirations.keys(): + self._cache_expirations[expire_key] = expirations[expire_key] + + self._api_cache_keys: set[str] = set() + + self._local_ipv6 = ( + _utils.localhost_supports_ipv6() if not self.force_ipv4 else False + ) + + # Register cleanup steps + atexit.register(self.close) + + @property + def session(self) -> 'ks_session.Session': + if not self._session: + self._session = self.config.get_session() + # Hide a reference to the connection on the session to help with + # backwards compatibility for folks trying to just pass + # conn.session to a Resource method's session argument. 
+ setattr(self.session, '_sdk_connection', weakref.proxy(self)) + return self._session + + @property + def _pool_executor(self) -> concurrent.futures.Executor: + if not self.__pool_executor: + self.__pool_executor = concurrent.futures.ThreadPoolExecutor( + max_workers=5 + ) + return self.__pool_executor + + def close(self) -> None: + """Release any resources held open.""" + self.config.set_auth_cache() + if self.__pool_executor: + self.__pool_executor.shutdown() + atexit.unregister(self.close) + + def __enter__(self) -> ty_ext.Self: + return self + + def __exit__( + self, + exc_type: type[BaseException] | None, + exc_value: BaseException | None, + traceback: types.TracebackType | None, + ) -> None: + self.close() + + def set_global_request_id(self, global_request_id: str) -> None: + self._global_request_id = global_request_id + + def global_request(self, global_request_id: str) -> ty_ext.Self: + """Make a new Connection object with a global request id set. + + Take the existing settings from the current Connection and construct a + new Connection object with the global_request_id overridden. + + .. code-block:: python + + from oslo_context import context + + cloud = openstack.connect(cloud='example') + # Work normally + servers = cloud.list_servers() + cloud2 = cloud.global_request(context.generate_request_id()) + # cloud2 sends all requests with global_request_id set + servers = cloud2.list_servers() + + Additionally, this can be used as a context manager: + + .. code-block:: python + + from oslo_context import context + + c = openstack.connect(cloud='example') + # Work normally + servers = c.list_servers() + with c.global_request(context.generate_request_id()) as c2: + # c2 sends all requests with global_request_id set + servers = c2.list_servers() + + :param global_request_id: The `global_request_id` to send. 
+ """ + params = copy.deepcopy(self.config.config) + config = cloud_region.from_session( + session=self.session, + app_name=self.config._app_name, + app_version=self.config._app_version, + discovery_cache=self.session._discovery_cache, + **params, + ) + + # Override the cloud name so that logging/location work right + config._name = self.name + config.config['profile'] = self.name + # Use self.__class__ so that we return whatever this is, like if it's + # a subclass in the case of shade wrapping sdk. + new_conn = self.__class__(config=config) + new_conn.set_global_request_id(global_request_id) + return new_conn + + def _make_cache( + self, + cache_class: str, + expiration_time: int, + arguments: dict[str, ty.Any] | None, + ) -> 'cache_region.CacheRegion': + return dogpile.cache.make_region( + function_key_generator=self._make_cache_key + ).configure( + cache_class, expiration_time=expiration_time, arguments=arguments + ) + + def _make_cache_key( + self, namespace: str, fn: ty.Callable[..., ty.Any] + ) -> ty.Callable[..., str]: + fname = fn.__name__ + if namespace is None: + name_key = self.name + else: + name_key = f'{self.name}:{namespace}' + + def generate_key(*args, **kwargs): + # TODO(frickler): make handling arg keys actually work + arg_key = '' + kw_keys = sorted(kwargs.keys()) + kwargs_key = ','.join( + [f'{k}:{kwargs[k]}' for k in kw_keys if k != 'cache'] + ) + ans = "_".join([str(name_key), fname, arg_key, kwargs_key]) + return ans + + return generate_key + + def pprint(self, resource: object) -> None: + """Wrapper around pprint that groks munch objects""" + # import late since this is a utility function + import pprint + + new_resource = _utils._dictify_resource(resource) + pprint.pprint(new_resource) + + def pformat(self, resource: object) -> str: + """Wrapper around pformat that groks munch objects""" + # import late since this is a utility function + import pprint + + new_resource = _utils._dictify_resource(resource) + return 
pprint.pformat(new_resource) + + @property + def _keystone_catalog(self) -> 'ks_service_catalog.ServiceCatalog': + if self.session.auth is None: + raise exceptions.ConfigException( + 'session has no auth information attached' + ) + + if not isinstance( + self.session.auth, ks_plugin_base.BaseIdentityPlugin + ): + raise exceptions.ConfigException( + 'cannot fetch catalog for non-keystone auth plugin' + ) + + return self.session.auth.get_access(self.session).service_catalog + + @property + def service_catalog(self) -> list[dict[str, ty.Any]]: + return self._keystone_catalog.catalog + + @property + def auth_token(self) -> str | None: + # Keystone's session will reuse a token if it is still valid. + # We don't need to track validity here, just get_token() each time. + return self.session.get_token() + + @property + def current_user_id(self) -> str | None: + """Get the id of the currently logged-in user from the token.""" + if self.session.auth is None: + raise exceptions.ConfigException( + 'session has no auth information attached' + ) + + if not isinstance( + self.session.auth, ks_plugin_base.BaseIdentityPlugin + ): + raise exceptions.ConfigException( + 'cannot fetch catalog for non-keystone auth plugin' + ) + + return self.session.auth.get_access(self.session).user_id + + @property + def current_project_id(self) -> str | None: + """Get the current project ID. + + Returns the project_id of the current token scope. None means that + the token is domain scoped or unscoped. + + :raises keystoneauth1.exceptions.auth.AuthorizationFailure: + if a new token fetch fails. + :raises keystoneauth1.exceptions.auth_plugins.MissingAuthPlugin: + if a plugin is not available. 
+ """ + return self.session.get_project_id() + + @property + def current_project(self) -> utils.Munch: + """Return a ``utils.Munch`` describing the current project""" + return self._get_project_info() + + def _get_project_info(self, project_id: str | None = None) -> utils.Munch: + project_info = utils.Munch( + id=project_id, + name=None, + domain_id=None, + domain_name=None, + ) + if not project_id or project_id == self.current_project_id: + # If we don't have a project_id parameter, it means a user is + # directly asking what the current state is. + # Alternately, if we have one, that means we're calling this + # from within a normalize function, which means the object has + # a project_id associated with it. If the project_id matches + # the project_id of our current token, that means we can supplement + # the info with human readable info about names if we have them. + # If they don't match, that means we're an admin who has pulled + # an object from a different project, so adding info from the + # current token would be wrong. + auth_args = self.config.config.get('auth', {}) + project_info['id'] = self.current_project_id + project_info['name'] = auth_args.get('project_name') + project_info['domain_id'] = auth_args.get('project_domain_id') + project_info['domain_name'] = auth_args.get('project_domain_name') + return project_info + + @property + def current_location(self) -> utils.Munch: + """Return a ``utils.Munch`` explaining the current cloud location.""" + return self._get_current_location() + + def _get_current_location( + self, + project_id: str | None = None, + zone: str | None = None, + ) -> utils.Munch: + return utils.Munch( + cloud=self.name, + # TODO(efried): This is wrong, but it only seems to be used in a + # repr; can we get rid of it? + region_name=self.config.get_region_name(), + zone=zone, + project=self._get_project_info(project_id), + ) + + def range_search(self, data, filters): + """Perform integer range searches across a list of dictionaries. 
+ + Given a list of dictionaries, search across the list using the given + dictionary keys and a range of integer values for each key. Only + dictionaries that match ALL search filters across the entire original + data set will be returned. + + It is not a requirement that each dictionary contain the key used + for searching. Those without the key will be considered non-matching. + + The range values must be string values and is either a set of digits + representing an integer for matching, or a range operator followed by + a set of digits representing an integer for matching. If a range + operator is not given, exact value matching will be used. Valid + operators are one of: <,>,<=,>= + + :param data: List of dictionaries to be searched. + :param filters: Dict describing the one or more range searches to + perform. If more than one search is given, the result will be the + members of the original data set that match ALL searches. An + example of filtering by multiple ranges:: + + {"vcpus": "<=5", "ram": "<=2048", "disk": "1"} + + :returns: A list subset of the original data set. + :raises: :class:`~openstack.exceptions.SDKException` on invalid range + expressions. + """ + filtered: list[object] = [] + + for key, range_value in filters.items(): + # We always want to operate on the full data set so that + # calculations for minimum and maximum are correct. + results = _utils.range_filter(data, key, range_value) + + if not filtered: + # First set of results + filtered = results + else: + # The combination of all searches should be the intersection of + # all result sets from each search. So adjust the current set + # of filtered data by computing its intersection with the + # latest result set. + filtered = [r for r in results for f in filtered if r == f] + + return filtered + + def _get_and_munchify(self, key, data): + """Wrapper around meta.get_and_munchify. + + Some of the methods expect a `meta` attribute to be passed in as + part of the method signature. 
In those methods the meta param is + overriding the meta module making the call to meta.get_and_munchify + to fail. + """ + if isinstance(data, requests.models.Response): + data = proxy._json_response(data) + return meta.get_and_munchify(key, data) + + def get_name(self) -> str: + return self.name + + def get_session_endpoint( + self, + service_key: str, + min_version: str | None = None, + max_version: str | None = None, + ) -> str | None: + try: + return self.config.get_session_endpoint( + service_key, + min_version=min_version, + max_version=max_version, + ) + except keystoneauth1.exceptions.catalog.EndpointNotFound as e: + self.log.debug( + "Endpoint not found in %s cloud: %s", self.name, str(e) + ) + endpoint = None + except exceptions.SDKException: + raise + except Exception as e: + raise exceptions.SDKException( + f"Error getting {service_key} endpoint on " + f"{self.name}:{self.config.get_region_name(service_key)}: " + f"{e!s}" + ) + return endpoint + + def has_service( + self, service_key: str, version: str | None = None + ) -> bool: + if not self.config.has_service(service_key): + # TODO(mordred) add a stamp here so that we only report this once + if not (self._disable_warnings.get(service_key)): + self.log.debug( + "Disabling %(service_key)s entry in catalog per config", + {'service_key': service_key}, + ) + self._disable_warnings[service_key] = True + return False + try: + kwargs = dict() + # If a specific version was requested - try it + if version is not None: + kwargs['min_version'] = version + kwargs['max_version'] = version + endpoint = self.get_session_endpoint(service_key, **kwargs) + except exceptions.SDKException: + return False + if endpoint: + return True + else: + return False + + def search_resources( + self, + resource_type, + name_or_id, + get_args=None, + get_kwargs=None, + list_args=None, + list_kwargs=None, + **filters, + ): + """Search resources + + Search resources matching certain conditions + + :param str resource_type: String 
representation of the expected + resource as `service.resource` (i.e. "network.security_group"). + :param str name_or_id: Name or ID of the resource + :param list get_args: Optional args to be passed to the _get call. + :param dict get_kwargs: Optional kwargs to be passed to the _get call. + :param list list_args: Optional args to be passed to the _list call. + :param dict list_kwargs: Optional kwargs to be passed to the _list call + :param dict filters: Additional filters to be used for querying + resources. + """ + get_args = get_args or () + get_kwargs = get_kwargs or {} + list_args = list_args or () + list_kwargs = list_kwargs or {} + + # User used string notation. Try to find proper + # resource + service_name, resource_name = resource_type.split('.') + if not hasattr(self, service_name): + raise exceptions.SDKException( + f"service {service_name} is not existing/enabled" + ) + + service_proxy = getattr(self, service_name) + try: + resource_type = service_proxy._resource_registry[resource_name] + except KeyError: + raise exceptions.SDKException( + f"Resource {resource_name} is not known in service " + f"{service_name}" + ) + + if name_or_id: + # name_or_id is definitely not None + try: + resource_by_id = service_proxy._get( + resource_type, name_or_id, *get_args, **get_kwargs + ) + return [resource_by_id] + except exceptions.NotFoundException: + pass + + if not filters: + filters = {} + + if name_or_id: + filters["name"] = name_or_id + list_kwargs.update(filters) + + return list( + service_proxy._list(resource_type, *list_args, **list_kwargs) + ) + + def project_cleanup( + self, + dry_run=True, + wait_timeout=120, + status_queue=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + """Cleanup the project resources. + + Cleanup all resources in all services, which provide cleanup methods. + + :param bool dry_run: Cleanup or only list identified resources. 
+ :param int wait_timeout: Maximum amount of time given to each service + to comlete the cleanup. + :param queue status_queue: a threading queue object used to get current + process status. The queue contain processed resources. + :param dict filters: Additional filters for the cleanup (only resources + matching all filters will be deleted, if there are no other + dependencies). + :param resource_evaluation_fn: A callback function, which will be + invoked for each resurce and must return True/False depending on + whether resource need to be deleted or not. + :param skip_resources: List of specific resources whose cleanup should + be skipped. + """ + dependencies = {} + get_dep_fn_name = '_get_cleanup_dependencies' + cleanup_fn_name = '_service_cleanup' + if not status_queue: + status_queue = queue.Queue() + for service in self.config.get_enabled_services(): + try: + if hasattr(self, service): + proxy = getattr(self, service) + if ( + proxy + and hasattr(proxy, get_dep_fn_name) + and hasattr(proxy, cleanup_fn_name) + ): + deps = getattr(proxy, get_dep_fn_name)() + if deps: + dependencies.update(deps) + except ( + exceptions.NotSupported, + exceptions.ServiceDisabledException, + ): + # Cloud may include endpoint in catalog but not + # implement the service or disable it + pass + dep_graph = utils.TinyDAG() + for k, v in dependencies.items(): + dep_graph.add_node(k) + for dep in v['before']: + dep_graph.add_node(dep) + dep_graph.add_edge(k, dep) + for dep in v.get('after', []): + dep_graph.add_edge(dep, k) + + cleanup_resources: dict[str, resource.Resource] = {} + + for service in dep_graph.walk(timeout=wait_timeout): + fn = None + try: + if hasattr(self, service): + proxy = getattr(self, service) + cleanup_fn = getattr(proxy, cleanup_fn_name, None) + if cleanup_fn: + fn = functools.partial( + cleanup_fn, + dry_run=dry_run, + client_status_queue=status_queue, + identified_resources=cleanup_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, 
+ skip_resources=skip_resources, + ) + except exceptions.ServiceDisabledException: + # same reason as above + pass + if fn: + self._pool_executor.submit( + cleanup_task, dep_graph, service, fn + ) + else: + dep_graph.node_done(service) + + for count in utils.iterate_timeout( + timeout=wait_timeout, + message="Timeout waiting for cleanup to finish", + wait=1, + ): + if dep_graph.is_complete(): + return + + +def cleanup_task(graph, service, fn): + try: + fn() + except Exception: + log = _log.setup_logging('openstack.project_cleanup') + log.exception(f'Error in the {service} cleanup function') + finally: + graph.node_done(service) diff --git a/openstack/telemetry/v2/__init__.py b/openstack/cloud/tests/__init__.py similarity index 100% rename from openstack/telemetry/v2/__init__.py rename to openstack/cloud/tests/__init__.py diff --git a/openstack/cluster/cluster_service.py b/openstack/cluster/cluster_service.py deleted file mode 100644 index 7b6eb5d1be..0000000000 --- a/openstack/cluster/cluster_service.py +++ /dev/null @@ -1,27 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack import service_filter - - -class ClusterService(service_filter.ServiceFilter): - """The cluster service.""" - - valid_versions = [service_filter.ValidVersion('v1')] - UNVERSIONED = None - - def __init__(self, version=None): - """Create a cluster service.""" - super(ClusterService, self).__init__( - service_type='clustering', - version=version - ) diff --git a/openstack/cluster/v1/_proxy.py b/openstack/cluster/v1/_proxy.py deleted file mode 100644 index f5526c69a8..0000000000 --- a/openstack/cluster/v1/_proxy.py +++ /dev/null @@ -1,856 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.cluster.v1 import action as _action -from openstack.cluster.v1 import build_info -from openstack.cluster.v1 import cluster as _cluster -from openstack.cluster.v1 import cluster_attr as _cluster_attr -from openstack.cluster.v1 import cluster_policy as _cluster_policy -from openstack.cluster.v1 import event as _event -from openstack.cluster.v1 import node as _node -from openstack.cluster.v1 import policy as _policy -from openstack.cluster.v1 import policy_type as _policy_type -from openstack.cluster.v1 import profile as _profile -from openstack.cluster.v1 import profile_type as _profile_type -from openstack.cluster.v1 import receiver as _receiver -from openstack import proxy2 -from openstack import resource2 - - -class Proxy(proxy2.BaseProxy): - - def get_build_info(self): - """Get build info for service engine and API - - :returns: A dictionary containing the API and engine revision string. - """ - return self._get(build_info.BuildInfo, requires_id=False) - - def profile_types(self, **query): - """Get a generator of profile types. - - :returns: A generator of objects that are of type - :class:`~openstack.cluster.v1.profile_type.ProfileType` - """ - return self._list(_profile_type.ProfileType, paginated=False, **query) - - def get_profile_type(self, profile_type): - """Get the details about a profile_type. - - :param name: The name of the profile_type to retrieve or an object of - :class:`~openstack.cluster.v1.profile_type.ProfileType`. - - :returns: A :class:`~openstack.cluster.v1.profile_type.ProfileType` - object. - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - profile_type matching the name could be found. - """ - return self._get(_profile_type.ProfileType, profile_type) - - def policy_types(self, **query): - """Get a generator of policy types. 
- - :returns: A generator of objects that are of type - :class:`~openstack.cluster.v1.policy_type.PolicyType` - """ - return self._list(_policy_type.PolicyType, paginated=False, **query) - - def get_policy_type(self, policy_type): - """Get the details about a policy_type. - - :param policy_type: The name of a poicy_type or an object of - :class:`~openstack.cluster.v1.policy_type.PolicyType`. - - :returns: A :class:`~openstack.cluster.v1.policy_type.PolicyType` - object. - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - policy_type matching the name could be found. - """ - return self._get(_policy_type.PolicyType, policy_type) - - def create_profile(self, **attrs): - """Create a new profile from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.cluster.v1.profile.Profile`, it is comprised - of the properties on the Profile class. - - :returns: The results of profile creation. - :rtype: :class:`~openstack.cluster.v1.profile.Profile`. - """ - return self._create(_profile.Profile, **attrs) - - def delete_profile(self, profile, ignore_missing=True): - """Delete a profile. - - :param profile: The value can be either the name or ID of a profile or - a :class:`~openstack.cluster.v1.profile.Profile` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised when - the profile could not be found. When set to ``True``, no exception - will be raised when attempting to delete a non-existent profile. - - :returns: ``None`` - """ - self._delete(_profile.Profile, profile, ignore_missing=ignore_missing) - - def find_profile(self, name_or_id, ignore_missing=True): - """Find a single profile. - - :param str name_or_id: The name or ID of a profile. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. 
- When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.cluster.v1.profile.Profile` object - or None - """ - return self._find(_profile.Profile, name_or_id, - ignore_missing=ignore_missing) - - def get_profile(self, profile): - """Get a single profile. - - :param profile: The value can be the name or ID of a profile or a - :class:`~openstack.cluster.v1.profile.Profile` instance. - - :returns: One :class:`~openstack.cluster.v1.profile.Profile` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - profile matching the criteria could be found. - """ - return self._get(_profile.Profile, profile) - - def profiles(self, **query): - """Retrieve a generator of profiles. - - :param kwargs \*\*query: Optional query parameters to be sent to - restrict the profiles to be returned. Available parameters include: - - * name: The name of a profile. - * type: The type name of a profile. - * metadata: A list of key-value pairs that are associated with a - profile. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests a specified size of returned items from the - query. Returns a number of items up to the specified limit - value. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen item from the response as the marker parameter - value in a subsequent limited request. - * global_project: A boolean value indicating whether profiles - from all projects will be returned. - - :returns: A generator of profile instances. - """ - return self._list(_profile.Profile, paginated=True, **query) - - def update_profile(self, profile, **attrs): - """Update a profile. 
- - :param profile: Either the name or the ID of the profile, or an - instance of :class:`~openstack.cluster.v1.profile.Profile`. - :param attrs: The attributes to update on the profile represented by - the ``value`` parameter. - - :returns: The updated profile. - :rtype: :class:`~openstack.cluster.v1.profile.Profile` - """ - return self._update(_profile.Profile, profile, **attrs) - - def validate_profile(self, **attrs): - """Validate a profile spec. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.cluster.v1.profile.ProfileValidate`, it is - comprised of the properties on the Profile class. - - :returns: The results of profile validation. - :rtype: :class:`~openstack.cluster.v1.profile.ProfileValidate`. - """ - return self._create(_profile.ProfileValidate, **attrs) - - def create_cluster(self, **attrs): - """Create a new cluster from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.cluster.v1.cluster.Cluster`, it is comprised - of the properties on the Cluster class. - - :returns: The results of cluster creation. - :rtype: :class:`~openstack.cluster.v1.cluster.Cluster`. - """ - return self._create(_cluster.Cluster, **attrs) - - def delete_cluster(self, cluster, ignore_missing=True): - """Delete a cluster. - - :param cluster: The value can be either the name or ID of a cluster or - a :class:`~openstack.cluster.v1.cluster.Cluster` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised when - the cluster could not be found. When set to ``True``, no exception - will be raised when attempting to delete a non-existent cluster. - - :returns: The instance of the Cluster which was deleted. - :rtype: :class:`~openstack.cluster.v1.cluster.Cluster`. 
- """ - return self._delete(_cluster.Cluster, cluster, - ignore_missing=ignore_missing) - - def find_cluster(self, name_or_id, ignore_missing=True): - """Find a single cluster. - - :param str name_or_id: The name or ID of a cluster. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.cluster.v1.cluster.Cluster` object - or None - """ - return self._find(_cluster.Cluster, name_or_id, - ignore_missing=ignore_missing) - - def get_cluster(self, cluster): - """Get a single cluster. - - :param cluster: The value can be the name or ID of a cluster or a - :class:`~openstack.cluster.v1.cluster.Cluster` instance. - - :returns: One :class:`~openstack.cluster.v1.cluster.Cluster` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - cluster matching the criteria could be found. - """ - return self._get(_cluster.Cluster, cluster) - - def clusters(self, **query): - """Retrieve a generator of clusters. - - :param kwargs \*\*query: Optional query parameters to be sent to - restrict the clusters to be returned. Available parameters include: - - * name: The name of a cluster. - * status: The current status of a cluster. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests a specified size of returned items from the - query. Returns a number of items up to the specified limit - value. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen item from the response as the marker parameter - value in a subsequent limited request. 
- * global_project: A boolean value indicating whether clusters - from all projects will be returned. - - :returns: A generator of cluster instances. - """ - return self._list(_cluster.Cluster, paginated=True, **query) - - def update_cluster(self, cluster, **attrs): - """Update a cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param attrs: The attributes to update on the cluster represented by - the ``cluster`` parameter. - - :returns: The updated cluster. - :rtype: :class:`~openstack.cluster.v1.cluster.Cluster` - """ - return self._update(_cluster.Cluster, cluster, **attrs) - - def cluster_add_nodes(self, cluster, nodes): - """Add nodes to a cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param nodes: List of nodes to be added to the cluster. - :returns: A dict containing the action initiated by this operation. - """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.add_nodes(self.session, nodes) - - def cluster_del_nodes(self, cluster, nodes): - """Remove nodes from a cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param nodes: List of nodes to be removed from the cluster. - :returns: A dict containing the action initiated by this operation. - """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.del_nodes(self.session, nodes) - - def cluster_replace_nodes(self, cluster, nodes): - """Replace the nodes in a cluster with specified nodes. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. 
- :param nodes: List of nodes to be deleted/added to the cluster. - :returns: A dict containing the action initiated by this operation. - """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.replace_nodes(self.session, nodes) - - def cluster_scale_out(self, cluster, count=None): - """Inflate the size of a cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param count: Optional parameter specifying the number of nodes to - be added. - :returns: A dict containing the action initiated by this operation. - """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.scale_out(self.session, count) - - def cluster_scale_in(self, cluster, count=None): - """Shrink the size of a cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param count: Optional parameter specifying the number of nodes to - be removed. - :returns: A dict containing the action initiated by this operation. - """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.scale_in(self.session, count) - - def cluster_resize(self, cluster, **params): - """Resize of cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param dict \*\*params: A dictionary providing the parameters for the - resize action. - :returns: A dict containing the action initiated by this operation. 
- """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.resize(self.session, **params) - - def cluster_attach_policy(self, cluster, policy, **params): - """Attach a policy to a cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param policy: Either the name or the ID of a policy. - :param dict \*\*params: A dictionary containing the properties for the - policy to be attached. - :returns: A dict containing the action initiated by this operation. - """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.policy_attach(self.session, policy, **params) - - def cluster_detach_policy(self, cluster, policy): - """Attach a policy to a cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param policy: Either the name or the ID of a policy. - :returns: A dict containing the action initiated by this operation. - """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.policy_detach(self.session, policy) - - def cluster_update_policy(self, cluster, policy, **params): - """Change properties of a policy which is bound to the cluster. - - :param cluster: Either the name or the ID of the cluster, or an - instance of :class:`~openstack.cluster.v1.cluster.Cluster`. - :param policy: Either the name or the ID of a policy. - :param dict \*\*params: A dictionary containing the new properties for - the policy. - :returns: A dict containing the action initiated by this operation. 
- """ - if isinstance(cluster, _cluster.Cluster): - obj = cluster - else: - obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) - return obj.policy_update(self.session, policy, **params) - - def collect_cluster_attrs(self, cluster, path): - """Collect attribute values across a cluster. - - :param cluster: The value can be either the ID of a cluster or a - :class:`~openstack.cluster.v1.cluster.Cluster` instance. - :param path: A Json path string specifying the attribute to collect. - - :returns: A dictionary containing the list of attribute values. - """ - return self._list(_cluster_attr.ClusterAttr, paginated=False, - cluster_id=cluster, path=path) - - def check_cluster(self, cluster, **params): - """check a cluster. - - :param cluster: The value can be either the ID of a cluster or a - :class:`~openstack.cluster.v1.cluster.Cluster` instance. - :param dict \*\*params: A dictionary providing the parameters for the - check action. - - :returns: A dictionary containing the action ID. - """ - obj = self._get_resource(_cluster.Cluster, cluster) - return obj.check(self.session, **params) - - def recover_cluster(self, cluster, **params): - """recover a node. - - :param cluster: The value can be either the ID of a cluster or a - :class:`~openstack.cluster.v1.cluster.Cluster` instance. - :param dict \*\*params: A dictionary providing the parameters for the - check action. - - :returns: A dictionary containing the action ID. - """ - obj = self._get_resource(_cluster.Cluster, cluster) - return obj.recover(self.session, **params) - - def create_node(self, **attrs): - """Create a new node from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.cluster.v1.node.Node`, it is comprised - of the properties on the ``Node`` class. - - :returns: The results of node creation. - :rtype: :class:`~openstack.cluster.v1.node.Node`. 
- """ - return self._create(_node.Node, **attrs) - - def delete_node(self, node, ignore_missing=True): - """Delete a node. - - :param node: The value can be either the name or ID of a node or a - :class:`~openstack.cluster.v1.node.Node` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised when - the node could not be found. When set to ``True``, no exception - will be raised when attempting to delete a non-existent node. - - :returns: The instance of the Node which was deleted. - :rtype: :class:`~openstack.cluster.v1.node.Node`. - """ - return self._delete(_node.Node, node, ignore_missing=ignore_missing) - - def check_node(self, node, **params): - """check a node. - - :param node: The value can be either the ID of a node or a - :class:`~openstack.cluster.v1.node.Node` instance. - - :returns: A dictionary containing the action ID. - """ - obj = self._get_resource(_node.Node, node) - return obj.check(self.session, **params) - - def recover_node(self, node, **params): - """recover a node. - - :param node: The value can be either the ID of a node or a - :class:`~openstack.cluster.v1.node.Node` instance. - - :returns: A dictionary containing the action ID. - """ - obj = self._get_resource(_node.Node, node) - return obj.recover(self.session, **params) - - def find_node(self, name_or_id, ignore_missing=True): - """Find a single node. - - :param str name_or_id: The name or ID of a node. - :returns: One :class:`~openstack.cluster.v1.node.Node` object or None. - """ - return self._find(_node.Node, name_or_id, - ignore_missing=ignore_missing) - - def get_node(self, node, details=False): - """Get a single node. - - :param node: The value can be the name or ID of a node or a - :class:`~openstack.cluster.v1.node.Node` instance. - :param details: An optional argument that indicates whether the - server should return more details when retrieving the node data. 
- - :returns: One :class:`~openstack.cluster.v1.node.Node` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - node matching the name or ID could be found. - """ - # NOTE: When retrieving node with details (using NodeDetail resource), - # the `node_id` is treated as part of the base_path thus a URI - # property rather than a resource ID as assumed by the _get() method - # in base proxy. - if details: - return self._get(_node.NodeDetail, requires_id=False, node_id=node) - return self._get(_node.Node, node) - - def nodes(self, **query): - """Retrieve a generator of nodes. - - :param kwargs \*\*query: Optional query parameters to be sent to - restrict the nodes to be returned. Available parameters include: - - * cluster_id: A string including the name or ID of a cluster to - which the resulted node(s) is a member. - * name: The name of a node. - * status: The current status of a node. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests at most the specified number of items be - returned from the query. - * marker: Specifies the ID of the last-seen node. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen node from the response as the marker parameter - value in a subsequent limited request. - * global_project: A boolean value indicating whether nodes - from all projects will be returned. - - :returns: A generator of node instances. - """ - return self._list(_node.Node, paginated=True, **query) - - def update_node(self, node, **attrs): - """Update a node. - - :param node: Either the name or the ID of the node, or an instance - of :class:`~openstack.cluster.v1.node.Node`. - :param attrs: The attributes to update on the node represented by - the ``node`` parameter. - - :returns: The updated node. 
- :rtype: :class:`~openstack.cluster.v1.node.Node` - """ - return self._update(_node.Node, node, **attrs) - - def create_policy(self, **attrs): - """Create a new policy from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.cluster.v1.policy.Policy`, it is comprised - of the properties on the ``Policy`` class. - - :returns: The results of policy creation. - :rtype: :class:`~openstack.cluster.v1.policy.Policy`. - """ - return self._create(_policy.Policy, **attrs) - - def delete_policy(self, policy, ignore_missing=True): - """Delete a policy. - - :param policy: The value can be either the name or ID of a policy or a - :class:`~openstack.cluster.v1.policy.Policy` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised when - the policy could not be found. When set to ``True``, no exception - will be raised when attempting to delete a non-existent policy. - - :returns: ``None`` - """ - self._delete(_policy.Policy, policy, ignore_missing=ignore_missing) - - def find_policy(self, name_or_id, ignore_missing=True): - """Find a single policy. - - :param str name_or_id: The name or ID of a policy. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the specified policy does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent policy. - :returns: A policy object or None. - :rtype: :class:`~openstack.cluster.v1.policy.Policy` - """ - return self._find(_policy.Policy, name_or_id, - ignore_missing=ignore_missing) - - def get_policy(self, policy): - """Get a single policy. - - :param policy: The value can be the name or ID of a policy or a - :class:`~openstack.cluster.v1.policy.Policy` instance. - - :returns: A policy object. 
- :rtype: :class:`~openstack.cluster.v1.policy.Policy` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - policy matching the criteria could be found. - """ - return self._get(_policy.Policy, policy) - - def policies(self, **query): - """Retrieve a generator of policies. - - :param kwargs \*\*query: Optional query parameters to be sent to - restrict the policies to be returned. Available parameters include: - - * name: The name of a policy. - * type: The type name of a policy. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests a specified size of returned items from the - query. Returns a number of items up to the specified limit - value. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen item from the response as the marker parameter - value in a subsequent limited request. - * global_project: A boolean value indicating whether policies from - all projects will be returned. - - :returns: A generator of policy instances. - """ - return self._list(_policy.Policy, paginated=True, **query) - - def update_policy(self, policy, **attrs): - """Update a policy. - - :param policy: Either the name or the ID of a policy, or an instance - of :class:`~openstack.cluster.v1.policy.Policy`. - :param attrs: The attributes to update on the policy represented by - the ``value`` parameter. - - :returns: The updated policy. - :rtype: :class:`~openstack.cluster.v1.policy.Policy` - """ - return self._update(_policy.Policy, policy, **attrs) - - def validate_policy(self, **attrs): - """Validate a policy spec. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.cluster.v1.policy.PolicyValidate`, it is - comprised of the properties on the Policy class. - - :returns: The results of Policy validation. 
- :rtype: :class:`~openstack.cluster.v1.policy.PolicyValidate`. - """ - return self._create(_policy.PolicyValidate, **attrs) - - def cluster_policies(self, cluster, **query): - """Retrieve a generator of cluster-policy bindings. - - :param cluster: The value can be the name or ID of a cluster or a - :class:`~openstack.cluster.v1.cluster.Cluster` instance. - :param kwargs \*\*query: Optional query parameters to be sent to - restrict the policies to be returned. Available parameters include: - * enabled: A boolean value indicating whether the policy is - enabled on the cluster. - :returns: A generator of cluster-policy binding instances. - """ - cluster_id = resource2.Resource._get_id(cluster) - return self._list(_cluster_policy.ClusterPolicy, paginated=False, - cluster_id=cluster_id, **query) - - def get_cluster_policy(self, cluster_policy, cluster): - """Get a cluster-policy binding. - - :param cluster_policy: - The value can be the name or ID of a policy or a - :class:`~openstack.cluster.v1.policy.Policy` instance. - :param cluster: The value can be the name or ID of a cluster or a - :class:`~openstack.cluster.v1.cluster.Cluster` instance. - - :returns: a cluster-policy binding object. - :rtype: :class:`~openstack.cluster.v1.cluster_policy.CLusterPolicy` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - cluster-policy binding matching the criteria could be found. - """ - return self._get(_cluster_policy.ClusterPolicy, cluster_policy, - cluster_id=cluster) - - def create_receiver(self, **attrs): - """Create a new receiver from attributes. - - :param dict attrs: Keyword arguments that will be used to create a - :class:`~openstack.cluster.v1.receiver.Receiver`, it is comprised - of the properties on the Receiver class. - - :returns: The results of receiver creation. - :rtype: :class:`~openstack.cluster.v1.receiver.Receiver`. 
- """ - return self._create(_receiver.Receiver, **attrs) - - def delete_receiver(self, receiver, ignore_missing=True): - """Delete a receiver. - - :param receiver: The value can be either the name or ID of a receiver - or a :class:`~openstack.cluster.v1.receiver.Receiver` instance. - :param bool ignore_missing: When set to ``False``, an exception - :class:`~openstack.exceptions.ResourceNotFound` will be raised when - the receiver could not be found. When set to ``True``, no exception - will be raised when attempting to delete a non-existent receiver. - - :returns: ``None`` - """ - self._delete(_receiver.Receiver, receiver, - ignore_missing=ignore_missing) - - def find_receiver(self, name_or_id, ignore_missing=True): - """Find a single receiver. - - :param str name_or_id: The name or ID of a receiver. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the specified receiver does not exist. When - set to ``True``, None will be returned when attempting to - find a nonexistent receiver. - :returns: A receiver object or None. - :rtype: :class:`~openstack.cluster.v1.receiver.Receiver` - """ - return self._find(_receiver.Receiver, name_or_id, - ignore_missing=ignore_missing) - - def get_receiver(self, receiver): - """Get a single receiver. - - :param receiver: The value can be the name or ID of a receiver or a - :class:`~openstack.cluster.v1.receiver.Receiver` instance. - - :returns: A receiver object. - :rtype: :class:`~openstack.cluster.v1.receiver.Receiver` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - receiver matching the criteria could be found. - """ - return self._get(_receiver.Receiver, receiver) - - def receivers(self, **query): - """Retrieve a generator of receivers. - - :param kwargs \*\*query: Optional query parameters for restricting the - receivers to be returned. Available parameters include: - - * name: The name of a receiver object. 
- * type: The type of receiver objects. - * cluster_id: The ID of the associated cluster. - * action: The name of the associated action. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * global_project: A boolean value indicating whether receivers - * from all projects will be returned. - - :returns: A generator of receiver instances. - """ - return self._list(_receiver.Receiver, paginated=True, **query) - - def get_action(self, action): - """Get a single action. - - :param action: The value can be the name or ID of an action or a - :class:`~openstack.cluster.v1.action.Action` instance. - - :returns: an action object. - :rtype: :class:`~openstack.cluster.v1.action.Action` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - action matching the criteria could be found. - """ - return self._get(_action.Action, action) - - def actions(self, **query): - """Retrieve a generator of actions. - - :param kwargs \*\*query: Optional query parameters to be sent to - restrict the actions to be returned. Available parameters include: - - * name: name of action for query. - * target: ID of the target object for which the actions should be - returned. - * action: built-in action types for query. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests a specified size of returned items from the - query. Returns a number of items up to the specified limit - value. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen item from the response as the marker parameter - value in a subsequent limited request. - - :returns: A generator of action instances. 
- """ - return self._list(_action.Action, paginated=True, **query) - - def get_event(self, event): - """Get a single event. - - :param event: The value can be the name or ID of an event or a - :class:`~openstack.cluster.v1.event.Event` instance. - - :returns: an event object. - :rtype: :class:`~openstack.cluster.v1.event.Event` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no - event matching the criteria could be found. - """ - return self._get(_event.Event, event) - - def events(self, **query): - """Retrieve a generator of events. - - :param kwargs \*\*query: Optional query parameters to be sent to - restrict the events to be returned. Available parameters include: - - * obj_name: name string of the object associated with an event. - * obj_type: type string of the object related to an event. The - value can be ``cluster``, ``node``, ``policy`` etc. - * obj_id: ID of the object associated with an event. - * cluster_id: ID of the cluster associated with the event, if any. - * action: name of the action associated with an event. - * sort: A list of sorting keys separated by commas. Each sorting - key can optionally be attached with a sorting direction - modifier which can be ``asc`` or ``desc``. - * limit: Requests a specified size of returned items from the - query. Returns a number of items up to the specified limit - value. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the ID of - the last-seen item from the response as the marker parameter - value in a subsequent limited request. - * global_project: A boolean specifying whether events from all - projects should be returned. This option is subject to access - control checking. - - :returns: A generator of event instances. 
- """ - return self._list(_event.Event, paginated=True, **query) diff --git a/openstack/cluster/v1/action.py b/openstack/cluster/v1/action.py deleted file mode 100644 index 58691692e4..0000000000 --- a/openstack/cluster/v1/action.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from openstack.cluster import cluster_service -from openstack import resource2 as resource - - -class Action(resource.Resource): - resource_key = 'action' - resources_key = 'actions' - base_path = '/actions' - service = cluster_service.ClusterService() - - # Capabilities - allow_list = True - allow_get = True - - _query_mapping = resource.QueryParameters( - 'name', 'target', 'action', 'status', 'sort', 'global_project') - - # Properties - #: Name of the action. - name = resource.Body('name') - #: ID of the target object, which can be a cluster or a node. - target_id = resource.Body('target') - #: Built-in type name of action. - action = resource.Body('action') - #: A string representation of the reason why the action was created. - cause = resource.Body('cause') - #: The owning engine that is currently running the action. - owner_id = resource.Body('owner') - #: The ID of the user who created this action. - user_id = resource.Body('user') - #: The ID of the project this profile belongs to. - project_id = resource.Body('project') - #: Interval in seconds between two consecutive executions. 
- interval = resource.Body('interval') - #: The time the action was started. - start_at = resource.Body('start_time') - #: The time the action completed execution. - end_at = resource.Body('end_time') - #: The timeout in seconds. - timeout = resource.Body('timeout') - #: Current status of the action. - status = resource.Body('status') - #: A string describing the reason that brought the action to its current - # status. - status_reason = resource.Body('status_reason') - #: A dictionary containing the inputs to the action. - inputs = resource.Body('inputs', type=dict) - #: A dictionary containing the outputs to the action. - outputs = resource.Body('outputs', type=dict) - #: A list of actions that must finish before this action starts execution. - depends_on = resource.Body('depends_on', type=list) - #: A list of actions that can start only after this action has finished. - depended_by = resource.Body('depended_by', type=list) - #: Timestamp when the action is created. - created_at = resource.Body('created_at') - #: Timestamp when the action was last updated. - updated_at = resource.Body('updated_at') diff --git a/openstack/cluster/v1/cluster.py b/openstack/cluster/v1/cluster.py deleted file mode 100644 index a2dd62a65c..0000000000 --- a/openstack/cluster/v1/cluster.py +++ /dev/null @@ -1,165 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.cluster import cluster_service -from openstack import resource2 as resource -from openstack import utils - - -class Cluster(resource.Resource): - resource_key = 'cluster' - resources_key = 'clusters' - base_path = '/clusters' - service = cluster_service.ClusterService() - - # capabilities - allow_create = True - allow_get = True - allow_update = True - allow_delete = True - allow_list = True - patch_update = True - - _query_mapping = resource.QueryParameters( - 'name', 'status', 'sort', 'global_project') - - # Properties - #: The name of the cluster. - name = resource.Body('name') - #: The ID of the profile used by this cluster. - profile_id = resource.Body('profile_id') - #: The ID of the user who created this cluster, thus the owner of it. - user_id = resource.Body('user') - #: The ID of the project this cluster belongs to. - project_id = resource.Body('project') - #: The domain ID of the cluster owner. - domain_id = resource.Body('domain') - #: Timestamp of when the cluster was initialized. - #: *Type: datetime object parsed from ISO 8601 formatted string* - init_at = resource.Body('init_at') - #: Timestamp of when the cluster was created. - #: *Type: datetime object parsed from ISO 8601 formatted string* - created_at = resource.Body('created_at') - #: Timestamp of when the cluster was last updated. - #: *Type: datetime object parsed from ISO 8601 formatted string* - updated_at = resource.Body('updated_at') - #: Lower bound (inclusive) for the size of the cluster. - min_size = resource.Body('min_size', type=int) - #: Upper bound (inclusive) for the size of the cluster. A value of - #: -1 indicates that there is no upper limit of cluster size. - max_size = resource.Body('max_size', type=int) - #: Desired capacity for the cluster. A cluster would be created at the - #: scale specified by this value. - desired_capacity = resource.Body('desired_capacity', type=int) - #: Default timeout (in seconds) for cluster operations. 
- timeout = resource.Body('timeout') - #: A string representation of the cluster status. - status = resource.Body('status') - #: A string describing the reason why the cluster in current status. - status_reason = resource.Body('status_reason') - #: A collection of key-value pairs that are attached to the cluster. - metadata = resource.Body('metadata', type=dict) - #: A dictionary with some runtime data associated with the cluster. - data = resource.Body('data', type=dict) - #: A list IDs of nodes that are members of the cluster. - node_ids = resource.Body('nodes') - #: Name of the profile used by the cluster. - profile_name = resource.Body('profile_name') - #: A dictionary with dependency information of the cluster - dependents = resource.Body('dependents', type=dict) - - def action(self, session, body): - url = utils.urljoin(self.base_path, self._get_id(self), 'actions') - resp = session.post(url, endpoint_filter=self.service, json=body) - return resp.json() - - def add_nodes(self, session, nodes): - body = { - 'add_nodes': { - 'nodes': nodes, - } - } - return self.action(session, body) - - def del_nodes(self, session, nodes): - body = { - 'del_nodes': { - 'nodes': nodes, - } - } - return self.action(session, body) - - def replace_nodes(self, session, nodes): - body = { - 'replace_nodes': { - 'nodes': nodes, - } - } - return self.action(session, body) - - def scale_out(self, session, count=None): - body = { - 'scale_out': { - 'count': count, - } - } - return self.action(session, body) - - def scale_in(self, session, count=None): - body = { - 'scale_in': { - 'count': count, - } - } - return self.action(session, body) - - def resize(self, session, **params): - body = { - 'resize': params - } - return self.action(session, body) - - def policy_attach(self, session, policy_id, **params): - data = {'policy_id': policy_id} - data.update(params) - body = { - 'policy_attach': data - } - return self.action(session, body) - - def policy_detach(self, session, policy_id): - 
body = { - 'policy_detach': { - 'policy_id': policy_id, - } - } - return self.action(session, body) - - def policy_update(self, session, policy_id, **params): - data = {'policy_id': policy_id} - data.update(params) - body = { - 'policy_update': data - } - return self.action(session, body) - - def check(self, session, **params): - body = { - 'check': params - } - return self.action(session, body) - - def recover(self, session, **params): - body = { - 'recover': params - } - return self.action(session, body) diff --git a/openstack/cluster/v1/event.py b/openstack/cluster/v1/event.py deleted file mode 100644 index 008dc4794d..0000000000 --- a/openstack/cluster/v1/event.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - - -from openstack.cluster import cluster_service -from openstack import resource2 as resource - - -class Event(resource.Resource): - resource_key = 'event' - resources_key = 'events' - base_path = '/events' - service = cluster_service.ClusterService() - - # Capabilities - allow_list = True - allow_get = True - - _query_mapping = resource.QueryParameters( - 'oname', 'otype', 'oid', 'cluster_id', 'action', 'level', - 'sort', 'global_project') - - # Properties - #: Timestamp string (in ISO8601 format) when the event was generated. - generated_at = resource.Body('timestamp') - #: The UUID of the object related to this event. - obj_id = resource.Body('oid') - #: The name of the object related to this event. 
- obj_name = resource.Body('oname') - #: The type name of the object related to this event. - obj_type = resource.Body('otype') - #: The UUID of the cluster related to this event, if any. - cluster_id = resource.Body('cluster_id') - #: The event level (priority). - level = resource.Body('level') - #: The ID of the user. - user_id = resource.Body('user') - #: The ID of the project (tenant). - project_id = resource.Body('project') - #: The string representation of the action associated with the event. - action = resource.Body('action') - #: The status of the associated object. - status = resource.Body('status') - #: A string description of the reason that brought the object into its - #: current status. - status_reason = resource.Body('status_reason') diff --git a/openstack/cluster/v1/node.py b/openstack/cluster/v1/node.py deleted file mode 100644 index 9593c7e9f3..0000000000 --- a/openstack/cluster/v1/node.py +++ /dev/null @@ -1,124 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.cluster import cluster_service -from openstack import resource2 as resource -from openstack import utils - - -class Node(resource.Resource): - resource_key = 'node' - resources_key = 'nodes' - base_path = '/nodes' - service = cluster_service.ClusterService() - - # capabilities - allow_create = True - allow_get = True - allow_update = True - allow_delete = True - allow_list = True - - patch_update = True - - _query_mapping = resource.QueryParameters( - 'show_details', 'name', 'sort', 'global_project', 'cluster_id', - 'status') - - # Properties - #: The name of the node. - name = resource.Body('name') - #: The ID of the physical object that backs the node. - physical_id = resource.Body('physical_id') - #: The ID of the cluster in which this node is a member. - #: A node is an orphan node if this field is empty. - cluster_id = resource.Body('cluster_id') - #: The ID of the profile used by this node. - profile_id = resource.Body('profile_id') - #: The ID of the user who created this node. - user_id = resource.Body('user') - #: The ID of the project this node belongs to. - project_id = resource.Body('project') - #: The name of the profile used by this node. - profile_name = resource.Body('profile_name') - #: An integer that is unique inside the owning cluster. - #: A value of -1 means this node is an orphan node. - index = resource.Body('index', type=int) - #: A string indicating the role the node plays in a cluster. - role = resource.Body('role') - #: The timestamp of the node object's initialization. - #: *Type: datetime object parsed from ISO 8601 formatted string* - init_at = resource.Body('init_at') - #: The timestamp of the node's creation, i.e. the physical object - #: represented by this node is also created. - #: *Type: datetime object parsed from ISO 8601 formatted string* - created_at = resource.Body('created_at') - #: The timestamp the node was last updated. 
- #: *Type: datetime object parsed from ISO 8601 formatted string* - updated_at = resource.Body('updated_at') - #: A string indicating the node's status. - status = resource.Body('status') - #: A string describing why the node entered its current status. - status_reason = resource.Body('status_reason') - #: A map containing key-value pairs attached to the node. - metadata = resource.Body('metadata', type=dict) - #: A map containing some runtime data for this node. - data = resource.Body('data', type=dict) - #: A map containing the details of the physical object this node - #: represents - details = resource.Body('details', type=dict) - #: A map containing the dependency of nodes - dependents = resource.Body('dependents', type=dict) - - def _action(self, session, body): - """Procedure the invoke an action API. - - :param session: A session object used for sending request. - :param body: The body of action to be sent. - """ - url = utils.urljoin(self.base_path, self.id, 'actions') - resp = session.post(url, endpoint_filter=self.service, json=body) - return resp.json() - - def check(self, session, **params): - """An action procedure for the node to check its health status. - - :param session: A session object used for sending request. - :returns: A dictionary containing the action ID. - """ - body = { - 'check': params - } - return self._action(session, body) - - def recover(self, session, **params): - """An action procedure for the node to recover. - - :param session: A session object used for sending request. - :returns: A dictionary containing the action ID. 
- """ - body = { - 'recover': params - } - return self._action(session, body) - - -class NodeDetail(Node): - base_path = '/nodes/%(node_id)s?show_details=True' - - allow_create = False - allow_get = True - allow_update = False - allow_delete = False - allow_list = False - - node_id = resource.URI('node_id') diff --git a/openstack/cluster/v1/policy.py b/openstack/cluster/v1/policy.py deleted file mode 100644 index 7b2e67bd36..0000000000 --- a/openstack/cluster/v1/policy.py +++ /dev/null @@ -1,64 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.cluster import cluster_service -from openstack import resource2 as resource - - -class Policy(resource.Resource): - resource_key = 'policy' - resources_key = 'policies' - base_path = '/policies' - service = cluster_service.ClusterService() - - # Capabilities - allow_list = True - allow_get = True - allow_create = True - allow_delete = True - allow_update = True - - patch_update = True - - _query_mapping = resource.QueryParameters( - 'name', 'type', 'sort', 'global_project') - - # Properties - #: The name of the policy. - name = resource.Body('name') - #: The type name of the policy. - type = resource.Body('type') - #: The ID of the project this policy belongs to. - project_id = resource.Body('project') - #: The ID of the user who created this policy. - user_id = resource.Body('user') - #: The timestamp when the policy is created. 
- created_at = resource.Body('created_at') - #: The timestamp when the policy was last updated. - updated_at = resource.Body('updated_at') - #: The specification of the policy. - spec = resource.Body('spec', type=dict) - #: A dictionary containing runtime data of the policy. - data = resource.Body('data', type=dict) - - -class PolicyValidate(Policy): - base_path = '/policies/validate' - - # Capabilities - allow_list = False - allow_get = False - allow_create = True - allow_delete = False - allow_update = False - - patch_update = False diff --git a/openstack/cluster/v1/policy_type.py b/openstack/cluster/v1/policy_type.py deleted file mode 100644 index aefcffec67..0000000000 --- a/openstack/cluster/v1/policy_type.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.cluster import cluster_service -from openstack import resource2 as resource - - -class PolicyType(resource.Resource): - resource_key = 'policy_type' - resources_key = 'policy_types' - base_path = '/policy-types' - service = cluster_service.ClusterService() - - # Capabilities - allow_list = True - allow_get = True - - # Properties - #: Name of policy type. - name = resource.Body('name', alternate_id=True) - #: The schema of the policy type. 
- schema = resource.Body('schema') - #: The support status of the policy type - support_status = resource.Body('support_status') diff --git a/openstack/cluster/v1/profile.py b/openstack/cluster/v1/profile.py deleted file mode 100644 index 7688e07cf9..0000000000 --- a/openstack/cluster/v1/profile.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.cluster import cluster_service -from openstack import resource2 as resource - - -class Profile(resource.Resource): - resource_key = 'profile' - resources_key = 'profiles' - base_path = '/profiles' - service = cluster_service.ClusterService() - - # capabilities - allow_create = True - allow_get = True - allow_update = True - allow_delete = True - allow_list = True - - patch_update = True - - _query_mapping = resource.QueryParameters( - 'sort', 'global_project', 'type', 'name') - - # Bodyerties - #: The name of the profile - name = resource.Body('name') - #: The type of the profile. - type = resource.Body('type') - #: The ID of the project this profile belongs to. - project_id = resource.Body('project') - #: The ID of the user who created this profile. - user_id = resource.Body('user') - #: The spec of the profile. - spec = resource.Body('spec', type=dict) - #: A collection of key-value pairs that are attached to the profile. - metadata = resource.Body('metadata', type=dict) - #: Timestamp of when the profile was created. 
- created_at = resource.Body('created_at') - #: Timestamp of when the profile was last updated. - updated_at = resource.Body('updated_at') - - -class ProfileValidate(Profile): - base_path = '/profiles/validate' - allow_create = True - allow_get = False - allow_update = False - allow_delete = False - allow_list = False - - patch_update = False diff --git a/openstack/cluster/v1/profile_type.py b/openstack/cluster/v1/profile_type.py deleted file mode 100644 index 3b79297aa3..0000000000 --- a/openstack/cluster/v1/profile_type.py +++ /dev/null @@ -1,33 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.cluster import cluster_service -from openstack import resource2 as resource - - -class ProfileType(resource.Resource): - resource_key = 'profile_type' - resources_key = 'profile_types' - base_path = '/profile-types' - service = cluster_service.ClusterService() - - # Capabilities - allow_list = True - allow_get = True - - # Properties - #: Name of the profile type. - name = resource.Body('name', alternate_id=True) - #: The schema of the profile type. 
- schema = resource.Body('schema') - #: The support status of the profile type - support_status = resource.Body('support_status') diff --git a/openstack/cluster/v1/receiver.py b/openstack/cluster/v1/receiver.py deleted file mode 100644 index af9f1367f2..0000000000 --- a/openstack/cluster/v1/receiver.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.cluster import cluster_service -from openstack import resource2 as resource - - -class Receiver(resource.Resource): - resource_key = 'receiver' - resources_key = 'receivers' - base_path = '/receivers' - service = cluster_service.ClusterService() - - # Capabilities - allow_list = True - allow_get = True - allow_create = True - allow_delete = True - - _query_mapping = resource.QueryParameters( - 'name', 'type', 'cluster_id', 'action', 'sort', 'global_project', - user_id='user') - - # Properties - #: The name of the receiver. - name = resource.Body('name') - #: The type of the receiver. - type = resource.Body('type') - #: The ID of the user who created the receiver, thus the owner of it. - user_id = resource.Body('user') - #: The ID of the project this receiver belongs to. - project_id = resource.Body('project') - #: The domain ID of the receiver. - domain_id = resource.Body('domain') - #: The ID of the targeted cluster. - cluster_id = resource.Body('cluster_id') - #: The name of the targeted action. 
- action = resource.Body('action') - #: Timestamp of when the receiver was created. - created_at = resource.Body('created_at') - #: Timestamp of when the receiver was last updated. - updated_at = resource.Body('updated_at') - #: The credential of the impersonated user. - actor = resource.Body('actor', type=dict) - #: A dictionary containing key-value pairs that are provided to the - #: targeted action. - params = resource.Body('params', type=dict) - #: The information about the channel through which you can trigger the - #: receiver hence the associated action. - channel = resource.Body('channel', type=dict) diff --git a/openstack/cluster/version.py b/openstack/cluster/version.py deleted file mode 100644 index 070497b677..0000000000 --- a/openstack/cluster/version.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- - -from openstack.cluster import cluster_service -from openstack import resource - - -class Version(resource.Resource): - resource_key = 'version' - resources_key = 'versions' - base_path = '/' - service = cluster_service.ClusterService( - version=cluster_service.ClusterService.UNVERSIONED - ) - - # capabilities - allow_list = True - - # Properties - links = resource.prop('links') - status = resource.prop('status') diff --git a/openstack/tests/examples/__init__.py b/openstack/clustering/__init__.py similarity index 100% rename from openstack/tests/examples/__init__.py rename to openstack/clustering/__init__.py diff --git a/openstack/clustering/clustering_service.py b/openstack/clustering/clustering_service.py new file mode 100644 index 0000000000..7abd5b1564 --- /dev/null +++ b/openstack/clustering/clustering_service.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.clustering.v1 import _proxy +from openstack import service_description + + +class ClusteringService(service_description.ServiceDescription[_proxy.Proxy]): + """The clustering service.""" + + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/tests/functional/block_store/__init__.py b/openstack/clustering/v1/__init__.py similarity index 100% rename from openstack/tests/functional/block_store/__init__.py rename to openstack/clustering/v1/__init__.py diff --git a/openstack/clustering/v1/_async_resource.py b/openstack/clustering/v1/_async_resource.py new file mode 100644 index 0000000000..01d59af9e9 --- /dev/null +++ b/openstack/clustering/v1/_async_resource.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.clustering.v1 import action as _action +from openstack import exceptions +from openstack import resource + + +class AsyncResource(resource.Resource): + def delete(self, session, error_message=None, **kwargs): + """Delete the remote resource based on this instance. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + + :return: An :class:`~openstack.clustering.v1.action.Action` + instance. The ``fetch`` method will need to be used + to populate the `Action` with status information. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_commit` is not set to ``True``. 
+ :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. + """ + response = self._raw_delete(session) + return self._delete_response(response, error_message) + + def _delete_response(self, response, error_message=None): + exceptions.raise_from_response(response, error_message=error_message) + location = response.headers['Location'] + action_id = location.split('/')[-1] + action = _action.Action.existing( + id=action_id, connection=self._connection + ) + return action diff --git a/openstack/clustering/v1/_proxy.py b/openstack/clustering/v1/_proxy.py new file mode 100644 index 0000000000..8c70c1afa3 --- /dev/null +++ b/openstack/clustering/v1/_proxy.py @@ -0,0 +1,1142 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack.clustering.v1 import action as _action +from openstack.clustering.v1 import build_info +from openstack.clustering.v1 import cluster as _cluster +from openstack.clustering.v1 import cluster_attr as _cluster_attr +from openstack.clustering.v1 import cluster_policy as _cluster_policy +from openstack.clustering.v1 import event as _event +from openstack.clustering.v1 import node as _node +from openstack.clustering.v1 import policy as _policy +from openstack.clustering.v1 import policy_type as _policy_type +from openstack.clustering.v1 import profile as _profile +from openstack.clustering.v1 import profile_type as _profile_type +from openstack.clustering.v1 import receiver as _receiver +from openstack.clustering.v1 import service as _service +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + _resource_registry = { + "action": _action.Action, + "build_info": build_info.BuildInfo, + "cluster": _cluster.Cluster, + "cluster_attr": _cluster_attr.ClusterAttr, + "cluster_policy": _cluster_policy.ClusterPolicy, + "event": _event.Event, + "node": _node.Node, + "policy": _policy.Policy, + "policy_type": _policy_type.PolicyType, + "profile": _profile.Profile, + "profile_type": _profile_type.ProfileType, + "receiver": _receiver.Receiver, + "service": _service.Service, + } + + def get_build_info(self): + """Get build info for service engine and API + + :returns: A dictionary containing the API and engine revision string. + """ + return self._get(build_info.BuildInfo, requires_id=False) + + def profile_types(self, **query): + """Get a generator of profile types. + + :returns: A generator of objects that are of type + :class:`~openstack.clustering.v1.profile_type.ProfileType` + """ + return self._list(_profile_type.ProfileType, **query) + + def get_profile_type(self, profile_type): + """Get the details about a profile type. 
+ + :param profile_type: The name of the profile_type to retrieve or an + object of + :class:`~openstack.clustering.v1.profile_type.ProfileType`. + + :returns: A :class:`~openstack.clustering.v1.profile_type.ProfileType` + object. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + profile_type matching the name could be found. + """ + return self._get(_profile_type.ProfileType, profile_type) + + def policy_types(self, **query): + """Get a generator of policy types. + + :returns: A generator of objects that are of type + :class:`~openstack.clustering.v1.policy_type.PolicyType` + """ + return self._list(_policy_type.PolicyType, **query) + + def get_policy_type(self, policy_type): + """Get the details about a policy type. + + :param policy_type: The name of a poicy_type or an object of + :class:`~openstack.clustering.v1.policy_type.PolicyType`. + + :returns: A :class:`~openstack.clustering.v1.policy_type.PolicyType` + object. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + policy_type matching the name could be found. + """ + return self._get(_policy_type.PolicyType, policy_type) + + def create_profile(self, **attrs): + """Create a new profile from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.clustering.v1.profile.Profile`, it is comprised + of the properties on the Profile class. + + :returns: The results of profile creation. + :rtype: :class:`~openstack.clustering.v1.profile.Profile`. + """ + return self._create(_profile.Profile, **attrs) + + def delete_profile(self, profile, ignore_missing=True): + """Delete a profile. + + :param profile: The value can be either the name or ID of a profile or + a :class:`~openstack.clustering.v1.profile.Profile` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the profile could not be found. 
When set to ``True``, no + exception will be raised when attempting to delete a non-existent + profile. + + :returns: ``None`` + """ + self._delete(_profile.Profile, profile, ignore_missing=ignore_missing) + + def find_profile(self, name_or_id, ignore_missing=True): + """Find a single profile. + + :param str name_or_id: The name or ID of a profile. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.clustering.v1.profile.Profile` object + or None + """ + return self._find( + _profile.Profile, name_or_id, ignore_missing=ignore_missing + ) + + def get_profile(self, profile): + """Get a single profile. + + :param profile: The value can be the name or ID of a profile or a + :class:`~openstack.clustering.v1.profile.Profile` instance. + + :returns: One :class:`~openstack.clustering.v1.profile.Profile` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + profile matching the criteria could be found. + """ + return self._get(_profile.Profile, profile) + + def profiles(self, **query): + """Retrieve a generator of profiles. + + :param kwargs query: Optional query parameters to be sent to + restrict the profiles to be returned. Available parameters include: + + * name: The name of a profile. + * type: The type name of a profile. + * metadata: A list of key-value pairs that are associated with a + profile. + * sort: A list of sorting keys separated by commas. Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * limit: Requests a specified size of returned items from the + query. Returns a number of items up to the specified limit + value. + * marker: Specifies the ID of the last-seen item. 
Use the limit + parameter to make an initial limited request and use the ID of + the last-seen item from the response as the marker parameter + value in a subsequent limited request. + * global_project: A boolean value indicating whether profiles + from all projects will be returned. + + :returns: A generator of profile instances. + """ + return self._list(_profile.Profile, **query) + + def update_profile(self, profile, **attrs): + """Update a profile. + + :param profile: Either the name or the ID of the profile, or an + instance of :class:`~openstack.clustering.v1.profile.Profile`. + :param attrs: The attributes to update on the profile represented by + the ``value`` parameter. + + :returns: The updated profile. + :rtype: :class:`~openstack.clustering.v1.profile.Profile` + """ + return self._update(_profile.Profile, profile, **attrs) + + def validate_profile(self, **attrs): + """Validate a profile spec. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.clustering.v1.profile.ProfileValidate`, it is + comprised of the properties on the Profile class. + + :returns: The results of profile validation. + :rtype: :class:`~openstack.clustering.v1.profile.ProfileValidate`. + """ + return self._create(_profile.ProfileValidate, **attrs) + + # ====== CLUSTERS ====== + def create_cluster(self, **attrs): + """Create a new cluster from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.clustering.v1.cluster.Cluster`, it is comprised + of the properties on the Cluster class. + + :returns: The results of cluster creation. + :rtype: :class:`~openstack.clustering.v1.cluster.Cluster`. + """ + return self._create(_cluster.Cluster, **attrs) + + def delete_cluster(self, cluster, ignore_missing=True, force_delete=False): + """Delete a cluster. + + :param cluster: The value can be either the name or ID of a cluster or + a :class:`~openstack.cluster.v1.cluster.Cluster` instance. 
+ :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the cluster could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + cluster. + :param bool force_delete: When set to ``True``, the cluster deletion + will be forced immediately. + + :returns: The instance of the Cluster which was deleted. + :rtype: :class:`~openstack.cluster.v1.cluster.Cluster`. + """ + if force_delete: + server = self._get_resource(_cluster.Cluster, cluster) + return server.force_delete(self) + else: + return self._delete( + _cluster.Cluster, cluster, ignore_missing=ignore_missing + ) + + def find_cluster(self, name_or_id, ignore_missing=True): + """Find a single cluster. + + :param str name_or_id: The name or ID of a cluster. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.clustering.v1.cluster.Cluster` object + or None + """ + return self._find( + _cluster.Cluster, name_or_id, ignore_missing=ignore_missing + ) + + def get_cluster(self, cluster): + """Get a single cluster. + + :param cluster: The value can be the name or ID of a cluster or a + :class:`~openstack.clustering.v1.cluster.Cluster` instance. + + :returns: One :class:`~openstack.clustering.v1.cluster.Cluster` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + cluster matching the criteria could be found. + """ + return self._get(_cluster.Cluster, cluster) + + def clusters(self, **query): + """Retrieve a generator of clusters. + + :param kwargs query: Optional query parameters to be sent to + restrict the clusters to be returned. Available parameters include: + + * name: The name of a cluster. + * status: The current status of a cluster. 
+ * sort: A list of sorting keys separated by commas. Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * limit: Requests a specified size of returned items from the + query. Returns a number of items up to the specified limit + value. + * marker: Specifies the ID of the last-seen item. Use the limit + parameter to make an initial limited request and use the ID of + the last-seen item from the response as the marker parameter + value in a subsequent limited request. + * global_project: A boolean value indicating whether clusters + from all projects will be returned. + + :returns: A generator of cluster instances. + """ + return self._list(_cluster.Cluster, **query) + + def update_cluster(self, cluster, **attrs): + """Update a cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param attrs: The attributes to update on the cluster represented by + the ``cluster`` parameter. + + :returns: The updated cluster. + :rtype: :class:`~openstack.clustering.v1.cluster.Cluster` + """ + return self._update(_cluster.Cluster, cluster, **attrs) + + def get_cluster_metadata(self, cluster): + """Return a dictionary of metadata for a cluster + + :param cluster: Either the ID of a cluster or a + :class:`~openstack.clustering.v3.cluster.Cluster`. + + :returns: A :class:`~openstack.clustering.v3.cluster.Cluster` with the + cluster's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.clustering.v3.cluster.Cluster` + """ + cluster = self._get_resource(_cluster.Cluster, cluster) + return cluster.fetch_metadata(self) + + def set_cluster_metadata(self, cluster, **metadata): + """Update metadata for a cluster + + :param cluster: Either the ID of a cluster or a + :class:`~openstack.clustering.v3.cluster.Cluster`. + :param kwargs metadata: Key/value pairs to be updated in the cluster's + metadata. 
No other metadata is modified by this call. All keys + and values are stored as Unicode. + + + :returns: A :class:`~openstack.clustering.v3.cluster.Cluster` with the + cluster's metadata. All keys and values are Unicode text. + :rtype: :class:`~openstack.clustering.v3.cluster.Cluster` + """ + cluster = self._get_resource(_cluster.Cluster, cluster) + return cluster.set_metadata(self, metadata=metadata) + + def delete_cluster_metadata(self, cluster, keys=None): + """Delete metadata for a cluster + + :param cluster: Either the ID of a cluster or a + :class:`~openstack.clustering.v3.cluster.Cluster`. + :param list keys: The keys to delete. If left empty complete + metadata will be removed. + + :rtype: ``None`` + """ + cluster = self._get_resource(_cluster.Cluster, cluster) + if keys is not None: + for key in keys: + cluster.delete_metadata_item(self, key) + else: + cluster.delete_metadata(self) + + def add_nodes_to_cluster(self, cluster, nodes): + """Add nodes to a cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param nodes: List of nodes to be added to the cluster. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.add_nodes(self, nodes) + + def remove_nodes_from_cluster(self, cluster, nodes, **params): + """Remove nodes from a cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param nodes: List of nodes to be removed from the cluster. + :param kwargs params: Optional query parameters to be sent to + restrict the nodes to be returned. Available parameters include: + + * destroy_after_deletion: A boolean value indicating whether the + deleted nodes to be destroyed right away. 
+ :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.del_nodes(self, nodes, **params) + + def replace_nodes_in_cluster(self, cluster, nodes): + """Replace the nodes in a cluster with specified nodes. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param nodes: List of nodes to be deleted/added to the cluster. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.replace_nodes(self, nodes) + + def scale_out_cluster(self, cluster, count=None): + """Inflate the size of a cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param count: Optional parameter specifying the number of nodes to + be added. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.scale_out(self, count) + + def scale_in_cluster(self, cluster, count=None): + """Shrink the size of a cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param count: Optional parameter specifying the number of nodes to + be removed. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.scale_in(self, count) + + def resize_cluster(self, cluster, **params): + """Resize of cluster. 
+ + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param dict params: A dictionary providing the parameters for the + resize action. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.resize(self, **params) + + def attach_policy_to_cluster(self, cluster, policy, **params): + """Attach a policy to a cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param policy: Either the name or the ID of a policy. + :param dict params: A dictionary containing the properties for the + policy to be attached. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.policy_attach(self, policy, **params) + + def detach_policy_from_cluster(self, cluster, policy): + """Detach a policy from a cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param policy: Either the name or the ID of a policy. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.policy_detach(self, policy) + + def update_cluster_policy(self, cluster, policy, **params): + """Change properties of a policy which is bound to the cluster. + + :param cluster: Either the name or the ID of the cluster, or an + instance of :class:`~openstack.clustering.v1.cluster.Cluster`. + :param policy: Either the name or the ID of a policy. 
+ :param dict params: A dictionary containing the new properties for + the policy. + :returns: A dict containing the action initiated by this operation. + """ + if isinstance(cluster, _cluster.Cluster): + obj = cluster + else: + obj = self._find(_cluster.Cluster, cluster, ignore_missing=False) + return obj.policy_update(self, policy, **params) + + def collect_cluster_attrs(self, cluster, path, **query): + """Collect attribute values across a cluster. + + :param cluster: The value can be either the ID of a cluster or a + :class:`~openstack.clustering.v1.cluster.Cluster` instance. + :param path: A Json path string specifying the attribute to collect. + :param query: Optional query parameters to be sent to limit the + resources being returned. + + :returns: A dictionary containing the list of attribute values. + """ + return self._list( + _cluster_attr.ClusterAttr, cluster_id=cluster, path=path + ) + + def check_cluster(self, cluster, **params): + """Check a cluster. + + :param cluster: The value can be either the ID of a cluster or a + :class:`~openstack.clustering.v1.cluster.Cluster` instance. + :param dict params: A dictionary providing the parameters for the + check action. + + :returns: A dictionary containing the action ID. + """ + obj = self._get_resource(_cluster.Cluster, cluster) + return obj.check(self, **params) + + def recover_cluster(self, cluster, **params): + """recover a cluster. + + :param cluster: The value can be either the ID of a cluster or a + :class:`~openstack.clustering.v1.cluster.Cluster` instance. + :param dict params: A dictionary providing the parameters for the + recover action. + + :returns: A dictionary containing the action ID. + """ + obj = self._get_resource(_cluster.Cluster, cluster) + return obj.recover(self, **params) + + def perform_operation_on_cluster(self, cluster, operation, **params): + """Perform an operation on the specified cluster. 
+ + :param cluster: The value can be either the ID of a cluster or a + :class:`~openstack.clustering.v1.cluster.Cluster` instance. + :param operation: A string specifying the operation to be performed. + :param dict params: A dictionary providing the parameters for the + operation. + + :returns: A dictionary containing the action ID. + """ + obj = self._get_resource(_cluster.Cluster, cluster) + return obj.op(self, operation, **params) + + def create_node(self, **attrs): + """Create a new node from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.clustering.v1.node.Node`, it is comprised + of the properties on the ``Node`` class. + + :returns: The results of node creation. + :rtype: :class:`~openstack.clustering.v1.node.Node`. + """ + return self._create(_node.Node, **attrs) + + def delete_node(self, node, ignore_missing=True, force_delete=False): + """Delete a node. + + :param node: The value can be either the name or ID of a node or a + :class:`~openstack.cluster.v1.node.Node` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the node could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + node. + :param bool force_delete: When set to ``True``, the node deletion + will be forced immediately. + + :returns: The instance of the Node which was deleted. + :rtype: :class:`~openstack.cluster.v1.node.Node`. + """ + if force_delete: + server = self._get_resource(_node.Node, node) + return server.force_delete(self) + else: + return self._delete( + _node.Node, node, ignore_missing=ignore_missing + ) + + def find_node(self, name_or_id, ignore_missing=True): + """Find a single node. + + :param str name_or_id: The name or ID of a node. 
+ :param bool ignore_missing: When set to "False" + :class:`~openstack.exceptions.NotFoundException` will be + raised when the specified node does not exist. + when set to "True", None will be returned when + attempting to find a nonexistent policy + :returns: One :class:`~openstack.clustering.v1.node.Node` object + or None. + """ + return self._find( + _node.Node, name_or_id, ignore_missing=ignore_missing + ) + + def get_node(self, node, details=False): + """Get a single node. + + :param node: The value can be the name or ID of a node or a + :class:`~openstack.clustering.v1.node.Node` instance. + :param details: An optional argument that indicates whether the + server should return more details when retrieving the node data. + + :returns: One :class:`~openstack.clustering.v1.node.Node` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + node matching the name or ID could be found. + """ + # NOTE: When retrieving node with details (using NodeDetail resource), + # the `node_id` is treated as part of the base_path thus a URI + # property rather than a resource ID as assumed by the _get() method + # in base proxy. + if details: + return self._get(_node.NodeDetail, requires_id=False, node_id=node) + return self._get(_node.Node, node) + + def nodes(self, **query): + """Retrieve a generator of nodes. + + :param kwargs query: Optional query parameters to be sent to + restrict the nodes to be returned. Available parameters include: + + * cluster_id: A string including the name or ID of a cluster to + which the resulted node(s) is a member. + * name: The name of a node. + * status: The current status of a node. + * sort: A list of sorting keys separated by commas. Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * limit: Requests at most the specified number of items be + returned from the query. + * marker: Specifies the ID of the last-seen node. 
Use the limit + parameter to make an initial limited request and use the ID of + the last-seen node from the response as the marker parameter + value in a subsequent limited request. + * global_project: A boolean value indicating whether nodes + from all projects will be returned. + + :returns: A generator of node instances. + """ + return self._list(_node.Node, **query) + + def update_node(self, node, **attrs): + """Update a node. + + :param node: Either the name or the ID of the node, or an instance + of :class:`~openstack.clustering.v1.node.Node`. + :param attrs: The attributes to update on the node represented by + the ``node`` parameter. + + :returns: The updated node. + :rtype: :class:`~openstack.clustering.v1.node.Node` + """ + return self._update(_node.Node, node, **attrs) + + def check_node(self, node, **params): + """Check the health of the specified node. + + :param node: The value can be either the ID of a node or a + :class:`~openstack.clustering.v1.node.Node` instance. + :param dict params: A dictionary providing the parametes to the check + action. + + :returns: A dictionary containing the action ID. + """ + obj = self._get_resource(_node.Node, node) + return obj.check(self, **params) + + def recover_node(self, node, **params): + """Recover the specified node into healthy status. + + :param node: The value can be either the ID of a node or a + :class:`~openstack.clustering.v1.node.Node` instance. + :param dict params: A dict supplying parameters to the recover action. + + :returns: A dictionary containing the action ID. + """ + obj = self._get_resource(_node.Node, node) + return obj.recover(self, **params) + + def adopt_node(self, preview=False, **attrs): + """Adopting an existing resource as a node. + + :param preview: A boolean indicating whether this is a "preview" + operation which means only the profile to be used is returned + rather than creating a node object using that profile. + :param dict attrs: Keyword parameters for node adoption. 
Valid + parameters include: + + * type: (Required) A string containing the profile type and + version to be used for node adoption. For example, + ``os.nova.sever-1.0``. + * identity: (Required) A string including the name or ID of an + OpenStack resource to be adopted as a Senlin node. + * name: (Optional) The name of node to be created. Omitting + this parameter will have the node named automatically. + * snapshot: (Optional) A boolean indicating whether a snapshot + of the target resource should be created if possible. Default + is False. + * metadata: (Optional) A dictionary of arbitrary key-value pairs + to be associated with the adopted node. + * overrides: (Optional) A dictionary of key-value pairs to be used + to override attributes derived from the target resource. + + :returns: The result of node adoption. If `preview` is set to False + (default), returns a :class:`~openstack.clustering.v1.node.Node` + object, otherwise a Dict is returned containing the profile to + be used for the new node. + """ + node = self._get_resource(_node.Node, None) + return node.adopt(self, preview=preview, **attrs) + + def perform_operation_on_node(self, node, operation, **params): + """Perform an operation on the specified node. + + :param node: The value can be either the ID of a node or a + :class:`~openstack.clustering.v1.node.Node` instance. + :param operation: A string specifying the operation to be performed. + :param dict params: A dictionary providing the parameters for the + operation. + + :returns: A dictionary containing the action ID. + """ + obj = self._get_resource(_node.Node, node) + return obj.op(self, operation, **params) + + def create_policy(self, **attrs): + """Create a new policy from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.clustering.v1.policy.Policy`, it is comprised + of the properties on the ``Policy`` class. + + :returns: The results of policy creation. 
+ :rtype: :class:`~openstack.clustering.v1.policy.Policy`. + """ + return self._create(_policy.Policy, **attrs) + + def delete_policy(self, policy, ignore_missing=True): + """Delete a policy. + + :param policy: The value can be either the name or ID of a policy or a + :class:`~openstack.clustering.v1.policy.Policy` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the policy could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + policy. + + :returns: ``None`` + """ + self._delete(_policy.Policy, policy, ignore_missing=ignore_missing) + + def find_policy(self, name_or_id, ignore_missing=True): + """Find a single policy. + + :param str name_or_id: The name or ID of a policy. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the specified policy does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent policy. + :returns: A policy object or None. + :rtype: :class:`~openstack.clustering.v1.policy.Policy` + """ + return self._find( + _policy.Policy, name_or_id, ignore_missing=ignore_missing + ) + + def get_policy(self, policy): + """Get a single policy. + + :param policy: The value can be the name or ID of a policy or a + :class:`~openstack.clustering.v1.policy.Policy` instance. + + :returns: A policy object. + :rtype: :class:`~openstack.clustering.v1.policy.Policy` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + policy matching the criteria could be found. + """ + return self._get(_policy.Policy, policy) + + def policies(self, **query): + """Retrieve a generator of policies. + + :param kwargs query: Optional query parameters to be sent to + restrict the policies to be returned. Available parameters include: + + * name: The name of a policy. 
+ * type: The type name of a policy. + * sort: A list of sorting keys separated by commas. Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * limit: Requests a specified size of returned items from the + query. Returns a number of items up to the specified limit + value. + * marker: Specifies the ID of the last-seen item. Use the limit + parameter to make an initial limited request and use the ID of + the last-seen item from the response as the marker parameter + value in a subsequent limited request. + * global_project: A boolean value indicating whether policies from + all projects will be returned. + + :returns: A generator of policy instances. + """ + return self._list(_policy.Policy, **query) + + def update_policy(self, policy, **attrs): + """Update a policy. + + :param policy: Either the name or the ID of a policy, or an instance + of :class:`~openstack.clustering.v1.policy.Policy`. + :param attrs: The attributes to update on the policy represented by + the ``value`` parameter. + + :returns: The updated policy. + :rtype: :class:`~openstack.clustering.v1.policy.Policy` + """ + return self._update(_policy.Policy, policy, **attrs) + + def validate_policy(self, **attrs): + """Validate a policy spec. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.clustering.v1.policy.PolicyValidate`, it is + comprised of the properties on the Policy class. + + :returns: The results of Policy validation. + :rtype: :class:`~openstack.clustering.v1.policy.PolicyValidate`. + """ + return self._create(_policy.PolicyValidate, **attrs) + + def cluster_policies(self, cluster, **query): + """Retrieve a generator of cluster-policy bindings. + + :param cluster: The value can be the name or ID of a cluster or a + :class:`~openstack.clustering.v1.cluster.Cluster` instance. + :param kwargs query: Optional query parameters to be sent to + restrict the policies to be returned. 
Available parameters include: + + * enabled: A boolean value indicating whether the policy is + enabled on the cluster. + :returns: A generator of cluster-policy binding instances. + """ + cluster_id = resource.Resource._get_id(cluster) + return self._list( + _cluster_policy.ClusterPolicy, cluster_id=cluster_id, **query + ) + + def get_cluster_policy(self, cluster_policy, cluster): + """Get a cluster-policy binding. + + :param cluster_policy: + The value can be the name or ID of a policy or a + :class:`~openstack.clustering.v1.policy.Policy` instance. + :param cluster: The value can be the name or ID of a cluster or a + :class:`~openstack.clustering.v1.cluster.Cluster` instance. + + :returns: a cluster-policy binding object. + :rtype: :class:`~openstack.clustering.v1.cluster_policy.ClusterPolicy` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + cluster-policy binding matching the criteria could be found. + """ + return self._get( + _cluster_policy.ClusterPolicy, cluster_policy, cluster_id=cluster + ) + + def create_receiver(self, **attrs): + """Create a new receiver from attributes. + + :param dict attrs: Keyword arguments that will be used to create a + :class:`~openstack.clustering.v1.receiver.Receiver`, it is + comprised of the properties on the Receiver class. + + :returns: The results of receiver creation. + :rtype: :class:`~openstack.clustering.v1.receiver.Receiver`. + """ + return self._create(_receiver.Receiver, **attrs) + + def update_receiver(self, receiver, **attrs): + """Update a receiver. + + :param receiver: The value can be either the name or ID of a receiver + or a :class:`~openstack.clustering.v1.receiver.Receiver` instance. + :param attrs: The attributes to update on the receiver parameter. + Valid attribute names include ``name``, ``action`` and ``params``. + :returns: The updated receiver. 
+ :rtype: :class:`~openstack.clustering.v1.receiver.Receiver` + """ + return self._update(_receiver.Receiver, receiver, **attrs) + + def delete_receiver(self, receiver, ignore_missing=True): + """Delete a receiver. + + :param receiver: The value can be either the name or ID of a receiver + or a :class:`~openstack.clustering.v1.receiver.Receiver` instance. + :param bool ignore_missing: When set to ``False``, an exception + :class:`~openstack.exceptions.NotFoundException` will be raised + when the receiver could not be found. When set to ``True``, no + exception will be raised when attempting to delete a non-existent + receiver. + + :returns: ``None`` + """ + self._delete( + _receiver.Receiver, receiver, ignore_missing=ignore_missing + ) + + def find_receiver(self, name_or_id, ignore_missing=True): + """Find a single receiver. + + :param str name_or_id: The name or ID of a receiver. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the specified receiver does not exist. When + set to ``True``, None will be returned when attempting to + find a nonexistent receiver. + :returns: A receiver object or None. + :rtype: :class:`~openstack.clustering.v1.receiver.Receiver` + """ + return self._find( + _receiver.Receiver, name_or_id, ignore_missing=ignore_missing + ) + + def get_receiver(self, receiver): + """Get a single receiver. + + :param receiver: The value can be the name or ID of a receiver or a + :class:`~openstack.clustering.v1.receiver.Receiver` instance. + + :returns: A receiver object. + :rtype: :class:`~openstack.clustering.v1.receiver.Receiver` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + receiver matching the criteria could be found. + """ + return self._get(_receiver.Receiver, receiver) + + def receivers(self, **query): + """Retrieve a generator of receivers. + + :param kwargs query: Optional query parameters for restricting the + receivers to be returned. 
Available parameters include: + + * name: The name of a receiver object. + * type: The type of receiver objects. + * cluster_id: The ID of the associated cluster. + * action: The name of the associated action. + * sort: A list of sorting keys separated by commas. Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * global_project: A boolean value indicating whether receivers + from all projects will be returned. + + :returns: A generator of receiver instances. + """ + return self._list(_receiver.Receiver, **query) + + def get_action(self, action): + """Get a single action. + + :param action: The value can be the name or ID of an action or a + :class:`~openstack.clustering.v1.action.Action` instance. + + :returns: an action object. + :rtype: :class:`~openstack.clustering.v1.action.Action` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + action matching the criteria could be found. + """ + return self._get(_action.Action, action) + + def actions(self, **query): + """Retrieve a generator of actions. + + :param kwargs query: Optional query parameters to be sent to + restrict the actions to be returned. Available parameters include: + + * name: name of action for query. + * target: ID of the target object for which the actions should be + returned. + * action: built-in action types for query. + * sort: A list of sorting keys separated by commas. Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * limit: Requests a specified size of returned items from the + query. Returns a number of items up to the specified limit + value. + * marker: Specifies the ID of the last-seen item. Use the limit + parameter to make an initial limited request and use the ID of + the last-seen item from the response as the marker parameter + value in a subsequent limited request. + + :returns: A generator of action instances. 
+ """ + return self._list(_action.Action, **query) + + def update_action(self, action, **attrs): + """Update an action. + + :param action: Either the ID of the action, or an + instance of :class:`~openstack.clustering.v1.action.Action`. + :param attrs: The attributes to update on the action represented by + the ``value`` parameter. + + :returns: The updated action. + :rtype: :class:`~openstack.clustering.v1.action.Action` + """ + return self._update(_action.Action, action, **attrs) + + def get_event(self, event): + """Get a single event. + + :param event: The value can be the name or ID of an event or a + :class:`~openstack.clustering.v1.event.Event` instance. + + :returns: an event object. + :rtype: :class:`~openstack.clustering.v1.event.Event` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + event matching the criteria could be found. + """ + return self._get(_event.Event, event) + + def events(self, **query): + """Retrieve a generator of events. + + :param kwargs query: Optional query parameters to be sent to + restrict the events to be returned. Available parameters include: + + * obj_name: name string of the object associated with an event. + * obj_type: type string of the object related to an event. The + value can be ``cluster``, ``node``, ``policy`` etc. + * obj_id: ID of the object associated with an event. + * cluster_id: ID of the cluster associated with the event, if any. + * action: name of the action associated with an event. + * sort: A list of sorting keys separated by commas. Each sorting + key can optionally be attached with a sorting direction + modifier which can be ``asc`` or ``desc``. + * limit: Requests a specified size of returned items from the + query. Returns a number of items up to the specified limit + value. + * marker: Specifies the ID of the last-seen item. 
Use the limit + parameter to make an initial limited request and use the ID of + the last-seen item from the response as the marker parameter + value in a subsequent limited request. + * global_project: A boolean specifying whether events from all + projects should be returned. This option is subject to access + control checking. + + :returns: A generator of event instances. + """ + return self._list(_event.Event, **query) + + def services(self, **query): + """Get a generator of services. + + :returns: A generator of objects that are of type + :class:`~openstack.clustering.v1.service.Service` + """ + return self._list(_service.Service, **query) + + def list_profile_type_operations(self, profile_type): + """Get the operation about a profile type. + + :param profile_type: The name of the profile_type to retrieve or an + object of + :class:`~openstack.clustering.v1.profile_type.ProfileType`. + + :returns: A :class:`~openstack.clustering.v1.profile_type.ProfileType` + object. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + profile_type matching the name could be found. + """ + obj = self._get_resource(_profile_type.ProfileType, profile_type) + return obj.type_ops(self) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. 
+ :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+ """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/clustering/v1/action.py b/openstack/clustering/v1/action.py new file mode 100644 index 0000000000..9517cef249 --- /dev/null +++ b/openstack/clustering/v1/action.py @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import resource + + +class Action(resource.Resource): + resource_key = 'action' + resources_key = 'actions' + base_path = '/actions' + + # Capabilities + allow_list = True + allow_fetch = True + allow_commit = True + + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'name', + 'action', + 'status', + 'sort', + 'global_project', + 'cluster_id', + target_id='target', + ) + + # Properties + #: Name of the action. + name = resource.Body('name') + #: ID of the target object, which can be a cluster or a node. + target_id = resource.Body('target') + #: Built-in type name of action. + action = resource.Body('action') + #: A string representation of the reason why the action was created. + cause = resource.Body('cause') + #: The owning engine that is currently running the action. + owner_id = resource.Body('owner') + #: The ID of the user who created this action. + user_id = resource.Body('user') + #: The ID of the project this profile belongs to. + project_id = resource.Body('project') + #: The domain ID of the action. 
+ domain_id = resource.Body('domain') + #: Interval in seconds between two consecutive executions. + interval = resource.Body('interval') + #: The time the action was started. + start_at = resource.Body('start_time') + #: The time the action completed execution. + end_at = resource.Body('end_time') + #: The timeout in seconds. + timeout = resource.Body('timeout') + #: Current status of the action. + status = resource.Body('status') + #: A string describing the reason that brought the action to its current + # status. + status_reason = resource.Body('status_reason') + #: A dictionary containing the inputs to the action. + inputs = resource.Body('inputs', type=dict) + #: A dictionary containing the outputs to the action. + outputs = resource.Body('outputs', type=dict) + #: A list of actions that must finish before this action starts execution. + depends_on = resource.Body('depends_on', type=list) + #: A list of actions that can start only after this action has finished. + depended_by = resource.Body('depended_by', type=list) + #: Timestamp when the action is created. + created_at = resource.Body('created_at') + #: Timestamp when the action was last updated. + updated_at = resource.Body('updated_at') + #: The ID of cluster which this action runs on. + cluster_id = resource.Body('cluster_id') diff --git a/openstack/cluster/v1/build_info.py b/openstack/clustering/v1/build_info.py similarity index 84% rename from openstack/cluster/v1/build_info.py rename to openstack/clustering/v1/build_info.py index 78ac642a1e..a031fac133 100644 --- a/openstack/cluster/v1/build_info.py +++ b/openstack/clustering/v1/build_info.py @@ -10,17 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.cluster import cluster_service -from openstack import resource2 as resource +from openstack import resource class BuildInfo(resource.Resource): base_path = '/build-info' resource_key = 'build_info' - service = cluster_service.ClusterService() # Capabilities - allow_get = True + allow_fetch = True # Properties #: String representation of the API build version diff --git a/openstack/clustering/v1/cluster.py b/openstack/clustering/v1/cluster.py new file mode 100644 index 0000000000..618d7af3bd --- /dev/null +++ b/openstack/clustering/v1/cluster.py @@ -0,0 +1,177 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack.clustering.v1 import _async_resource +from openstack.common import metadata +from openstack import resource +from openstack import utils + + +class Cluster(_async_resource.AsyncResource, metadata.MetadataMixin): + resource_key = 'cluster' + resources_key = 'clusters' + base_path = '/clusters' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'name', 'status', 'sort', 'global_project' + ) + + # Properties + #: The name of the cluster. + name = resource.Body('name') + #: The ID of the profile used by this cluster. + profile_id = resource.Body('profile_id') + #: The ID of the user who created this cluster, thus the owner of it. 
+ user_id = resource.Body('user') + #: The ID of the project this cluster belongs to. + project_id = resource.Body('project') + #: The domain ID of the cluster owner. + domain_id = resource.Body('domain') + #: Timestamp of when the cluster was initialized. + #: *Type: datetime object parsed from ISO 8601 formatted string* + init_at = resource.Body('init_at') + #: Timestamp of when the cluster was created. + #: *Type: datetime object parsed from ISO 8601 formatted string* + created_at = resource.Body('created_at') + #: Timestamp of when the cluster was last updated. + #: *Type: datetime object parsed from ISO 8601 formatted string* + updated_at = resource.Body('updated_at') + #: Lower bound (inclusive) for the size of the cluster. + min_size = resource.Body('min_size', type=int) + #: Upper bound (inclusive) for the size of the cluster. A value of + #: -1 indicates that there is no upper limit of cluster size. + max_size = resource.Body('max_size', type=int) + #: Desired capacity for the cluster. A cluster would be created at the + #: scale specified by this value. + desired_capacity = resource.Body('desired_capacity', type=int) + #: Default timeout (in seconds) for cluster operations. + timeout = resource.Body('timeout') + #: A string representation of the cluster status. + status = resource.Body('status') + #: A string describing the reason why the cluster in current status. + status_reason = resource.Body('status_reason') + #: A dictionary configuration for cluster. + config = resource.Body('config', type=dict) + #: A collection of key-value pairs that are attached to the cluster. + metadata = resource.Body('metadata', type=dict) + #: A dictionary with some runtime data associated with the cluster. + data = resource.Body('data', type=dict) + #: A list IDs of nodes that are members of the cluster. + node_ids = resource.Body('nodes') + #: Name of the profile used by the cluster. 
+ profile_name = resource.Body('profile_name') + #: Specify whether the cluster update should only pertain to the profile. + is_profile_only = resource.Body('profile_only', type=bool) + #: A dictionary with dependency information of the cluster + dependents = resource.Body('dependents', type=dict) + + def action(self, session, body): + url = utils.urljoin(self.base_path, self._get_id(self), 'actions') + resp = session.post(url, json=body) + return resp.json() + + def add_nodes(self, session, nodes): + body = { + 'add_nodes': { + 'nodes': nodes, + } + } + return self.action(session, body) + + def del_nodes(self, session, nodes, **params): + data = {'nodes': nodes} + data.update(params) + body = {'del_nodes': data} + return self.action(session, body) + + def replace_nodes(self, session, nodes): + body = { + 'replace_nodes': { + 'nodes': nodes, + } + } + return self.action(session, body) + + def scale_out(self, session, count=None): + body = { + 'scale_out': { + 'count': count, + } + } + return self.action(session, body) + + def scale_in(self, session, count=None): + body = { + 'scale_in': { + 'count': count, + } + } + return self.action(session, body) + + def resize(self, session, **params): + body = {'resize': params} + return self.action(session, body) + + def policy_attach(self, session, policy_id, **params): + data = {'policy_id': policy_id} + data.update(params) + body = {'policy_attach': data} + return self.action(session, body) + + def policy_detach(self, session, policy_id): + body = { + 'policy_detach': { + 'policy_id': policy_id, + } + } + return self.action(session, body) + + def policy_update(self, session, policy_id, **params): + data = {'policy_id': policy_id} + data.update(params) + body = {'policy_update': data} + return self.action(session, body) + + def check(self, session, **params): + body = {'check': params} + return self.action(session, body) + + def recover(self, session, **params): + body = {'recover': params} + return self.action(session, 
body) + + def op(self, session, operation, **params): + """Perform an operation on the cluster. + + :param session: A session object used for sending request. + :param operation: A string representing the operation to be performed. + :param dict params: An optional dict providing the parameters for the + operation. + :returns: A dictionary containing the action ID. + """ + url = utils.urljoin(self.base_path, self.id, 'ops') + resp = session.post(url, json={operation: params}) + return resp.json() + + def force_delete(self, session): + """Force delete a cluster.""" + body = {'force': True} + url = utils.urljoin(self.base_path, self.id) + response = session.delete(url, json=body) + return self._delete_response(response) diff --git a/openstack/cluster/v1/cluster_attr.py b/openstack/clustering/v1/cluster_attr.py similarity index 88% rename from openstack/cluster/v1/cluster_attr.py rename to openstack/clustering/v1/cluster_attr.py index 1755b100f0..1ca1aaf1eb 100644 --- a/openstack/cluster/v1/cluster_attr.py +++ b/openstack/clustering/v1/cluster_attr.py @@ -10,14 +10,12 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.cluster import cluster_service -from openstack import resource2 as resource +from openstack import resource class ClusterAttr(resource.Resource): resources_key = 'cluster_attributes' base_path = '/clusters/%(cluster_id)s/attrs/%(path)s' - service = cluster_service.ClusterService() # capabilities allow_list = True diff --git a/openstack/cluster/v1/cluster_policy.py b/openstack/clustering/v1/cluster_policy.py similarity index 90% rename from openstack/cluster/v1/cluster_policy.py rename to openstack/clustering/v1/cluster_policy.py index 377e3d46b8..a533c1c2b7 100644 --- a/openstack/cluster/v1/cluster_policy.py +++ b/openstack/clustering/v1/cluster_policy.py @@ -10,22 +10,21 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.cluster import cluster_service -from openstack import resource2 as resource +from openstack import resource class ClusterPolicy(resource.Resource): resource_key = 'cluster_policy' resources_key = 'cluster_policies' base_path = '/clusters/%(cluster_id)s/policies' - service = cluster_service.ClusterService() # Capabilities allow_list = True - allow_get = True + allow_fetch = True _query_mapping = resource.QueryParameters( - 'sort', 'policy_name', 'policy_type', is_enabled='enabled') + 'sort', 'policy_name', 'policy_type', is_enabled='enabled' + ) # Properties #: ID of the policy object. diff --git a/openstack/clustering/v1/event.py b/openstack/clustering/v1/event.py new file mode 100644 index 0000000000..0111b75a3e --- /dev/null +++ b/openstack/clustering/v1/event.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import resource + + +class Event(resource.Resource): + resource_key = 'event' + resources_key = 'events' + base_path = '/events' + + # Capabilities + allow_list = True + allow_fetch = True + + _query_mapping = resource.QueryParameters( + 'cluster_id', + 'action', + 'level', + 'sort', + 'global_project', + obj_id='oid', + obj_name='oname', + obj_type='otype', + ) + + # Properties + #: Timestamp string (in ISO8601 format) when the event was generated. + generated_at = resource.Body('timestamp') + #: The UUID of the object related to this event. 
+ obj_id = resource.Body('oid') + #: The name of the object related to this event. + obj_name = resource.Body('oname') + #: The type name of the object related to this event. + obj_type = resource.Body('otype') + #: The UUID of the cluster related to this event, if any. + cluster_id = resource.Body('cluster_id') + #: The event level (priority). + level = resource.Body('level') + #: The ID of the user. + user_id = resource.Body('user') + #: The ID of the project (tenant). + project_id = resource.Body('project') + #: The string representation of the action associated with the event. + action = resource.Body('action') + #: The status of the associated object. + status = resource.Body('status') + #: A string description of the reason that brought the object into its + #: current status. + status_reason = resource.Body('status_reason') + #: The metadata of an event object. + meta_data = resource.Body('meta_data') diff --git a/openstack/clustering/v1/node.py b/openstack/clustering/v1/node.py new file mode 100644 index 0000000000..8229d24cb1 --- /dev/null +++ b/openstack/clustering/v1/node.py @@ -0,0 +1,177 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.clustering.v1 import _async_resource +from openstack import resource +from openstack import utils + + +class Node(_async_resource.AsyncResource): + resource_key = 'node' + resources_key = 'nodes' + base_path = '/nodes' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'show_details', + 'name', + 'sort', + 'global_project', + 'cluster_id', + 'status', + ) + + # Properties + #: The name of the node. + name = resource.Body('name') + #: The ID of the physical object that backs the node. + physical_id = resource.Body('physical_id') + #: The ID of the cluster in which this node is a member. + #: A node is an orphan node if this field is empty. + cluster_id = resource.Body('cluster_id') + #: The ID of the profile used by this node. + profile_id = resource.Body('profile_id') + #: The domain ID of the node. + domain_id = resource.Body('domain') + #: The ID of the user who created this node. + user_id = resource.Body('user') + #: The ID of the project this node belongs to. + project_id = resource.Body('project') + #: The name of the profile used by this node. + profile_name = resource.Body('profile_name') + #: An integer that is unique inside the owning cluster. + #: A value of -1 means this node is an orphan node. + index = resource.Body('index', type=int) + #: A string indicating the role the node plays in a cluster. + role = resource.Body('role') + #: The timestamp of the node object's initialization. + #: *Type: datetime object parsed from ISO 8601 formatted string* + init_at = resource.Body('init_at') + #: The timestamp of the node's creation, i.e. the physical object + #: represented by this node is also created. + #: *Type: datetime object parsed from ISO 8601 formatted string* + created_at = resource.Body('created_at') + #: The timestamp the node was last updated. 
+ #: *Type: datetime object parsed from ISO 8601 formatted string* + updated_at = resource.Body('updated_at') + #: A string indicating the node's status. + status = resource.Body('status') + #: A string describing why the node entered its current status. + status_reason = resource.Body('status_reason') + #: A map containing key-value pairs attached to the node. + metadata = resource.Body('metadata', type=dict) + #: A map containing some runtime data for this node. + data = resource.Body('data', type=dict) + #: A map containing the details of the physical object this node + #: represents + details = resource.Body('details', type=dict) + #: A map containing the dependency of nodes + dependents = resource.Body('dependents', type=dict) + #: Whether the node is tainted. *Type: bool* + tainted = resource.Body('tainted', type=bool) + + def _action(self, session, body): + """Procedure the invoke an action API. + + :param session: A session object used for sending request. + :param body: The body of action to be sent. + """ + url = utils.urljoin(self.base_path, self.id, 'actions') + resp = session.post(url, json=body) + return resp.json() + + def check(self, session, **params): + """An action procedure for the node to check its health status. + + :param session: A session object used for sending request. + :returns: A dictionary containing the action ID. + """ + body = {'check': params} + return self._action(session, body) + + def recover(self, session, **params): + """An action procedure for the node to recover. + + :param session: A session object used for sending request. + :returns: A dictionary containing the action ID. + """ + body = {'recover': params} + return self._action(session, body) + + def op(self, session, operation, **params): + """Perform an operation on the specified node. + + :param session: A session object used for sending request. + :param operation: A string representing the operation to be performed. 
+ :param dict params: An optional dict providing the parameters for the + operation. + :returns: A dictionary containing the action ID. + """ + url = utils.urljoin(self.base_path, self.id, 'ops') + resp = session.post(url, json={operation: params}) + return resp.json() + + def adopt(self, session, preview=False, **params): + """Adopt a node for management. + + :param session: A session object used for sending request. + :param preview: A boolean indicating whether the adoption is a + preview. A "preview" does not create the node object. + :param dict params: A dict providing the details of a node to be + adopted. + """ + if preview: + path = 'adopt-preview' + attrs = { + 'identity': params.get('identity'), + 'overrides': params.get('overrides'), + 'type': params.get('type'), + 'snapshot': params.get('snapshot'), + } + else: + path = 'adopt' + attrs = params + + url = utils.urljoin(self.base_path, path) + resp = session.post(url, json=attrs) + if preview: + return resp.json() + + self._translate_response(resp) + return self + + def force_delete(self, session): + """Force delete a node.""" + body = {'force': True} + url = utils.urljoin(self.base_path, self.id) + response = session.delete(url, json=body) + return self._delete_response(response) + + +class NodeDetail(Node): + base_path = '/nodes/%(node_id)s?show_details=True' + + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = False + + node_id = resource.URI('node_id') diff --git a/openstack/clustering/v1/policy.py b/openstack/clustering/v1/policy.py new file mode 100644 index 0000000000..f3ffc03a41 --- /dev/null +++ b/openstack/clustering/v1/policy.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Policy(resource.Resource): + resource_key = 'policy' + resources_key = 'policies' + base_path = '/policies' + + # Capabilities + allow_list = True + allow_fetch = True + allow_create = True + allow_delete = True + allow_commit = True + + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'name', 'type', 'sort', 'global_project' + ) + + # Properties + #: The name of the policy. + name = resource.Body('name') + #: The type name of the policy. + type = resource.Body('type') + #: The ID of the project this policy belongs to. + project_id = resource.Body('project') + # The domain ID of the policy. + domain_id = resource.Body('domain') + #: The ID of the user who created this policy. + user_id = resource.Body('user') + #: The timestamp when the policy is created. + created_at = resource.Body('created_at') + #: The timestamp when the policy was last updated. + updated_at = resource.Body('updated_at') + #: The specification of the policy. + spec = resource.Body('spec', type=dict) + #: A dictionary containing runtime data of the policy. 
+ data = resource.Body('data', type=dict) + + +class PolicyValidate(Policy): + base_path = '/policies/validate' + + # Capabilities + allow_list = False + allow_fetch = False + allow_create = True + allow_delete = False + allow_commit = False + + commit_method = 'PUT' diff --git a/openstack/clustering/v1/policy_type.py b/openstack/clustering/v1/policy_type.py new file mode 100644 index 0000000000..96bf477f8e --- /dev/null +++ b/openstack/clustering/v1/policy_type.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class PolicyType(resource.Resource): + resource_key = 'policy_type' + resources_key = 'policy_types' + base_path = '/policy-types' + + # Capabilities + allow_list = True + allow_fetch = True + + # Properties + #: Name of policy type. + name = resource.Body('name', alternate_id=True) + #: The schema of the policy type. + schema = resource.Body('schema') + #: The support status of the policy type + support_status = resource.Body('support_status') diff --git a/openstack/clustering/v1/profile.py b/openstack/clustering/v1/profile.py new file mode 100644 index 0000000000..3c654b5262 --- /dev/null +++ b/openstack/clustering/v1/profile.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Profile(resource.Resource): + resource_key = 'profile' + resources_key = 'profiles' + base_path = '/profiles' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'sort', 'global_project', 'type', 'name' + ) + + # Bodyerties + #: The name of the profile + name = resource.Body('name') + #: The type of the profile. + type = resource.Body('type') + #: The ID of the project this profile belongs to. + project_id = resource.Body('project') + #: The domain ID of the profile. + domain_id = resource.Body('domain') + #: The ID of the user who created this profile. + user_id = resource.Body('user') + #: The spec of the profile. + spec = resource.Body('spec', type=dict) + #: A collection of key-value pairs that are attached to the profile. + metadata = resource.Body('metadata', type=dict) + #: Timestamp of when the profile was created. + created_at = resource.Body('created_at') + #: Timestamp of when the profile was last updated. 
+ updated_at = resource.Body('updated_at') + + +class ProfileValidate(Profile): + base_path = '/profiles/validate' + allow_create = True + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = False + + commit_method = 'PUT' diff --git a/openstack/clustering/v1/profile_type.py b/openstack/clustering/v1/profile_type.py new file mode 100644 index 0000000000..de64525023 --- /dev/null +++ b/openstack/clustering/v1/profile_type.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource +from openstack import utils + + +class ProfileType(resource.Resource): + resource_key = 'profile_type' + resources_key = 'profile_types' + base_path = '/profile-types' + + # Capabilities + allow_list = True + allow_fetch = True + + # Properties + #: Name of the profile type. + name = resource.Body('name', alternate_id=True) + #: The schema of the profile type. 
+ schema = resource.Body('schema') + #: The support status of the profile type + support_status = resource.Body('support_status') + + def type_ops(self, session): + url = utils.urljoin(self.base_path, self.id, 'ops') + resp = session.get(url) + return resp.json() diff --git a/openstack/clustering/v1/receiver.py b/openstack/clustering/v1/receiver.py new file mode 100644 index 0000000000..913feadb4a --- /dev/null +++ b/openstack/clustering/v1/receiver.py @@ -0,0 +1,66 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Receiver(resource.Resource): + resource_key = 'receiver' + resources_key = 'receivers' + base_path = '/receivers' + + # Capabilities + allow_list = True + allow_fetch = True + allow_create = True + allow_commit = True + allow_delete = True + + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'name', + 'type', + 'cluster_id', + 'action', + 'sort', + 'global_project', + user_id='user', + ) + + # Properties + #: The name of the receiver. + name = resource.Body('name') + #: The type of the receiver. + type = resource.Body('type') + #: The ID of the user who created the receiver, thus the owner of it. + user_id = resource.Body('user') + #: The ID of the project this receiver belongs to. + project_id = resource.Body('project') + #: The domain ID of the receiver. + domain_id = resource.Body('domain') + #: The ID of the targeted cluster. 
+ cluster_id = resource.Body('cluster_id') + #: The name of the targeted action. + action = resource.Body('action') + #: Timestamp of when the receiver was created. + created_at = resource.Body('created_at') + #: Timestamp of when the receiver was last updated. + updated_at = resource.Body('updated_at') + #: The credential of the impersonated user. + actor = resource.Body('actor', type=dict) + #: A dictionary containing key-value pairs that are provided to the + #: targeted action. + params = resource.Body('params', type=dict) + #: The information about the channel through which you can trigger the + #: receiver hence the associated action. + channel = resource.Body('channel', type=dict) diff --git a/openstack/clustering/v1/service.py b/openstack/clustering/v1/service.py new file mode 100644 index 0000000000..52f380298d --- /dev/null +++ b/openstack/clustering/v1/service.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class Service(resource.Resource): + resource_key = 'service' + resources_key = 'services' + base_path = '/services' + + # Capabilities + allow_list = True + + # Properties + #: Status of service + status = resource.Body('status') + #: State of service + state = resource.Body('state') + #: Name of service + binary = resource.Body('binary') + #: Disabled reason of service + disabled_reason = resource.Body('disabled_reason') + #: Host where service runs + host = resource.Body('host') + #: The timestamp the service was last updated. + #: *Type: datetime object parsed from ISO 8601 formatted string* + updated_at = resource.Body('updated_at') diff --git a/openstack/clustering/version.py b/openstack/clustering/version.py new file mode 100644 index 0000000000..692230a198 --- /dev/null +++ b/openstack/clustering/version.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack import resource + + +class Version(resource.Resource): + resource_key = 'version' + resources_key = 'versions' + base_path = '/' + + # capabilities + allow_list = True + + # Properties + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/tests/functional/block_store/v2/__init__.py b/openstack/common/__init__.py similarity index 100% rename from openstack/tests/functional/block_store/v2/__init__.py rename to openstack/common/__init__.py diff --git a/openstack/common/metadata.py b/openstack/common/metadata.py new file mode 100644 index 0000000000..2ecabfef3d --- /dev/null +++ b/openstack/common/metadata.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty + +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class MetadataMixin: + id: str + base_path: str + _body: resource._ComponentManager + + #: *Type: list of tag strings* + metadata = resource.Body('metadata', type=dict) + + def fetch_metadata(self, session: resource.AdapterT) -> ty_ext.Self: + """Lists metadata set on the entity. + + :param session: The session to use for making this request. 
+ :return: The dictionary with metadata attached to the entity + """ + url = utils.urljoin(self.base_path, self.id, 'metadata') + response = session.get(url) + exceptions.raise_from_response(response) + json = response.json() + + if 'metadata' in json: + self._body.attributes.update({'metadata': json['metadata']}) + return self + + def set_metadata( + self, + session: resource.AdapterT, + metadata: dict[str, ty.Any] | None = None, + replace: bool = False, + ) -> ty_ext.Self: + """Sets/Replaces metadata key value pairs on the resource. + + :param session: The session to use for making this request. + :param dict metadata: Dictionary with key-value pairs + :param bool replace: Replace all resource metadata with the new object + or merge new and existing. + """ + url = utils.urljoin(self.base_path, self.id, 'metadata') + if not metadata: + metadata = {} + if not replace: + response = session.post(url, json={'metadata': metadata}) + else: + response = session.put(url, json={'metadata': metadata}) + exceptions.raise_from_response(response) + self._body.attributes.update({'metadata': metadata}) + return self + + def replace_metadata( + self, + session: resource.AdapterT, + metadata: dict[str, ty.Any] | None = None, + ) -> ty_ext.Self: + """Replaces all metadata key value pairs on the resource. + + :param session: The session to use for making this request. + :param dict metadata: Dictionary with key-value pairs + :param bool replace: Replace all resource metadata with the new object + or merge new and existing. + """ + return self.set_metadata(session, metadata, replace=True) + + def delete_metadata(self, session: resource.AdapterT) -> ty_ext.Self: + """Removes all metadata on the entity. + + :param session: The session to use for making this request. + """ + self.set_metadata(session, None, replace=True) + return self + + def get_metadata_item( + self, session: resource.AdapterT, key: str + ) -> ty_ext.Self: + """Get the single metadata item on the entity. 
+ + If the metadata key does not exist a 404 will be returned + + :param session: The session to use for making this request. + :param str key: The key of a metadata item. + """ + url = utils.urljoin(self.base_path, self.id, 'metadata', key) + response = session.get(url) + exceptions.raise_from_response( + response, error_message='Metadata item does not exist' + ) + meta = response.json().get('meta', {}) + # Here we need to potentially init metadata + metadata = self.metadata or {} + metadata[key] = meta.get(key) + self._body.attributes.update({'metadata': metadata}) + + return self + + def set_metadata_item( + self, session: resource.AdapterT, key: str, value: ty.Any + ) -> ty_ext.Self: + """Create or replace single metadata item to the resource. + + :param session: The session to use for making this request. + :param str key: The key for the metadata item. + :param str value: The value. + """ + url = utils.urljoin(self.base_path, self.id, 'metadata', key) + response = session.put(url, json={'meta': {key: value}}) + exceptions.raise_from_response(response) + # we do not want to update tags directly + metadata = self.metadata + metadata[key] = value + self._body.attributes.update({'metadata': metadata}) + return self + + def delete_metadata_item( + self, session: resource.AdapterT, key: str + ) -> ty_ext.Self: + """Removes a single metadata item from the specified resource. + + :param session: The session to use for making this request. + :param str key: The key as a string. + """ + url = utils.urljoin(self.base_path, self.id, 'metadata', key) + response = session.delete(url) + exceptions.raise_from_response(response) + # we do not want to update tags directly + metadata = self.metadata + try: + if metadata: + metadata.pop(key) + else: + metadata = {} + except ValueError: + pass # do nothing! 
+ self._body.attributes.update({'metadata': metadata}) + return self diff --git a/openstack/common/quota_set.py b/openstack/common/quota_set.py new file mode 100644 index 0000000000..08c7a7beff --- /dev/null +++ b/openstack/common/quota_set.py @@ -0,0 +1,161 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty + +import requests +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack import resource + + +# ATTENTION: Please do not inherit this class for anything else then QuotaSet, +# since attribute processing is here very different! +class QuotaSet(resource.Resource): + resource_key = 'quota_set' + # ATTENTION: different services might be using different base_path + base_path = '/os-quota-sets/%(project_id)s' + + # capabilities + allow_create = True + allow_fetch = True + allow_delete = True + allow_commit = True + + _query_mapping = resource.QueryParameters("usage") + + # NOTE(gtema) Sadly this attribute is useless in all the methods, but keep + # it here extra as a reminder + requires_id = False + + # Quota-sets are not very well designed. 
We must keep what is + # there and try to process it on best effort + _allow_unknown_attrs_in_body = True + + #: Properties + #: Current reservations + #: *type:dict* + reservation = resource.Body('reservation', type=dict) + #: Quota usage + #: *type:dict* + usage = resource.Body('usage', type=dict) + + project_id = resource.URI('project_id') + + def fetch( + self, + session: resource.AdapterT, + requires_id: bool = False, + base_path: str | None = None, + error_message: str | None = None, + skip_cache: bool = False, + *, + resource_response_key: str | None = None, + microversion: str | None = None, + **params: ty.Any, + ) -> ty_ext.Self: + return super().fetch( + session, + requires_id, + base_path, + error_message, + skip_cache, + resource_response_key=resource_response_key, + microversion=microversion, + **params, + ) + + def _translate_response( + self, + response: requests.Response, + has_body: bool | None = None, + error_message: str | None = None, + *, + resource_response_key: str | None = None, + ) -> None: + """Given a KSA response, inflate this instance with its data + + DELETE operations don't return a body, so only try to work + with a body when has_body is True. + + This method updates attributes that correspond to headers + and body on this instance and clears the dirty set. + """ + if has_body is None: + has_body = self.has_body + exceptions.raise_from_response(response, error_message=error_message) + if has_body: + try: + body = response.json() + if self.resource_key and self.resource_key in body: + body = body[self.resource_key] + + # Do not allow keys called "self" through. Glance chose + # to name a key "self", so we need to pop it out because + # we can't send it through cls.existing and into the + # Resource initializer. "self" is already the first + # argument and is practically a reserved word. 
+ body.pop("self", None) + + # Process body_attrs to strip usage and reservation out + normalized_attrs: dict[str, ty.Any] = dict( + reservation={}, + usage={}, + ) + + for key, val in body.items(): + if isinstance(val, dict): + if 'in_use' in val: + normalized_attrs['usage'][key] = val['in_use'] + if 'reserved' in val: + normalized_attrs['reservation'][key] = val[ + 'reserved' + ] + if 'limit' in val: + normalized_attrs[key] = val['limit'] + else: + normalized_attrs[key] = val + + self._unknown_attrs_in_body.update(normalized_attrs) + + self._body.attributes.update(normalized_attrs) + self._body.clean() + if self.commit_jsonpatch or self.allow_patch: + # We need the original body to compare against + self._original_body = normalized_attrs.copy() + except ValueError: + # Server returned not parsable response (202, 204, etc) + # Do simply nothing + pass + + headers = self._consume_header_attrs(response.headers) + self._header.attributes.update(headers) + self._header.clean() + self._update_location() + dict.update(self, self.to_dict()) + + def _prepare_request_body( + self, + patch: bool, + prepend_key: bool, + *, + resource_request_key: str | None = None, + ) -> dict[str, ty.Any] | list[ty.Any]: + body = self._body.dirty + # Ensure we never try to send meta props reservation and usage + body.pop('reservation', None) + body.pop('usage', None) + + if prepend_key and self.resource_key is not None: + body = {self.resource_key: body} + return body diff --git a/openstack/common/tag.py b/openstack/common/tag.py new file mode 100644 index 0000000000..7fa5061700 --- /dev/null +++ b/openstack/common/tag.py @@ -0,0 +1,146 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty + +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +# https://github.com/python/mypy/issues/11583 +class _TagQueryParameters(ty.TypedDict): + tags: str + any_tags: str + not_tags: str + not_any_tags: str + + +class TagMixin(resource.ResourceMixinProtocol): + _tag_query_parameters: _TagQueryParameters = { + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + } + + #: A list of associated tags + #: *Type: list of tag strings* + tags = resource.Body('tags', type=list, default=[]) + + def fetch_tags(self, session: resource.AdapterT) -> ty_ext.Self: + """Lists tags set on the entity. + + :param session: The session to use for making this request. + :return: The list with tags attached to the entity + """ + url = utils.urljoin(self.base_path, self.id, 'tags') + session = self._get_session(session) + microversion = self._get_microversion(session) + response = session.get(url, microversion=microversion) + exceptions.raise_from_response(response) + # NOTE(gtema): since this is a common method + # we can't rely on the resource_key, because tags are returned + # without resource_key. Do parse response here + json = response.json() + if 'tags' in json: + self._body.attributes.update({'tags': json['tags']}) + return self + + def set_tags( + self, session: resource.AdapterT, tags: list[str] + ) -> ty_ext.Self: + """Sets/Replaces all tags on the resource. 
+ + :param session: The session to use for making this request. + :param list tags: List with tags to be set on the resource + """ + url = utils.urljoin(self.base_path, self.id, 'tags') + session = self._get_session(session) + microversion = self._get_microversion(session) + response = session.put( + url, json={'tags': tags}, microversion=microversion + ) + exceptions.raise_from_response(response) + self._body.attributes.update({'tags': tags}) + return self + + def remove_all_tags(self, session: resource.AdapterT) -> ty_ext.Self: + """Removes all tags on the entity. + + :param session: The session to use for making this request. + """ + url = utils.urljoin(self.base_path, self.id, 'tags') + session = self._get_session(session) + microversion = self._get_microversion(session) + response = session.delete(url, microversion=microversion) + exceptions.raise_from_response(response) + self._body.attributes.update({'tags': []}) + return self + + def check_tag(self, session: resource.AdapterT, tag: str) -> ty_ext.Self: + """Checks if tag exists on the entity. + + If the tag does not exist a 404 will be returned + + :param session: The session to use for making this request. + :param tag: The tag as a string. + """ + url = utils.urljoin(self.base_path, self.id, 'tags', tag) + session = self._get_session(session) + microversion = self._get_microversion(session) + response = session.get(url, microversion=microversion) + exceptions.raise_from_response( + response, error_message='Tag does not exist' + ) + return self + + def add_tag(self, session: resource.AdapterT, tag: str) -> ty_ext.Self: + """Adds a single tag to the resource. + + :param session: The session to use for making this request. + :param tag: The tag as a string. 
+ """ + url = utils.urljoin(self.base_path, self.id, 'tags', tag) + session = self._get_session(session) + microversion = self._get_microversion(session) + response = session.put(url, microversion=microversion) + exceptions.raise_from_response(response) + # we do not want to update tags directly + tags = self.tags + tags.append(tag) + self._body.attributes.update({'tags': tags}) + return self + + def remove_tag(self, session: resource.AdapterT, tag: str) -> ty_ext.Self: + """Removes a single tag from the specified resource. + + :param session: The session to use for making this request. + :param tag: The tag as a string. + """ + url = utils.urljoin(self.base_path, self.id, 'tags', tag) + session = self._get_session(session) + microversion = self._get_microversion(session) + response = session.delete(url, microversion=microversion) + exceptions.raise_from_response(response) + # we do not want to update tags directly + tags = self.tags + try: + # NOTE(gtema): if tags were not fetched, but request suceeded + # it is ok. Just ensure tag does not exist locally + tags.remove(tag) + except ValueError: + pass # do nothing! + self._body.attributes.update({'tags': tags}) + return self diff --git a/openstack/compute/compute_service.py b/openstack/compute/compute_service.py index 64c8bd9e07..eb7d4cc146 100644 --- a/openstack/compute/compute_service.py +++ b/openstack/compute/compute_service.py @@ -10,15 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack import service_filter +from openstack.compute.v2 import _proxy +from openstack import service_description -class ComputeService(service_filter.ServiceFilter): +class ComputeService(service_description.ServiceDescription[_proxy.Proxy]): """The compute service.""" - valid_versions = [service_filter.ValidVersion('v2')] - - def __init__(self, version=None): - """Create a compute service.""" - super(ComputeService, self).__init__(service_type='compute', - version=version) + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/openstack/compute/v2/_proxy.py b/openstack/compute/v2/_proxy.py index d049bb1bfc..2323bd001b 100644 --- a/openstack/compute/v2/_proxy.py +++ b/openstack/compute/v2/_proxy.py @@ -10,38 +10,97 @@ # License for the specific language governing permissions and limitations # under the License. +import typing as ty +import warnings + +from openstack.block_storage.v3 import volume as _volume +from openstack.compute.v2 import aggregate as _aggregate from openstack.compute.v2 import availability_zone +from openstack.compute.v2 import console_auth_token as _console_auth_token from openstack.compute.v2 import extension from openstack.compute.v2 import flavor as _flavor from openstack.compute.v2 import hypervisor as _hypervisor from openstack.compute.v2 import image as _image from openstack.compute.v2 import keypair as _keypair from openstack.compute.v2 import limits +from openstack.compute.v2 import migration as _migration +from openstack.compute.v2 import quota_class_set as _quota_class_set +from openstack.compute.v2 import quota_set as _quota_set from openstack.compute.v2 import server as _server +from openstack.compute.v2 import server_action as _server_action +from openstack.compute.v2 import server_diagnostics as _server_diagnostics from openstack.compute.v2 import server_group as _server_group from openstack.compute.v2 import server_interface as _server_interface from openstack.compute.v2 import server_ip +from 
openstack.compute.v2 import server_migration as _server_migration +from openstack.compute.v2 import server_remote_console as _src from openstack.compute.v2 import service as _service -from openstack import proxy2 -from openstack import resource2 - - -class Proxy(proxy2.BaseProxy): +from openstack.compute.v2 import usage as _usage +from openstack.compute.v2 import volume_attachment as _volume_attachment +from openstack import exceptions +from openstack.identity.v3 import project as _project +from openstack.identity.v3 import user as _user +from openstack.network.v2 import security_group as _sg +from openstack import proxy +from openstack import resource +from openstack import types +from openstack import utils +from openstack import warnings as os_warnings + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + _resource_registry = { + "aggregate": _aggregate.Aggregate, + "availability_zone": availability_zone.AvailabilityZone, + "extension": extension.Extension, + "flavor": _flavor.Flavor, + "hypervisor": _hypervisor.Hypervisor, + "image": _image.Image, + "keypair": _keypair.Keypair, + "limits": limits.Limits, + "migration": _migration.Migration, + "os_console_auth_token": _console_auth_token.ConsoleAuthToken, + "quota_class_set": _quota_class_set.QuotaClassSet, + "quota_set": _quota_set.QuotaSet, + "server": _server.Server, + "server_action": _server_action.ServerAction, + "server_diagnostics": _server_diagnostics.ServerDiagnostics, + "server_group": _server_group.ServerGroup, + "server_interface": _server_interface.ServerInterface, + "server_ip": server_ip.ServerIP, + "server_migration": _server_migration.ServerMigration, + "server_remote_console": _src.ServerRemoteConsole, + "service": _service.Service, + "usage": _usage.Usage, + "volume_attachment": _volume_attachment.VolumeAttachment, + } + + # ========== Extensions ========== def find_extension(self, name_or_id, ignore_missing=True): """Find a single extension :param name_or_id: 
The name or ID of an extension. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.compute.v2.extension.Extension` or - None - """ - return self._find(extension.Extension, name_or_id, - ignore_missing=ignore_missing) + None + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + extension.Extension, + name_or_id, + ignore_missing=ignore_missing, + ) def extensions(self): """Retrieve a generator of extensions @@ -49,28 +108,54 @@ def extensions(self): :returns: A generator of extension instances. :rtype: :class:`~openstack.compute.v2.extension.Extension` """ - return self._list(extension.Extension, paginated=False) + return self._list(extension.Extension) + + # ========== Flavors ========== - def find_flavor(self, name_or_id, ignore_missing=True): + # TODO(stephenfin): Drop 'query' parameter or apply it consistently + def find_flavor( + self, + name_or_id, + ignore_missing=True, + *, + get_extra_specs=False, + **query, + ): """Find a single flavor :param name_or_id: The name or ID of a flavor. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. 
When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param bool get_extra_specs: When set to ``True`` and extra_specs not + present in the response will invoke additional API call to fetch + extra_specs. + :param kwargs query: Optional query parameters to be sent to limit + the flavors being returned. + :returns: One :class:`~openstack.compute.v2.flavor.Flavor` or None - """ - return self._find(_flavor.Flavor, name_or_id, - ignore_missing=ignore_missing) + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + flavor = self._find( + _flavor.Flavor, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + if flavor and get_extra_specs and not flavor.extra_specs: + flavor = flavor.fetch_extra_specs(self) + return flavor def create_flavor(self, **attrs): """Create a new flavor from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.compute.v2.flavor.Flavor`, - comprised of the properties on the Flavor class. + a :class:`~openstack.compute.v2.flavor.Flavor`, + comprised of the properties on the Flavor class. :returns: The results of flavor creation :rtype: :class:`~openstack.compute.v2.flavor.Flavor` @@ -81,99 +166,401 @@ def delete_flavor(self, flavor, ignore_missing=True): """Delete a flavor :param flavor: The value can be either the ID of a flavor or a - :class:`~openstack.compute.v2.flavor.Flavor` instance. + :class:`~openstack.compute.v2.flavor.Flavor` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the flavor does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent flavor. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the flavor does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent flavor. :returns: ``None`` """ self._delete(_flavor.Flavor, flavor, ignore_missing=ignore_missing) - def get_flavor(self, flavor): + def update_flavor(self, flavor, **attrs): + """Update a flavor + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + :param attrs: The attributes to update on the flavor represented + by ``flavor``. + + :returns: The updated flavor + :rtype: :class:`~openstack.compute.v2.flavor.Flavor` + """ + return self._update(_flavor.Flavor, flavor, **attrs) + + def get_flavor(self, flavor, get_extra_specs=False): """Get a single flavor :param flavor: The value can be the ID of a flavor or a - :class:`~openstack.compute.v2.flavor.Flavor` instance. + :class:`~openstack.compute.v2.flavor.Flavor` instance. + :param bool get_extra_specs: When set to ``True`` and extra_specs not + present in the response will invoke additional API call to fetch + extra_specs. :returns: One :class:`~openstack.compute.v2.flavor.Flavor` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_flavor.Flavor, flavor) + flavor = self._get(_flavor.Flavor, flavor) + if get_extra_specs and not flavor.extra_specs: + flavor = flavor.fetch_extra_specs(self) + return flavor - def flavors(self, details=True, **query): + def flavors(self, details=True, get_extra_specs=False, **query): """Return a generator of flavors :param bool details: When ``True``, returns - :class:`~openstack.compute.v2.flavor.FlavorDetail` objects, - otherwise :class:`~openstack.compute.v2.flavor.Flavor`. - *Default: ``True``* - :param kwargs \*\*query: Optional query parameters to be sent to limit - the flavors being returned. + :class:`~openstack.compute.v2.flavor.Flavor` objects, + with additional attributes filled. 
+ :param bool get_extra_specs: When set to ``True`` and extra_specs not + present in the response will invoke additional API call to fetch + extra_specs. + :param kwargs query: Optional query parameters to be sent to limit + the flavors being returned. :returns: A generator of flavor objects """ - flv = _flavor.FlavorDetail if details else _flavor.Flavor - return self._list(flv, paginated=True, **query) + base_path = '/flavors/detail' if details else '/flavors' + for flv in self._list(_flavor.Flavor, base_path=base_path, **query): + if get_extra_specs and not flv.extra_specs: + flv = flv.fetch_extra_specs(self) + yield flv + + def flavor_add_tenant_access(self, flavor, tenant): + """Adds tenant/project access to flavor. + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + :param str tenant: The UUID of the tenant. + + :returns: One :class:`~openstack.compute.v2.flavor.Flavor` + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.add_tenant_access(self, tenant) + + def flavor_remove_tenant_access(self, flavor, tenant): + """Removes tenant/project access to flavor. + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + :param str tenant: The UUID of the tenant. + + :returns: One :class:`~openstack.compute.v2.flavor.Flavor` + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.remove_tenant_access(self, tenant) + + def get_flavor_access(self, flavor): + """Lists tenants who have access to private flavor + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + + :returns: List of dicts with flavor_id and tenant_id attributes. 
+ """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.get_access(self) + + def fetch_flavor_extra_specs(self, flavor): + """Lists Extra Specs of a flavor + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + + :returns: One :class:`~openstack.compute.v2.flavor.Flavor` + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.fetch_extra_specs(self) + + def create_flavor_extra_specs(self, flavor, extra_specs): + """Lists Extra Specs of a flavor + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + :param dict extra_specs: dict of extra specs + + :returns: One :class:`~openstack.compute.v2.flavor.Flavor` + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.create_extra_specs(self, specs=extra_specs) + + def get_flavor_extra_specs_property(self, flavor, prop): + """Get specific Extra Spec property of a flavor + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + :param str prop: Property name. + + :returns: String value of the requested property. + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.get_extra_specs_property(self, prop) + + def update_flavor_extra_specs_property(self, flavor, prop, val): + """Update specific Extra Spec property of a flavor + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. + :param str prop: Property name. + :param str val: Property value. + + :returns: String value of the requested property. + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.update_extra_specs_property(self, prop, val) + + def delete_flavor_extra_specs_property(self, flavor, prop): + """Delete specific Extra Spec property of a flavor + + :param flavor: Either the ID of a flavor or a + :class:`~openstack.compute.v2.flavor.Flavor` instance. 
+ :param str prop: Property name. + + :returns: None + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + return flavor.delete_extra_specs_property(self, prop) + + # ========== Aggregates ========== + + def aggregates(self, **query): + """Return a generator of aggregate + + :param kwargs query: Optional query parameters to be sent to limit + the aggregates being returned. + + :returns: A generator of aggregate + :rtype: class: `~openstack.compute.v2.aggregate.Aggregate` + """ + return self._list(_aggregate.Aggregate, **query) + + def get_aggregate(self, aggregate): + """Get a single host aggregate + + :param aggregate: The value can be the ID of an aggregate or a + :class:`~openstack.compute.v2.aggregate.Aggregate` instance. + + :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_aggregate.Aggregate, aggregate) + + def find_aggregate(self, name_or_id, ignore_missing=True): + """Find a single aggregate + + :param name_or_id: The name or ID of an aggregate. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + + :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` + or None + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + _aggregate.Aggregate, + name_or_id, + ignore_missing=ignore_missing, + ) + + def create_aggregate(self, **attrs): + """Create a new host aggregate from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.compute.v2.aggregate.Aggregate`, + comprised of the properties on the Aggregate class. 
+ + :returns: The results of aggregate creation + :rtype: :class:`~openstack.compute.v2.aggregate.Aggregate` + """ + return self._create(_aggregate.Aggregate, **attrs) + + def update_aggregate(self, aggregate, **attrs): + """Update a host aggregate + + :param aggregate: Either the ID of a host aggregate or a + :class:`~openstack.compute.v2.aggregate.Aggregate` instance. + :param attrs: The attributes to update on the aggregate represented + by ``aggregate``. + + :returns: The updated aggregate + :rtype: :class:`~openstack.compute.v2.aggregate.Aggregate` + """ + return self._update(_aggregate.Aggregate, aggregate, **attrs) + + def delete_aggregate(self, aggregate, ignore_missing=True): + """Delete a host aggregate + + :param aggregate: The value can be either the ID of an aggregate or a + :class:`~openstack.compute.v2.aggregate.Aggregate` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the aggregate does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent aggregate. + + :returns: ``None`` + """ + self._delete( + _aggregate.Aggregate, + aggregate, + ignore_missing=ignore_missing, + ) + + def add_host_to_aggregate(self, aggregate, host): + """Adds a host to an aggregate + + :param aggregate: Either the ID of an aggregate or a + :class:`~openstack.compute.v2.aggregate.Aggregate` + instance. + :param str host: The host to add to the aggregate + + :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` + """ + aggregate = self._get_resource(_aggregate.Aggregate, aggregate) + return aggregate.add_host(self, host) + + def remove_host_from_aggregate(self, aggregate, host): + """Removes a host from an aggregate + + :param aggregate: Either the ID of an aggregate or a + :class:`~openstack.compute.v2.aggregate.Aggregate` + instance. 
+ :param str host: The host to remove from the aggregate + + :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` + """ + aggregate = self._get_resource(_aggregate.Aggregate, aggregate) + return aggregate.remove_host(self, host) + + def set_aggregate_metadata(self, aggregate, metadata): + """Creates or replaces metadata for an aggregate + + :param aggregate: Either the ID of a aggregate or a + :class:`~openstack.compute.v2.aggregate.Aggregate` + instance. + :param dict metadata: Metadata key and value pairs. The maximum + size for each metadata key and value pair + is 255 bytes. + + :returns: One :class:`~openstack.compute.v2.aggregate.Aggregate` + """ + aggregate = self._get_resource(_aggregate.Aggregate, aggregate) + return aggregate.set_metadata(self, metadata) + + def aggregate_precache_images(self, aggregate, images): + """Requests image precaching on an aggregate + + :param aggregate: Either the ID of a aggregate or a + :class:`~openstack.compute.v2.aggregate.Aggregate` instance. + :param images: Single image id or list of image ids. + + :returns: ``None`` + """ + aggregate = self._get_resource(_aggregate.Aggregate, aggregate) + # We need to ensure we pass list of image IDs + if isinstance(images, str): + images = [images] + image_data = [] + for img in images: + image_data.append({'id': img}) + return aggregate.precache_images(self, image_data) + + # ========== Images ========== def delete_image(self, image, ignore_missing=True): """Delete an image :param image: The value can be either the ID of an image or a - :class:`~openstack.compute.v2.image.Image` instance. + :class:`~openstack.compute.v2.image.Image` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the image does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent image. 
+ :class:`~openstack.exceptions.NotFoundException` will be + raised when the image does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent image. :returns: ``None`` """ + warnings.warn( + 'This API is a proxy to the image service and has been ' + 'deprecated; use the image service proxy API instead', + os_warnings.OpenStackDeprecationWarning, + ) self._delete(_image.Image, image, ignore_missing=ignore_missing) + # NOTE(stephenfin): We haven't added 'details' support here since this + # method is deprecated def find_image(self, name_or_id, ignore_missing=True): """Find a single image :param name_or_id: The name or ID of a image. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.compute.v2.image.Image` or None - """ - return self._find(_image.Image, name_or_id, - ignore_missing=ignore_missing) + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + warnings.warn( + 'This API is a proxy to the image service and has been ' + 'deprecated; use the image service proxy API instead', + os_warnings.OpenStackDeprecationWarning, + ) + return self._find( + _image.Image, + name_or_id, + ignore_missing=ignore_missing, + ) def get_image(self, image): """Get a single image :param image: The value can be the ID of an image or a - :class:`~openstack.compute.v2.image.Image` instance. + :class:`~openstack.compute.v2.image.Image` instance. 
:returns: One :class:`~openstack.compute.v2.image.Image` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. - """ + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + warnings.warn( + 'This API is a proxy to the image service and has been ' + 'deprecated; use the image service proxy API instead', + os_warnings.OpenStackDeprecationWarning, + ) return self._get(_image.Image, image) def images(self, details=True, **query): """Return a generator of images :param bool details: When ``True``, returns - :class:`~openstack.compute.v2.image.ImageDetail` objects, - otherwise :class:`~openstack.compute.v2.image.Image`. + :class:`~openstack.compute.v2.image.Image` objects with all + available properties, otherwise only basic properties are returned. *Default: ``True``* - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of image objects """ - img = _image.ImageDetail if details else _image.Image - return self._list(img, paginated=True, **query) + warnings.warn( + 'This API is a proxy to the image service and has been ' + 'deprecated; use the image service proxy API instead', + os_warnings.OpenStackDeprecationWarning, + ) + base_path = '/images/detail' if details else None + return self._list(_image.Image, base_path=base_path, **query) def _get_base_resource(self, res, base): # Metadata calls for Image and Server can work for both those @@ -188,135 +575,168 @@ def get_image_metadata(self, image): """Return a dictionary of metadata for an image :param image: Either the ID of an image or a - :class:`~openstack.compute.v2.image.Image` or - :class:`~openstack.compute.v2.image.ImageDetail` - instance. + :class:`~openstack.compute.v2.image.Image` instance. 
:returns: A :class:`~openstack.compute.v2.image.Image` with only the - image's metadata. All keys and values are Unicode text. + image's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.compute.v2.image.Image` """ res = self._get_base_resource(image, _image.Image) - metadata = res.get_metadata(self.session) - result = _image.Image.existing(id=res.id, metadata=metadata) - return result + return res.fetch_metadata(self) def set_image_metadata(self, image, **metadata): """Update metadata for an image :param image: Either the ID of an image or a - :class:`~openstack.compute.v2.image.Image` or - :class:`~openstack.compute.v2.image.ImageDetail` - instance. + :class:`~openstack.compute.v2.image.Image` instance. :param kwargs metadata: Key/value pairs to be updated in the image's - metadata. No other metadata is modified - by this call. All keys and values are stored - as Unicode. + metadata. No other metadata is modified + by this call. All keys and values are stored + as Unicode. :returns: A :class:`~openstack.compute.v2.image.Image` with only the - image's metadata. All keys and values are Unicode text. + image's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.compute.v2.image.Image` """ res = self._get_base_resource(image, _image.Image) - metadata = res.set_metadata(self.session, **metadata) - result = _image.Image.existing(id=res.id, metadata=metadata) - return result + return res.set_metadata(self, metadata=metadata) - def delete_image_metadata(self, image, keys): + def delete_image_metadata(self, image, keys=None): """Delete metadata for an image Note: This method will do a HTTP DELETE request for every key in keys. :param image: Either the ID of an image or a - :class:`~openstack.compute.v2.image.Image` or - :class:`~openstack.compute.v2.image.ImageDetail` - instance. - :param keys: The keys to delete. + :class:`~openstack.compute.v2.image.Image` instance. + :param list keys: The keys to delete. 
If left empty complete metadata + will be removed. :rtype: ``None`` """ res = self._get_base_resource(image, _image.Image) - return res.delete_metadata(self.session, keys) + if keys is not None: + # Create a set as a snapshot of keys to avoid "changed during + # iteration" + for key in set(keys): + res.delete_metadata_item(self, key) + else: + res.delete_metadata(self) + + # ========== Keypairs ========== def create_keypair(self, **attrs): """Create a new keypair from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.compute.v2.keypair.Keypair`, - comprised of the properties on the Keypair class. + a :class:`~openstack.compute.v2.keypair.Keypair`, + comprised of the properties on the Keypair class. :returns: The results of keypair creation :rtype: :class:`~openstack.compute.v2.keypair.Keypair` """ return self._create(_keypair.Keypair, **attrs) - def delete_keypair(self, keypair, ignore_missing=True): + def delete_keypair(self, keypair, ignore_missing=True, user_id=None): """Delete a keypair :param keypair: The value can be either the ID of a keypair or a - :class:`~openstack.compute.v2.keypair.Keypair` - instance. + :class:`~openstack.compute.v2.keypair.Keypair` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the keypair does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent keypair. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the keypair does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + keypair. 
+ :param str user_id: Optional user_id owning the keypair :returns: ``None`` """ - self._delete(_keypair.Keypair, keypair, ignore_missing=ignore_missing) + # NOTE(gtema): it is necessary to overload normal logic since query + # parameters are not properly respected in typical DELETE case + res = self._get_resource(_keypair.Keypair, keypair) + + try: + delete_params = {'user_id': user_id} if user_id else {} + res.delete(self, params=delete_params) + except exceptions.NotFoundException: + if ignore_missing: + return None + raise - def get_keypair(self, keypair): + def get_keypair(self, keypair, user_id=None): """Get a single keypair :param keypair: The value can be the ID of a keypair or a - :class:`~openstack.compute.v2.keypair.Keypair` - instance. + :class:`~openstack.compute.v2.keypair.Keypair` instance. + :param str user_id: Optional user_id owning the keypair :returns: One :class:`~openstack.compute.v2.keypair.Keypair` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_keypair.Keypair, keypair) + # NOTE(gtema): it is necessary to overload normal logic since query + # parameters are not properly respected in typical fetch case + res = self._get_resource(_keypair.Keypair, keypair) - def find_keypair(self, name_or_id, ignore_missing=True): + get_params = {'user_id': user_id} if user_id else {} + return res.fetch( + self, error_message=f"No Keypair found for {keypair}", **get_params + ) + + def find_keypair(self, name_or_id, ignore_missing=True, *, user_id=None): """Find a single keypair :param name_or_id: The name or ID of a keypair. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. 
- :returns: One :class:`~openstack.compute.v2.keypair.Keypair` or None - """ - return self._find(_keypair.Keypair, name_or_id, - ignore_missing=ignore_missing) + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param str user_id: Optional user_id owning the keypair - def keypairs(self): + :returns: One :class:`~openstack.compute.v2.keypair.Keypair` or None + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + attrs = {'user_id': user_id} if user_id else {} + return self._find( + _keypair.Keypair, + name_or_id, + ignore_missing=ignore_missing, + **attrs, + ) + + def keypairs(self, **query): """Return a generator of keypairs + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
:returns: A generator of keypair objects :rtype: :class:`~openstack.compute.v2.keypair.Keypair` """ - return self._list(_keypair.Keypair, paginated=False) + return self._list(_keypair.Keypair, **query) + + # ========== Limits ========== - def get_limits(self): + def get_limits(self, **query): """Retrieve limits that are applied to the project's account :returns: A Limits object, including both - :class:`~openstack.compute.v2.limits.AbsoluteLimits` and - :class:`~openstack.compute.v2.limits.RateLimits` + :class:`~openstack.compute.v2.limits.AbsoluteLimits` and + :class:`~openstack.compute.v2.limits.RateLimits` :rtype: :class:`~openstack.compute.v2.limits.Limits` """ - return self._get(limits.Limits) + res = self._get_resource(limits.Limits, None) + return res.fetch(self, **query) + + # ========== Servers ========== def create_server(self, **attrs): """Create a new server from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.compute.v2.server.Server`, - comprised of the properties on the Server class. + a :class:`~openstack.compute.v2.server.Server`, + comprised of the properties on the Server class. :returns: The results of server creation :rtype: :class:`~openstack.compute.v2.server.Server` @@ -327,96 +747,101 @@ def delete_server(self, server, ignore_missing=True, force=False): """Delete a server :param server: The value can be either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the server does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent server + :class:`~openstack.exceptions.NotFoundException` will be + raised when the server does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent server :param bool force: When set to ``True``, the server deletion will be - forced immediatly. + forced immediately. :returns: ``None`` """ if force: server = self._get_resource(_server.Server, server) - server.force_delete(self.session) + server.force_delete(self) else: self._delete(_server.Server, server, ignore_missing=ignore_missing) - def find_server(self, name_or_id, ignore_missing=True): + def find_server( + self, + name_or_id, + ignore_missing=True, + *, + details=True, + all_projects=False, + ): """Find a single server :param name_or_id: The name or ID of a server. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param bool details: When set to ``False`` + instances with only basic data will be returned. The default, + ``True``, will cause instances with full data to be returned. + :param bool all_projects: When set to ``True``, search for server + by name across all projects. Note that this will likely result in a + higher chance of duplicates. Admin-only by default. + :returns: One :class:`~openstack.compute.v2.server.Server` or None - """ - return self._find(_server.Server, name_or_id, - ignore_missing=ignore_missing) + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. 
+ """ + query = {} + if all_projects: + query['all_projects'] = True + list_base_path = '/servers/detail' if details else None + return self._find( + _server.Server, + name_or_id, + ignore_missing=ignore_missing, + list_base_path=list_base_path, + **query, + ) def get_server(self, server): """Get a single server :param server: The value can be the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: One :class:`~openstack.compute.v2.server.Server` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_server.Server, server) - def servers(self, details=True, **query): + def servers(self, details=True, all_projects=False, **query): """Retrieve a generator of servers :param bool details: When set to ``False`` - :class:`~openstack.compute.v2.server.Server` instances - will be returned. The default, ``True``, will cause - :class:`~openstack.compute.v2.server.ServerDetail` - instances to be returned. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the servers being returned. Available parameters include: - - * changes_since: A time/date stamp for when the server last changed - status. - * image: An image resource or ID. - * flavor: A flavor resource or ID. - * name: Name of the server as a string. Can be queried with - regular expressions. The regular expression - ?name=bob returns both bob and bobb. If you must match on - only bob, you can use a regular expression that - matches the syntax of the underlying database server that - is implemented for Compute, such as MySQL or PostgreSQL. - * status: Value of the status of the server so that you can filter - on "ACTIVE" for example. - * host: Name of the host as a string. - * limit: Requests a specified page size of returned items from the - query. 
Returns a number of items up to the specified - limit value. Use the limit parameter to make an initial - limited request and use the ID of the last-seen item from - the response as the marker parameter value in a subsequent - limited request. - * marker: Specifies the ID of the last-seen item. Use the limit - parameter to make an initial limited request and use the - ID of the last-seen item from the response as the marker - parameter value in a subsequent limited request. + instances with only basic data will be returned. The default, + ``True``, will cause instances with full data to be returned. + :param bool all_projects: When set to ``True``, lists servers from all + projects. Admin-only by default. + :param kwargs query: Optional query parameters to be sent to limit + the servers being returned. Available parameters can be seen + under https://docs.openstack.org/api-ref/compute/#list-servers :returns: A generator of server instances. """ - srv = _server.ServerDetail if details else _server.Server - return self._list(srv, paginated=True, **query) + if all_projects: + query['all_projects'] = True + base_path = '/servers/detail' if details else None + return self._list(_server.Server, base_path=base_path, **query) def update_server(self, server, **attrs): """Update a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. - :attrs kwargs: The attributes to update on the server represented - by ``server``. + :class:`~openstack.compute.v2.server.Server` instance. + :param attrs: The attributes to update on the server represented + by ``server``. :returns: The updated server :rtype: :class:`~openstack.compute.v2.server.Server` @@ -427,45 +852,67 @@ def change_server_password(self, server, new_password): """Change the administrator password :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. 
:param str new_password: The new password to be set. :returns: None """ server = self._get_resource(_server.Server, server) - server.change_password(self.session, new_password) + server.change_password(self, new_password) + + def get_server_password(self, server): + """Get the administrator password + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + + :returns: encrypted password. + """ + server = self._get_resource(_server.Server, server) + return server.get_password(self) + + def clear_server_password(self, server): + """Clear the administrator password + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.clear_password(self) def reset_server_state(self, server, state): """Reset the state of server :param server: The server can be either the ID of a server or a - :class:`~openstack.compute.v2.server.Server`. + :class:`~openstack.compute.v2.server.Server`. :param state: The state of the server to be set, `active` or - `error` are valid. + `error` are valid. :returns: None """ res = self._get_base_resource(server, _server.Server) - res.reset_state(self.session, state) + res.reset_state(self, state) def reboot_server(self, server, reboot_type): """Reboot a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :param str reboot_type: The type of reboot to perform. - "HARD" and "SOFT" are the current options. + "HARD" and "SOFT" are the current options. 
:returns: None """ server = self._get_resource(_server.Server, server) - server.reboot(self.session, reboot_type) + server.reboot(self, reboot_type) - def rebuild_server(self, server, name, admin_password, **attrs): + def rebuild_server(self, server, image, **attrs): """Rebuild a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :param str name: The name of the server :param str admin_password: The administrator password :param bool preserve_ephemeral: Indicates whether the server @@ -473,276 +920,251 @@ def rebuild_server(self, server, name, admin_password, **attrs): *Default: False* :param str image: The id of an image to rebuild with. *Default: None* :param str access_ipv4: The IPv4 address to rebuild with. - *Default: None* + *Default: None* :param str access_ipv6: The IPv6 address to rebuild with. - *Default: None* + *Default: None* :param dict metadata: A dictionary of metadata to rebuild with. - *Default: None* + *Default: None* :param personality: A list of dictionaries, each including a - **path** and **contents** key, to be injected - into the rebuilt server at launch. - *Default: None* + **path** and **contents** key, to be injected + into the rebuilt server at launch. + *Default: None* :returns: The rebuilt :class:`~openstack.compute.v2.server.Server` - instance. + instance. """ server = self._get_resource(_server.Server, server) - return server.rebuild(self.session, name, admin_password, **attrs) + return server.rebuild(self, image=image, **attrs) def resize_server(self, server, flavor): """Resize a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :param flavor: Either the ID of a flavor or a - :class:`~openstack.compute.v2.flavor.Flavor` instance. + :class:`~openstack.compute.v2.flavor.Flavor` instance. 
:returns: None """ server = self._get_resource(_server.Server, server) - flavor_id = resource2.Resource._get_id(flavor) - server.resize(self.session, flavor_id) + flavor_id = resource.Resource._get_id(flavor) + server.resize(self, flavor_id) def confirm_server_resize(self, server): """Confirm a server resize :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.confirm_resize(self.session) + server.confirm_resize(self) def revert_server_resize(self, server): """Revert a server resize :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.revert_resize(self.session) - - def create_server_image(self, server, name, metadata=None): + server.revert_resize(self) + + def create_server_image( + self, + server, + name, + metadata=None, + wait=False, + timeout=120, + ): """Create an image from a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :param str name: The name of the image to be created. :param dict metadata: A dictionary of metadata to be set on the image. - :returns: None + :returns: :class:`~openstack.image.v2.image.Image` object. """ server = self._get_resource(_server.Server, server) - server.create_image(self.session, name, metadata) + image_id = server.create_image(self, name, metadata) - def add_security_group_to_server(self, server, security_group): - """Add a security group to a server + image = self._connection.get_image(image_id) - :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. 
- :param security_group: Either the ID of a security group or a - :class:`~openstack.network.v2.security_group.SecurityGroup` - instance. + if not wait: + return image + return self._connection.wait_for_image(image, timeout=timeout) - :returns: None - """ - server = self._get_resource(_server.Server, server) - security_group_id = resource2.Resource._get_id(security_group) - server.add_security_group(self.session, security_group_id) - - def remove_security_group_from_server(self, server, security_group): - """Add a security group to a server + def backup_server(self, server, name, backup_type, rotation): + """Backup a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. - :param security_group: Either the ID of a security group or a - :class:`~openstack.network.v2.security_group.SecurityGroup` - instance. - - :returns: None - """ - server = self._get_resource(_server.Server, server) - security_group_id = resource2.Resource._get_id(security_group) - server.remove_security_group(self.session, security_group_id) - - def add_fixed_ip_to_server(self, server, network_id): - """Adds a fixed IP address to a server instance. - - :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. - :param network_id: The ID of the network from which a fixed IP address - is about to be allocated. - :returns: None - """ - server = self._get_resource(_server.Server, server) - server.add_fixed_ip(self.session, network_id) - - def remove_fixed_ip_from_server(self, server, address): - """Removes a fixed IP address from a server instance. - - :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. - :param address: The fixed IP address to be disassociated from the - server. 
- :returns: None - """ - server = self._get_resource(_server.Server, server) - server.remove_fixed_ip(self.session, address) - - def add_floating_ip_to_server(self, server, address, fixed_address=None): - """Adds a floating IP address to a server instance. - - :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. - :param address: The floating IP address to be added to the server. - :param fixed_address: The fixed IP address to be associated with the - floating IP address. Used when the server is - connected to multiple networks. - :returns: None - """ - server = self._get_resource(_server.Server, server) - server.add_floating_ip(self.session, address, - fixed_address=fixed_address) + :class:`~openstack.compute.v2.server.Server` instance. + :param name: The name of the backup image. + :param backup_type: The type of the backup, for example, daily. + :param rotation: The rotation of the back up image, the oldest + image will be removed when image count exceed + the rotation count. - def remove_floating_ip_from_server(self, server, address): - """Removes a floating IP address from a server instance. - - :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. - :param address: The floating IP address to be disassociated from the - server. :returns: None """ server = self._get_resource(_server.Server, server) - server.remove_floating_ip(self.session, address) + server.backup(self, name, backup_type, rotation) def pause_server(self, server): """Pauses a server and changes its status to ``PAUSED``. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.pause(self.session) + server.pause(self) def unpause_server(self, server): """Unpauses a paused server and changes its status to ``ACTIVE``. 
:param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.unpause(self.session) + server.unpause(self) def suspend_server(self, server): """Suspends a server and changes its status to ``SUSPENDED``. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.suspend(self.session) + server.suspend(self) def resume_server(self, server): """Resumes a suspended server and changes its status to ``ACTIVE``. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.resume(self.session) + server.resume(self) - def lock_server(self, server): + def lock_server(self, server, locked_reason=None): """Locks a server. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. + :param locked_reason: The reason behind locking the server. Limited to + 255 characters in length. :returns: None """ server = self._get_resource(_server.Server, server) - server.lock(self.session) + server.lock(self, locked_reason=locked_reason) def unlock_server(self, server): """Unlocks a locked server. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. 
:returns: None """ server = self._get_resource(_server.Server, server) - server.unlock(self.session) + server.unlock(self) def rescue_server(self, server, admin_pass=None, image_ref=None): """Puts a server in rescue mode and changes it status to ``RESCUE``. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :param admin_pass: The password for the rescued server. If you omit - this parameter, the operation generates a new - password. + this parameter, the operation generates a new + password. :param image_ref: The image reference to use to rescue your server. - This can be the image ID or its full URL. If you - omit this parameter, the base image reference will - be used. + This can be the image ID or its full URL. If you + omit this parameter, the base image reference will + be used. :returns: None """ server = self._get_resource(_server.Server, server) - server.rescue(self.session, admin_pass=admin_pass, image_ref=image_ref) + server.rescue(self, admin_pass=admin_pass, image_ref=image_ref) def unrescue_server(self, server): """Unrescues a server and changes its status to ``ACTIVE``. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.unrescue(self.session) - - def evacuate_server(self, server, host=None, admin_pass=None, force=None): + server.unrescue(self) + + def evacuate_server( + self, + server, + host=None, + admin_pass=None, + force=None, + *, + on_shared_storage=None, + ): """Evacuates a server from a failed host to a new host. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. 
:param host: An optional parameter specifying the name or ID of the - host to which the server is evacuated. + host to which the server is evacuated. :param admin_pass: An optional parameter specifying the administrative - password to access the evacuated or rebuilt server. + password to access the evacuated or rebuilt server. :param force: Force an evacuation by not verifying the provided - destination host by the scheduler. (New in API version - 2.29). + destination host by the scheduler. (New in API version + 2.29). + :param on_shared_storage: Whether the host is using shared storage. + (Optional) (Only supported before API version 2.14) :returns: None """ server = self._get_resource(_server.Server, server) - server.evacuate(self.session, host=host, admin_pass=admin_pass, - force=force) + server.evacuate( + self, + host=host, + admin_pass=admin_pass, + force=force, + on_shared_storage=on_shared_storage, + ) def start_server(self, server): """Starts a stopped server and changes its state to ``ACTIVE``. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.start(self.session) + server.start(self) def stop_server(self, server): """Stops a running server and changes its state to ``SHUTOFF``. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.stop(self) + + def restore_server(self, server): + """Restore a soft-deleted server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. 
:returns: None """ server = self._get_resource(_server.Server, server) - server.stop(self.session) + server.restore(self) def shelve_server(self, server): """Shelves a server. @@ -753,30 +1175,201 @@ def shelve_server(self, server): operation. Cloud provides could change this permission though. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. :returns: None """ server = self._get_resource(_server.Server, server) - server.shelve(self.session) + server.shelve(self) - def unshelve_server(self, server): - """Unselves or restores a shelved server. + def shelve_offload_server(self, server): + """Shelve-offloads, or removes, a server + + Data and resource associations are deleted. + + Policy defaults enable only users with administrative role or the owner + of the server to perform this operation. Cloud provides could change + this permission though. + + Note that in some clouds, shelved servers are automatically offloaded, + sometimes after a certain time period. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.shelve_offload(self) + + def unshelve_server( + self, server, *, host=None, availability_zone=types.UNSET + ): + """Unshelves or restores a shelved server. Policy defaults enable only users with administrative role or the owner of the server to perform this operation. Cloud provides could change this permission though. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` instance. + :class:`~openstack.compute.v2.server.Server` instance. + :param host: An optional parameter specifying the name the compute + host to unshelve to. (New in API version 2.91). 
:returns: None """ server = self._get_resource(_server.Server, server) - server.unshelve(self.session) + server.unshelve(self, host=host, availability_zone=availability_zone) + + def trigger_server_crash_dump(self, server): + """Trigger a crash dump in a server. - def wait_for_server(self, server, status='ACTIVE', failures=['ERROR'], - interval=2, wait=120): - return resource2.wait_for_status(self.session, server, status, - failures, interval, wait) + When a server starts behaving oddly at a fundamental level, it maybe be + useful to get a kernel level crash dump to debug further. The crash + dump action forces a crash dump followed by a system reboot of the + server. Once the server comes back online, you can find a Kernel Crash + Dump file in a certain location of the filesystem. For example, for + Ubuntu you can find it in the /var/crash directory. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.trigger_crash_dump(self) + + def add_tag_to_server(self, server, tag): + """Add a tag to a server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param tag: The tag to add. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.add_tag(self, tag) + + def remove_tag_from_server(self, server, tag): + """Remove a tag from a server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param tag: The tag to remove. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.remove_tag(self, tag) + + def remove_tags_from_server(self, server): + """Remove all tags from a server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param tag: The tag to remove. 
+ :returns: None + """ + server = self._get_resource(_server.Server, server) + server.remove_all_tags(self) + + # ========== Server security groups ========== + + def fetch_server_security_groups(self, server): + """Fetch security groups with details for a server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + + :returns: updated :class:`~openstack.compute.v2.server.Server` instance + """ + server = self._get_resource(_server.Server, server) + return server.fetch_security_groups(self) + + def add_security_group_to_server(self, server, security_group): + """Add a security group to a server + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param security_group: Either the ID or name of a security group or a + :class:`~openstack.network.v2.security_group.SecurityGroup` + instance. + + :returns: None + """ + server = self._get_resource(_server.Server, server) + security_group = self._get_resource(_sg.SecurityGroup, security_group) + server.add_security_group( + self, + security_group.name or security_group.id, + ) + + def remove_security_group_from_server(self, server, security_group): + """Remove a security group from a server + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param security_group: Either the ID or name of a security group or a + :class:`~openstack.network.v2.security_group.SecurityGroup` + instance. + + :returns: None + """ + server = self._get_resource(_server.Server, server) + security_group = self._get_resource(_sg.SecurityGroup, security_group) + server.remove_security_group( + self, + security_group.name or security_group.id, + ) + + # ========== Server IPs ========== + + def add_fixed_ip_to_server(self, server, network_id): + """Adds a fixed IP address to a server instance. 
+ + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param network_id: The ID of the network from which a fixed IP address + is about to be allocated. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.add_fixed_ip(self, network_id) + + def remove_fixed_ip_from_server(self, server, address): + """Removes a fixed IP address from a server instance. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param address: The fixed IP address to be disassociated from the + server. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.remove_fixed_ip(self, address) + + def add_floating_ip_to_server(self, server, address, fixed_address=None): + """Adds a floating IP address to a server instance. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param address: The floating IP address to be added to the server. + :param fixed_address: The fixed IP address to be associated with the + floating IP address. Used when the server is + connected to multiple networks. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.add_floating_ip(self, address, fixed_address=fixed_address) + + def remove_floating_ip_from_server(self, server, address): + """Removes a floating IP address from a server instance. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param address: The floating IP address to be disassociated from the + server. 
+ :returns: None + """ + server = self._get_resource(_server.Server, server) + server.remove_floating_ip(self, address) + + # ========== Server Interfaces ========== def create_server_interface(self, server, **attrs): """Create a new server interface from attributes @@ -791,12 +1384,19 @@ def create_server_interface(self, server, **attrs): :returns: The results of server interface creation :rtype: :class:`~openstack.compute.v2.server_interface.ServerInterface` """ - server_id = resource2.Resource._get_id(server) - return self._create(_server_interface.ServerInterface, - server_id=server_id, **attrs) - - def delete_server_interface(self, server_interface, server=None, - ignore_missing=True): + server_id = resource.Resource._get_id(server) + return self._create( + _server_interface.ServerInterface, + server_id=server_id, + **attrs, + ) + + def delete_server_interface( + self, + server_interface, + server=None, + ignore_missing=True, + ): """Delete a server interface :param server_interface: @@ -804,25 +1404,30 @@ def delete_server_interface(self, server_interface, server=None, :class:`~openstack.compute.v2.server_interface.ServerInterface` instance. :param server: This parameter need to be specified when ServerInterface - ID is given as value. It can be either the ID of a - server or a :class:`~openstack.compute.v2.server.Server` - instance that the interface belongs to. + ID is given as value. It can be either the ID of a + server or a :class:`~openstack.compute.v2.server.Server` + instance that the interface belongs to. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the server interface does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent server interface. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the server interface does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent server interface. :returns: ``None`` """ - server_id = self._get_uri_attribute(server_interface, server, - "server_id") - server_interface = resource2.Resource._get_id(server_interface) + server_id = self._get_uri_attribute( + server_interface, + server, + "server_id", + ) + server_interface = resource.Resource._get_id(server_interface) - self._delete(_server_interface.ServerInterface, - port_id=server_interface, - server_id=server_id, - ignore_missing=ignore_missing) + self._delete( + _server_interface.ServerInterface, + server_interface, + server_id=server_id, + ignore_missing=ignore_missing, + ) def get_server_interface(self, server_interface, server=None): """Get a single server interface @@ -832,121 +1437,138 @@ def get_server_interface(self, server_interface, server=None): :class:`~openstack.compute.v2.server_interface.ServerInterface` instance. :param server: This parameter need to be specified when ServerInterface - ID is given as value. It can be either the ID of a - server or a :class:`~openstack.compute.v2.server.Server` - instance that the interface belongs to. + ID is given as value. It can be either the ID of a + server or a :class:`~openstack.compute.v2.server.Server` + instance that the interface belongs to. :returns: One :class:`~openstack.compute.v2.server_interface.ServerInterface` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. - """ - server_id = self._get_uri_attribute(server_interface, server, - "server_id") - server_interface = resource2.Resource._get_id(server_interface) - - return self._get(_server_interface.ServerInterface, - server_id=server_id, port_id=server_interface) - - def server_interfaces(self, server): + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + server_id = self._get_uri_attribute( + server_interface, + server, + "server_id", + ) + server_interface = resource.Resource._get_id(server_interface) + + return self._get( + _server_interface.ServerInterface, + server_id=server_id, + port_id=server_interface, + ) + + def server_interfaces(self, server, **query): """Return a generator of server interfaces :param server: The server can be either the ID of a server or a - :class:`~openstack.compute.v2.server.Server`. + :class:`~openstack.compute.v2.server.Server`. + :param query: Optional query parameters to be sent to limit the + resources being returned. :returns: A generator of ServerInterface objects :rtype: :class:`~openstack.compute.v2.server_interface.ServerInterface` """ - server_id = resource2.Resource._get_id(server) - return self._list(_server_interface.ServerInterface, paginated=False, - server_id=server_id) + server_id = resource.Resource._get_id(server) + return self._list( + _server_interface.ServerInterface, + server_id=server_id, + **query, + ) def server_ips(self, server, network_label=None): """Return a generator of server IPs :param server: The server can be either the ID of a server or a - :class:`~openstack.compute.v2.server.Server`. + :class:`~openstack.compute.v2.server.Server`. :param network_label: The name of a particular network to list - IP addresses from. + IP addresses from. :returns: A generator of ServerIP objects :rtype: :class:`~openstack.compute.v2.server_ip.ServerIP` """ - server_id = resource2.Resource._get_id(server) - return self._list(server_ip.ServerIP, paginated=False, - server_id=server_id, network_label=network_label) + server_id = resource.Resource._get_id(server) + return self._list( + server_ip.ServerIP, + server_id=server_id, + network_label=network_label, + ) def availability_zones(self, details=False): """Return a generator of availability zones :param bool details: Return extra details about the availability - zones. 
This defaults to `False` as it generally - requires extra permission. + zones. This defaults to `False` as it generally + requires extra permission. :returns: A generator of availability zone - :rtype: :class:`~openstack.compute.v2.availability_zone. - AvailabilityZone` + :rtype: + :class:`~openstack.compute.v2.availability_zone.AvailabilityZone` """ - if details: - az = availability_zone.AvailabilityZoneDetail - else: - az = availability_zone.AvailabilityZone + base_path = '/os-availability-zone/detail' if details else None + + return self._list( + availability_zone.AvailabilityZone, + base_path=base_path, + ) - return self._list(az, paginated=False) + # ========== Server Metadata ========== def get_server_metadata(self, server): """Return a dictionary of metadata for a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` or - :class:`~openstack.compute.v2.server.ServerDetail` - instance. + :class:`~openstack.compute.v2.server.Server` or + :class:`~openstack.compute.v2.server.ServerDetail` + instance. - :returns: A :class:`~openstack.compute.v2.server.Server` with only the - server's metadata. All keys and values are Unicode text. + :returns: A :class:`~openstack.compute.v2.server.Server` with the + server's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.compute.v2.server.Server` """ res = self._get_base_resource(server, _server.Server) - metadata = res.get_metadata(self.session) - result = _server.Server.existing(id=res.id, metadata=metadata) - return result + return res.fetch_metadata(self) def set_server_metadata(self, server, **metadata): """Update metadata for a server :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` or - :class:`~openstack.compute.v2.server.ServerDetail` - instance. + :class:`~openstack.compute.v2.server.Server` instance. :param kwargs metadata: Key/value pairs to be updated in the server's - metadata. 
No other metadata is modified - by this call. All keys and values are stored - as Unicode. + metadata. No other metadata is modified + by this call. All keys and values are stored + as Unicode. :returns: A :class:`~openstack.compute.v2.server.Server` with only the - server's metadata. All keys and values are Unicode text. + server's metadata. All keys and values are Unicode text. :rtype: :class:`~openstack.compute.v2.server.Server` """ res = self._get_base_resource(server, _server.Server) - metadata = res.set_metadata(self.session, **metadata) - result = _server.Server.existing(id=res.id, metadata=metadata) - return result + return res.set_metadata(self, metadata=metadata) - def delete_server_metadata(self, server, keys): + def delete_server_metadata(self, server, keys=None): """Delete metadata for a server Note: This method will do a HTTP DELETE request for every key in keys. :param server: Either the ID of a server or a - :class:`~openstack.compute.v2.server.Server` or - :class:`~openstack.compute.v2.server.ServerDetail` - instance. - :param keys: The keys to delete + :class:`~openstack.compute.v2.server.Server` instance. + :param list keys: The keys to delete. If left empty complete + metadata will be removed. :rtype: ``None`` """ res = self._get_base_resource(server, _server.Server) - return res.delete_metadata(self.session, keys) + if keys is not None: + # Create a set as a snapshot of keys to avoid "changed during + # iteration" + for key in set(keys): + res.delete_metadata_item(self, key) + else: + res.delete_metadata(self) + + # ========== Server Groups ========== def create_server_group(self, **attrs): """Create a new server group from attributes @@ -964,158 +1586,1238 @@ def delete_server_group(self, server_group, ignore_missing=True): """Delete a server group :param server_group: The value can be either the ID of a server group - or a :class:`~openstack.compute.v2.server_group.ServerGroup` - instance. 
+ or a :class:`~openstack.compute.v2.server_group.ServerGroup` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the server group does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent server group. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the server group does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent server group. :returns: ``None`` """ - self._delete(_server_group.ServerGroup, server_group, - ignore_missing=ignore_missing) - - def find_server_group(self, name_or_id, ignore_missing=True): + self._delete( + _server_group.ServerGroup, + server_group, + ignore_missing=ignore_missing, + ) + + def find_server_group( + self, + name_or_id, + ignore_missing=True, + *, + all_projects=False, + ): """Find a single server group :param name_or_id: The name or ID of a server group. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: - One :class:`~openstack.compute.v2.server_group.ServerGroup` object + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param bool all_projects: When set to ``True``, search for server + groups by name across all projects. Note that this will likely + result in a higher chance of duplicates. Admin-only by default. 
+ + :returns: One :class:`~openstack.compute.v2.server_group.ServerGroup` or None - """ - return self._find(_server_group.ServerGroup, name_or_id, - ignore_missing=ignore_missing) + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + query = {} + if all_projects: + query['all_projects'] = True + return self._find( + _server_group.ServerGroup, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_server_group(self, server_group): """Get a single server group :param server_group: The value can be the ID of a server group or a - :class:`~openstack.compute.v2.server_group.ServerGroup` - instance. + :class:`~openstack.compute.v2.server_group.ServerGroup` + instance. :returns: A :class:`~openstack.compute.v2.server_group.ServerGroup` object. - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_server_group.ServerGroup, server_group) - def server_groups(self, **query): + def server_groups(self, *, all_projects=False, **query): """Return a generator of server groups - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param bool all_projects: When set to ``True``, lists servers groups + from all projects. Admin-only by default. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
:returns: A generator of ServerGroup objects :rtype: :class:`~openstack.compute.v2.server_group.ServerGroup` """ - return self._list(_server_group.ServerGroup, paginated=False, **query) + if all_projects: + query['all_projects'] = True + return self._list(_server_group.ServerGroup, **query) + + # ========== Hypervisors ========== - def hypervisors(self): - """Return a generator of hypervisor + def hypervisors(self, details=False, **query): + """Return a generator of hypervisors + + :param bool details: When set to the default, ``False``, + :class:`~openstack.compute.v2.hypervisor.Hypervisor` + instances will be returned with only basic information populated. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of hypervisor :rtype: class: `~openstack.compute.v2.hypervisor.Hypervisor` """ + base_path = '/os-hypervisors/detail' if details else None + if ( + 'hypervisor_hostname_pattern' in query + and not utils.supports_microversion(self, '2.53') + ): + # Until 2.53 we need to use other API + base_path = '/os-hypervisors/{pattern}/search'.format( + pattern=query.pop('hypervisor_hostname_pattern') + ) + return self._list(_hypervisor.Hypervisor, base_path=base_path, **query) + + def find_hypervisor( + self, + name_or_id, + ignore_missing=True, + *, + details=True, + ): + """Find a single hypervisor + + :param name_or_id: The name or ID of a hypervisor + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param bool details: When set to ``False`` + instances with only basic data will be returned. The default, + ``True``, will cause instances with full data to be returned. 
- return self._list(_hypervisor.Hypervisor, paginated=False) - - def find_hypervisor(self, name_or_id, ignore_missing=True): - """Find a hypervisor from name or id to get the corresponding info - - :param name_or_id: The name or id of a hypervisor - - :returns: - One: class:`~openstack.compute.v2.hypervisor.Hypervisor` object + :returns: One: class:`~openstack.compute.v2.hypervisor.Hypervisor` or None - """ - - return self._find(_hypervisor.Hypervisor, name_or_id, - ignore_missing=ignore_missing) + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + list_base_path = '/os-hypervisors/detail' if details else None + return self._find( + _hypervisor.Hypervisor, + name_or_id, + list_base_path=list_base_path, + ignore_missing=ignore_missing, + ) def get_hypervisor(self, hypervisor): """Get a single hypervisor :param hypervisor: The value can be the ID of a hypervisor or a - :class:`~openstack.compute.v2.hypervisor.Hypervisor` - instance. + :class:`~openstack.compute.v2.hypervisor.Hypervisor` + instance. :returns: A :class:`~openstack.compute.v2.hypervisor.Hypervisor` object. - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_hypervisor.Hypervisor, hypervisor) - def get_service(self, service): - """Get a single service + def get_hypervisor_uptime(self, hypervisor): + """Get uptime information for hypervisor - :param service: The value can be the ID of a serivce or a - :class:`~openstack.compute.v2.service.Service` - instance. + :param hypervisor: The value can be the ID of a hypervisor or a + :class:`~openstack.compute.v2.hypervisor.Hypervisor` + instance. :returns: - A :class:`~openstack.compute.v2.serivce.Service` object. 
- :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + A :class:`~openstack.compute.v2.hypervisor.Hypervisor` object. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_service.Service, service) + hypervisor = self._get_resource(_hypervisor.Hypervisor, hypervisor) + return hypervisor.get_uptime(self) + + # ========== Services ========== - def force_service_down(self, service, host, binary): - """Force a service down + def update_service_forced_down( + self, + service, + host=None, + binary=None, + forced=True, + ): + """Update service forced_down information :param service: Either the ID of a service or a - :class:`~openstack.compute.v2.server.Service` instance. + :class:`~openstack.compute.v2.service.Service` instance. :param str host: The host where service runs. :param str binary: The name of service. + :param bool forced: Whether or not this service was forced down + manually by an administrator after the service was fenced. 
- :returns: None + :returns: Updated service instance + :rtype: class: `~openstack.compute.v2.service.Service` """ - service = self._get_resource(_service.Service, service) - service.force_down(self.session, host, binary) + if utils.supports_microversion(self, '2.53'): + return self.update_service(service, forced_down=forced) - def disable_service(self, service, host, binary, disabled_reason=None): + service = self._get_resource(_service.Service, service) + if (not host or not binary) and ( + not service.host or not service.binary + ): + raise ValueError( + 'Either service instance should have host and binary ' + 'or they should be passed' + ) + service.set_forced_down(self, host, binary, forced) + + force_service_down = update_service_forced_down + + def disable_service( + self, + service, + host=None, + binary=None, + disabled_reason=None, + ): """Disable a service :param service: Either the ID of a service or a - :class:`~openstack.compute.v2.server.Service` instance. + :class:`~openstack.compute.v2.service.Service` instance. :param str host: The host where service runs. :param str binary: The name of service. :param str disabled_reason: The reason of force down a service. - :returns: None + :returns: Updated service instance + :rtype: class: `~openstack.compute.v2.service.Service` """ + if utils.supports_microversion(self, '2.53'): + attrs = {'status': 'disabled'} + if disabled_reason: + attrs['disabled_reason'] = disabled_reason + return self.update_service(service, **attrs) + service = self._get_resource(_service.Service, service) - service.disable(self.session, - host, binary, - disabled_reason) + return service.disable(self, host, binary, disabled_reason) - def enable_service(self, service, host, binary): + def enable_service(self, service, host=None, binary=None): """Enable a service :param service: Either the ID of a service or a - :class:`~openstack.compute.v2.server.Service` instance. + :class:`~openstack.compute.v2.service.Service` instance. 
:param str host: The host where service runs. :param str binary: The name of service. - - :returns: None + :returns: Updated service instance + :rtype: class: `~openstack.compute.v2.service.Service` """ + if utils.supports_microversion(self, '2.53'): + return self.update_service(service, status='enabled') + service = self._get_resource(_service.Service, service) - service.enable(self.session, host, binary) + return service.enable(self, host, binary) - def services(self): + def services(self, **query): """Return a generator of service + :params dict query: Query parameters :returns: A generator of service :rtype: class: `~openstack.compute.v2.service.Service` """ + return self._list(_service.Service, **query) + + def find_service(self, name_or_id, ignore_missing=True, **query): + """Find a service from name or id to get the corresponding info + + :param name_or_id: The name or id of a service + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Additional attributes like 'host' + + :returns: One: class:`~openstack.compute.v2.service.Service` or None + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + _service.Service, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def delete_service(self, service, ignore_missing=True): + """Delete a service + + :param service: + The value can be either the ID of a service or a + :class:`~openstack.compute.v2.service.Service` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the service does not exist. 
When set to ``True``, no + exception will be set when attempting to delete a nonexistent + service. + + :returns: ``None`` + """ + self._delete(_service.Service, service, ignore_missing=ignore_missing) + + def update_service(self, service, **attrs): + """Update a service + + :param service: Either the ID of a service or a + :class:`~openstack.compute.v2.service.Service` instance. + :param attrs: The attributes to update on the service represented + by ``service``. + + :returns: The updated service + :rtype: :class:`~openstack.compute.v2.service.Service` + """ + if utils.supports_microversion(self, '2.53'): + return self._update(_service.Service, service, **attrs) + + raise exceptions.SDKException( + 'Method require at least microversion 2.53' + ) + + # ========== Volume Attachments ========== + + # TODO(stephenfin): Make the volume argument required in 2.0 + def create_volume_attachment(self, server, volume=None, **attrs): + """Create a new volume attachment from attributes + + :param server: The value can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance that the + volume is attached to. + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment`, + comprised of the properties on the VolumeAttachment class. 
+ + :returns: The results of volume attachment creation + :rtype: + :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment` + """ + # if the user didn't pass the new 'volume' argument, they're probably + # calling things using a legacy parameter + if volume is None: + # there are two ways to pass this legacy parameter: either using + # the openstacksdk alias, 'volume_id', or using the real nova field + # name, 'volumeId' + if 'volume_id' in attrs: + volume_id = attrs.pop('volume_id') + elif 'volumeId' in attrs: + volume_id = attrs.pop('volumeId') + else: + # the user has used neither the new way nor the old way so they + # should start using the new way + # NOTE(stephenfin): we intentionally mimic the behavior of a + # missing positional parameter in stdlib + # https://github.com/python/cpython/blob/v3.10.0/Lib/inspect.py#L1464-L1467 + raise TypeError( + 'create_volume_attachment() missing 1 required positional ' + 'argument: volume' + ) + + # encourage users to the new way so we can eventually remove this + # mess of logic + deprecation_msg = ( + 'This method was called with a volume_id or volumeId ' + 'argument. This is legacy behavior that will be removed in ' + 'a future version. Update callers to use a volume argument.' + ) + warnings.warn( + deprecation_msg, + os_warnings.RemovedInSDK50Warning, + ) + else: + volume_id = resource.Resource._get_id(volume) + + server_id = resource.Resource._get_id(server) + return self._create( + _volume_attachment.VolumeAttachment, + server_id=server_id, + volume_id=volume_id, + **attrs, + ) + + def update_volume_attachment( + self, + server, + volume, + volume_id=None, + **attrs, + ): + """Update a volume attachment + + Note that the underlying API expects a volume ID, not a volume + attachment ID. There is currently no way to update volume attachments + by their own ID. 
+ + :param server: The value can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance that the + volume is attached to. + :param volume: The value can be either the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param volume_id: The ID of a volume to swap to. If this is not + specified, we will default to not swapping the volume. + :param attrs: The attributes to update on the volume attachment + represented by ``volume_attachment``. + + :returns: ``None`` + """ + new_volume_id = volume_id + + server_id = resource.Resource._get_id(server) + volume_id = resource.Resource._get_id(volume) + + if new_volume_id is None: + new_volume_id = volume_id + + return self._update( + _volume_attachment.VolumeAttachment, + None, + id=volume_id, + server_id=server_id, + volume_id=new_volume_id, + **attrs, + ) + + # TODO(stephenfin): Remove this hack in openstacksdk 2.0 + def _verify_server_volume_args(self, server, volume): + deprecation_msg = ( + 'The server and volume arguments to this function appear to ' + 'be backwards and have been reversed. This is a breaking ' + 'change introduced in openstacksdk 1.0. 
This shim will be ' + 'removed in a future version' + ) + + # if we have even partial type information and things look as they + # should, we can assume the user did the right thing + if isinstance(server, _server.Server) or isinstance( + volume, _volume.Volume + ): + return server, volume + + # conversely, if there's type info and things appear off, tell the user + if isinstance(server, _volume.Volume) or isinstance( + volume, _server.Server + ): + warnings.warn( + deprecation_msg, + os_warnings.RemovedInSDK50Warning, + ) + return volume, server + + # without type info we have to try a find the server corresponding to + # the provided ID and validate it + if self.find_server(server, ignore_missing=True) is not None: + return server, volume + else: + warnings.warn( + deprecation_msg, + os_warnings.RemovedInSDK50Warning, + ) + return volume, server + + def delete_volume_attachment(self, server, volume, ignore_missing=True): + """Delete a volume attachment + + Note that the underlying API expects a volume ID, not a volume + attachment ID. There is currently no way to delete volume attachments + by their own ID. - return self._list(_service.Service, paginated=False) + :param server: The value can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance that the + volume is attached to. + :param volume: The value can be the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the volume attachment does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + volume attachment. 
+ + :returns: ``None`` + """ + server, volume = self._verify_server_volume_args(server, volume) + + server_id = resource.Resource._get_id(server) + volume_id = resource.Resource._get_id(volume) + + self._delete( + _volume_attachment.VolumeAttachment, + None, + id=volume_id, + server_id=server_id, + ignore_missing=ignore_missing, + ) + + def get_volume_attachment(self, server, volume): + """Get a single volume attachment + + Note that the underlying API expects a volume ID, not a volume + attachment ID. There is currently no way to retrieve volume attachments + by their own ID. + + :param server: The value can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance that the + volume is attached to. + :param volume: The value can be the ID of a volume or a + :class:`~openstack.block_storage.v3.volume.Volume` instance. + + :returns: One + :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + server_id = resource.Resource._get_id(server) + volume_id = resource.Resource._get_id(volume) + + return self._get( + _volume_attachment.VolumeAttachment, + id=volume_id, + server_id=server_id, + ) + + def volume_attachments(self, server, **query): + """Return a generator of volume attachments + + :param server: The server can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server`. 
+ :params dict query: Query parameters + + :returns: A generator of VolumeAttachment objects + :rtype: + :class:`~openstack.compute.v2.volume_attachment.VolumeAttachment` + """ + server_id = resource.Resource._get_id(server) + return self._list( + _volume_attachment.VolumeAttachment, + server_id=server_id, + **query, + ) + + # ========== Server Migrations ========== + + def migrate_server(self, server, *, host=None): + """Migrate a server from one host to another + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param str host: The host to which to migrate the server. + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.migrate(self, host=host) + + def live_migrate_server( + self, + server, + host=None, + force=False, + block_migration=None, + disk_over_commit=None, + ): + """Live migrate a server from one host to target host + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param str host: The host to which to migrate the server. If the Nova + service is too old, the host parameter implies force=True which + causes the Nova scheduler to be bypassed. On such clouds, a + ``ValueError`` will be thrown if ``host`` is given without + ``force``. + :param bool force: Force a live-migration by not verifying the provided + destination host by the scheduler. This is unsafe and not + recommended. + :param block_migration: Perform a block live migration to the + destination host by the scheduler. Can be 'auto', True or False. + Some clouds are too old to support 'auto', in which case a + ValueError will be thrown. If omitted, the value will be 'auto' on + clouds that support it, and False on clouds that do not. + :param disk_over_commit: Whether to allow disk over-commit on the + destination host. 
(Optional) + :returns: None + """ + server = self._get_resource(_server.Server, server) + server.live_migrate( + self, + host=host, + force=force, + block_migration=block_migration, + disk_over_commit=disk_over_commit, + ) + + def abort_server_migration( + self, + server_migration, + server, + ignore_missing=True, + ): + """Abort an in-progress server migration + + :param server_migration: The value can be either the ID of a server + migration or a + :class:`~openstack.compute.v2.server_migration.ServerMigration` + instance. + :param server: This parameter needs to be specified when + ServerMigration ID is given as value. It can be either the ID of a + server or a :class:`~openstack.compute.v2.server.Server` instance + that the migration belongs to. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the server migration does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + server migration. + + :returns: ``None`` + """ + server_id = self._get_uri_attribute( + server_migration, + server, + 'server_id', + ) + server_migration = resource.Resource._get_id(server_migration) + + self._delete( + _server_migration.ServerMigration, + server_migration, + server_id=server_id, + ignore_missing=ignore_missing, + ) + + def force_complete_server_migration(self, server_migration, server=None): + """Force complete an in-progress server migration + + :param server_migration: The value can be either the ID of a server + migration or a + :class:`~openstack.compute.v2.server_migration.ServerMigration` + instance. + :param server: This parameter needs to be specified when + ServerMigration ID is given as value. It can be either the ID of a + server or a :class:`~openstack.compute.v2.server.Server` instance + that the migration belongs to. 
+ + :returns: ``None`` + """ + server_id = self._get_uri_attribute( + server_migration, + server, + 'server_id', + ) + server_migration = self._get_resource( + _server_migration.ServerMigration, + server_migration, + server_id=server_id, + ) + server_migration.force_complete(self) + + def get_server_migration( + self, + server_migration, + server, + ignore_missing=True, + ): + """Get a single server migration + + :param server_migration: The value can be the ID of a server migration + or a + :class:`~openstack.compute.v2.server_migration.ServerMigration` + instance. + :param server: This parameter need to be specified when ServerMigration + ID is given as value. It can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance that the + migration belongs to. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the server migration does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + server migration. + + :returns: One + :class:`~openstack.compute.v2.server_migration.ServerMigration` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + server_id = self._get_uri_attribute( + server_migration, + server, + 'server_id', + ) + server_migration = resource.Resource._get_id(server_migration) + + return self._get( + _server_migration.ServerMigration, + server_migration, + server_id=server_id, + ignore_missing=ignore_missing, + ) + + def server_migrations(self, server): + """Return a generator of migrations for a server. + + :param server: The server can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server`. 
+ + :returns: A generator of ServerMigration objects + :rtype: + :class:`~openstack.compute.v2.server_migration.ServerMigration` + """ + server_id = resource.Resource._get_id(server) + return self._list( + _server_migration.ServerMigration, + server_id=server_id, + ) + + # ========== Migrations ========== + + def migrations(self, **query): + """Return a generator of migrations for all servers. + + :param kwargs query: Optional query parameters to be sent to limit + the migrations being returned. + :returns: A generator of Migration objects + :rtype: :class:`~openstack.compute.v2.migration.Migration` + """ + return self._list(_migration.Migration, **query) + + # ========== Server diagnostics ========== + + def get_server_diagnostics(self, server): + """Get a single server diagnostics + + :param server: This parameter need to be specified when ServerInterface + ID is given as value. It can be either the ID of a + server or a :class:`~openstack.compute.v2.server.Server` + instance that the interface belongs to. + + :returns: One + :class:`~openstack.compute.v2.server_diagnostics.ServerDiagnostics` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + server_id = self._get_resource(_server.Server, server).id + return self._get( + _server_diagnostics.ServerDiagnostics, + server_id=server_id, + requires_id=False, + ) + + # ========== Project usage ============ + + def usages(self, start=None, end=None, **query): + """Get project usages. + + :param datetime.datetime start: Usage range start date. + :param datetime.datetime end: Usage range end date. + :param dict query: Additional query parameters to use. + :returns: A list of compute ``Usage`` objects. + """ + if start is not None: + query['start'] = start.isoformat() + + if end is not None: + query['end'] = end.isoformat() + + return self._list(_usage.Usage, **query) + + def get_usage(self, project, start=None, end=None, **query): + """Get usage for a single project. 
+ + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the usage should be retrieved. + :param datetime.datetime start: Usage range start date. + :param datetime.datetime end: Usage range end date. + :param dict query: Additional query parameters to use. + :returns: A compute ``Usage`` object. + """ + project = self._get_resource(_project.Project, project) + + if start is not None: + query['start'] = start.isoformat() + + if end is not None: + query['end'] = end.isoformat() + + res = self._get_resource(_usage.Usage, project.id) + return res.fetch(self, **query) + + # ========== Server consoles ========== + + def create_server_remote_console(self, server, **attrs): + """Create a remote console on the server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :returns: One + :class:`~openstack.compute.v2.server_remote_console.ServerRemoteConsole` + """ + server_id = resource.Resource._get_id(server) + return self._create( + _src.ServerRemoteConsole, + server_id=server_id, + **attrs, + ) + + def get_server_console_url(self, server, console_type): + """Create a remote console on the server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param console_type: Type of the console connection. + :returns: Dictionary with console type and url + """ + server = self._get_resource(_server.Server, server) + return server.get_console_url(self, console_type) + + def validate_console_auth_token(self, console_token): + """Lookup console connection information for a console auth token. + + :param console_token: The console auth token as returned in the URL + from get_server_console_url. + :returns: Dictionary with connection details, varying by console type. 
+ """ + return self._get(_console_auth_token.ConsoleAuthToken, console_token) + + def get_server_console_output(self, server, length=None): + """Return the console output for a server. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param length: Optional number of line to fetch from the end of console + log. All lines will be returned if this is not specified. + :returns: The console output as a dict. Control characters will be + escaped to create a valid JSON string. + """ + server = self._get_resource(_server.Server, server) + return server.get_console_output(self, length=length) + + def create_console(self, server, console_type, console_protocol=None): + """Create a remote console on the server. + + When microversion supported is higher then 2.6 remote console is + created, otherwise deprecated call to get server console is issued. + + :param server: Either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance. + :param console_type: Type of the remote console. Supported values as: + * novnc + * spice-html5 + * rdp-html5 + * serial + * webmks (supported after 2.8) + * spice-direct (supported after 2.99) + :param console_protocol: Optional console protocol (is respected only + after microversion 2.6). + + :returns: Dictionary with console type, connection details (a url), and + optionally protocol. + """ + server = self._get_resource(_server.Server, server) + # NOTE: novaclient supports undocumented type xcpvnc also supported + # historically by OSC. We support it, but do not document either. 
+ if utils.supports_microversion(self, '2.6'): + console = self._create( + _src.ServerRemoteConsole, + server_id=server.id, + type=console_type, + protocol=console_protocol, + ) + return console.to_dict() + else: + return server.get_console_url(self, console_type) + + # ========== Quota class sets ========== + + def get_quota_class_set(self, quota_class_set='default'): + """Get a single quota class set + + Only one quota class is permitted, ``default``. + + :param quota_class_set: The value can be the ID of a quota class set + (only ``default`` is supported) or a + :class:`~openstack.compute.v2.quota_class_set.QuotaClassSet` + instance. + + :returns: One + :class:`~openstack.compute.v2.quota_class_set.QuotaClassSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_quota_class_set.QuotaClassSet, quota_class_set) + + def update_quota_class_set(self, quota_class_set, **attrs): + """Update a QuotaClassSet. + + Only one quota class is permitted, ``default``. + + :param quota_class_set: Either the ID of a quota class set (only + ``default`` is supported) or a + :class:`~openstack.compute.v2.quota_class_set.QuotaClassSet` + instance. + :param attrs: The attributes to update on the QuotaClassSet represented + by ``quota_class_set``. + + :returns: The updated QuotaSet + :rtype: :class:`~openstack.compute.v2.quota_set.QuotaSet` + """ + return self._update( + _quota_class_set.QuotaClassSet, quota_class_set, **attrs + ) + + # ========== Quota sets ========== + + def get_quota_set(self, project, usage=False, **query): + """Show QuotaSet information for the project. + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be retrieved + :param bool usage: When set to ``True`` quota usage and reservations + would be filled. + :param dict query: Additional query parameters to use. 
+ + :returns: One :class:`~openstack.compute.v2.quota_set.QuotaSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, + None, + project_id=project.id, + ) + base_path = '/os-quota-sets/%(project_id)s/detail' if usage else None + return res.fetch(self, base_path=base_path, **query) + + def get_quota_set_defaults(self, project): + """Show QuotaSet defaults for the project. + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be retrieved + + :returns: One :class:`~openstack.compute.v2.quota_set.QuotaSet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, + None, + project_id=project.id, + ) + return res.fetch( + self, base_path='/os-quota-sets/%(project_id)s/defaults' + ) + + def revert_quota_set(self, project, **query): + """Reset Quota for the project/user. + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be reset. + :param dict query: Additional parameters to be used. + + :returns: ``None`` + """ + project = self._get_resource(_project.Project, project) + res = self._get_resource( + _quota_set.QuotaSet, None, project_id=project.id + ) + + if not query: + query = {} + return res.delete(self, **query) + + def update_quota_set(self, project, *, user=None, **attrs): + """Update a QuotaSet. + + :param project: ID or instance of + :class:`~openstack.identity.project.Project` of the project for + which the quota should be reset. + :param user_id: Optional ID of the user to set quotas as. + :param attrs: The attributes to update on the QuotaSet represented + by ``quota_set``. 
+ + :returns: The updated QuotaSet + :rtype: :class:`~openstack.compute.v2.quota_set.QuotaSet` + """ + if 'project_id' in attrs or isinstance(project, _quota_set.QuotaSet): + warnings.warn( + "The signature of 'update_quota_set' has changed and it " + "now expects a Project as the first argument, in line " + "with the other quota set methods.", + os_warnings.RemovedInSDK50Warning, + ) + if user is not None: + raise exceptions.SDKException( + 'The user argument can only be provided once the entire ' + 'call has been updated.' + ) + + if 'query' in attrs: + warnings.warn( + "The query argument is no longer supported and should " + "be removed.", + os_warnings.RemovedInSDK50Warning, + ) + query = attrs.pop('query') or {} + else: + query = {} + + res = self._get_resource(_quota_set.QuotaSet, project, **attrs) + return res.commit(self, **query) + else: + project = self._get_resource(_project.Project, project) + attrs['project_id'] = project.id + + if user: + user = self._get_resource(_user.User, user) + query = {'user_id': user.id} + else: + query = {} + + # we don't use Proxy._update since that doesn't allow passing + # arbitrary query string parameters + quota_set = self._get_resource(_quota_set.QuotaSet, None, **attrs) + return quota_set.commit(self, **query) + + # ========== Server actions ========== + + def get_server_action(self, server_action, server, ignore_missing=True): + """Get a single server action + + :param server_action: The value can be the ID of a server action or a + :class:`~openstack.compute.v2.server_action.ServerAction` instance. + :param server: This parameter need to be specified when ServerAction ID + is given as value. It can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server` instance that the + action is associated with. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the server action does not exist. 
When set to ``True``, no + exception will be set when attempting to retrieve a non-existent + server action. + + :returns: One :class:`~openstack.compute.v2.server_action.ServerAction` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + server_id = self._get_uri_attribute(server_action, server, 'server_id') + server_action = resource.Resource._get_id(server_action) + + return self._get( + _server_action.ServerAction, + server_id=server_id, + request_id=server_action, + ignore_missing=ignore_missing, + ) + + def server_actions(self, server, **query): + """Return a generator of server actions + + :param server: The server can be either the ID of a server or a + :class:`~openstack.compute.v2.server.Server`. + :param kwargs query: Optional query parameters to be sent to limit + the actions being returned. + + :returns: A generator of ServerAction objects + :rtype: :class:`~openstack.compute.v2.server_action.ServerAction` + """ + server_id = resource.Resource._get_id(server) + return self._list( + _server_action.ServerAction, + server_id=server_id, + **query, + ) + + # ========== Utilities ========== + + def wait_for_server( + self, + server: _server.Server, + status: str = 'ACTIVE', + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> _server.Server: + """Wait for a server to be in a particular status. + + :param server: The :class:`~openstack.compute.v2.server.Server` to wait + on to reach the specified status. + :type server: :class:`~openstack.compute.v2.server.Server`: + :param status: Desired status. + :type status: str + :param failures: Statuses that would be interpreted as failures. + :type failures: :py:class:`list` + :param interval: Number of seconds to wait before to consecutive + checks. Default to 2. + :type interval: int + :param wait: Maximum number of seconds to wait before the change. 
+ Default to 120. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + :type callback: callable + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to the desired status failed to occur in specified seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + has transited to one of the failure statuses. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute. + """ + failures = ['ERROR'] if failures is None else failures + return resource.wait_for_status( + self, + server, + status, + failures, + interval, + wait, + callback=callback, + ) + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. 
+ :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+ """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + def _get_cleanup_dependencies(self): + return { + 'compute': { + 'before': ['block_storage', 'network', 'identity', 'image'] + } + } + + def _service_cleanup( + self, + dry_run=True, + client_status_queue=None, + identified_resources=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + if self.should_skip_resource_cleanup("server", skip_resources): + return + + servers = [] + for obj in self.servers(): + need_delete = self._service_cleanup_del_res( + self.delete_server, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + if not dry_run and need_delete: + # In the dry run we identified, that server will go. To propely + # identify consequences we need to tell others, that the port + # will disappear as well + for port in self._connection.network.ports(device_id=obj.id): + identified_resources[port.id] = port + servers.append(obj) + + # We actually need to wait for servers to really disappear, since they + # might be still holding ports on the subnet + for server in servers: + self.wait_for_delete(server) + + for obj in self.server_groups(): + # Do not delete server groups that still have members + if obj.member_ids: + continue + + self._service_cleanup_del_res( + self.delete_server_group, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) diff --git a/openstack/compute/v2/aggregate.py b/openstack/compute/v2/aggregate.py new file mode 100644 index 0000000000..787d57a093 --- /dev/null +++ b/openstack/compute/v2/aggregate.py @@ -0,0 +1,84 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Aggregate(resource.Resource): + resource_key = 'aggregate' + resources_key = 'aggregates' + base_path = '/os-aggregates' + + # capabilities + allow_create = True + allow_fetch = True + allow_delete = True + allow_list = True + allow_commit = True + + # Properties + #: Availability zone of aggregate + availability_zone = resource.Body('availability_zone') + #: The date and time when the resource was created. + created_at = resource.Body('created_at') + #: The date and time when the resource was deleted. + deleted_at = resource.Body('deleted_at') + #: Deleted? 
+ is_deleted = resource.Body('deleted', type=bool) + #: Name of aggregate + name = resource.Body('name') + #: Hosts + hosts = resource.Body('hosts', type=list) + #: Metadata + metadata = resource.Body('metadata', type=dict) + #: The date and time when the resource was updated + updated_at = resource.Body('updated_at') + #: UUID + uuid = resource.Body('uuid') + # Image pre-caching introduced in 2.81 + _max_microversion = '2.81' + + def _action(self, session, body, microversion=None): + """Preform aggregate actions given the message body.""" + url = utils.urljoin(self.base_path, self.id, 'action') + response = session.post(url, json=body, microversion=microversion) + exceptions.raise_from_response(response) + aggregate = Aggregate() + aggregate._translate_response(response) + return aggregate + + def add_host(self, session, host): + """Adds a host to an aggregate.""" + body = {'add_host': {'host': host}} + return self._action(session, body) + + def remove_host(self, session, host): + """Removes a host from an aggregate.""" + body = {'remove_host': {'host': host}} + return self._action(session, body) + + def set_metadata(self, session, metadata): + """Creates or replaces metadata for an aggregate.""" + body = {'set_metadata': {'metadata': metadata}} + return self._action(session, body) + + def precache_images(self, session, images): + """Requests image pre-caching""" + body = {'cache': images} + url = utils.urljoin(self.base_path, self.id, 'images') + response = session.post( + url, json=body, microversion=self._max_microversion + ) + exceptions.raise_from_response(response) + # This API has no result diff --git a/openstack/compute/v2/availability_zone.py b/openstack/compute/v2/availability_zone.py index 56f49295c0..aecfecb2df 100644 --- a/openstack/compute/v2/availability_zone.py +++ b/openstack/compute/v2/availability_zone.py @@ -10,27 +10,23 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.compute import compute_service -from openstack import resource2 +from openstack import resource -class AvailabilityZone(resource2.Resource): +class AvailabilityZone(resource.Resource): resources_key = 'availabilityZoneInfo' base_path = '/os-availability-zone' - service = compute_service.ComputeService() - # capabilities allow_list = True # Properties #: name of availability zone - name = resource2.Body('zoneName') + name = resource.Body('zoneName') #: state of availability zone - state = resource2.Body('zoneState') + state = resource.Body('zoneState') #: hosts of availability zone - hosts = resource2.Body('hosts') + hosts = resource.Body('hosts') -class AvailabilityZoneDetail(AvailabilityZone): - base_path = '/os-availability-zone/detail' +AvailabilityZoneDetail = AvailabilityZone diff --git a/openstack/compute/v2/console_auth_token.py b/openstack/compute/v2/console_auth_token.py new file mode 100644 index 0000000000..c6aa5ac95d --- /dev/null +++ b/openstack/compute/v2/console_auth_token.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class ConsoleAuthToken(resource.Resource): + resource_key = 'console' + base_path = '/os-console-auth-tokens' + + # capabilities + allow_fetch = True + + _max_microversion = '2.99' + + # Properties + #: Instance UUID + instance_uuid = resource.Body('instance_uuid') + #: Hypervisor host + host = resource.Body('host') + #: Hypervisor port + port = resource.Body('port') + #: Hypervisor TLS port + tls_port = resource.Body('tls_port') + #: Internal access path + internal_access_path = resource.Body('internal_access_path') diff --git a/openstack/compute/v2/extension.py b/openstack/compute/v2/extension.py index 6d36810340..c4da9681ba 100644 --- a/openstack/compute/v2/extension.py +++ b/openstack/compute/v2/extension.py @@ -10,32 +10,29 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service -from openstack import resource2 +from openstack import resource -class Extension(resource2.Resource): +class Extension(resource.Resource): resource_key = 'extension' resources_key = 'extensions' base_path = '/extensions' - service = compute_service.ComputeService() - id_attribute = "alias" # capabilities - allow_get = True + allow_fetch = True allow_list = True # Properties #: A short name by which this extension is also known. - alias = resource2.Body('alias', alternate_id=True) + alias = resource.Body('alias', alternate_id=True) #: Text describing this extension's purpose. - description = resource2.Body('description') + description = resource.Body('description') #: Links pertaining to this extension. This is a list of dictionaries, #: each including keys ``href`` and ``rel``. - links = resource2.Body('links') + links = resource.Body('links', type=list, list_type=dict) #: The name of the extension. - name = resource2.Body('name') + name = resource.Body('name') #: A URL pointing to the namespace for this extension. 
- namespace = resource2.Body('namespace') + namespace = resource.Body('namespace') #: Timestamp when this extension was last updated. - updated_at = resource2.Body('updated') + updated_at = resource.Body('updated') diff --git a/openstack/compute/v2/flavor.py b/openstack/compute/v2/flavor.py index 34df465ce3..594d111ac6 100644 --- a/openstack/compute/v2/flavor.py +++ b/openstack/compute/v2/flavor.py @@ -10,56 +10,244 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service -from openstack import resource2 +import typing as ty +from keystoneauth1 import adapter +import typing_extensions as ty_ext -class Flavor(resource2.Resource): +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Flavor(resource.Resource): resource_key = 'flavor' resources_key = 'flavors' base_path = '/flavors' - service = compute_service.ComputeService() # capabilities allow_create = True - allow_get = True + allow_fetch = True allow_delete = True allow_list = True + allow_commit = True - _query_mapping = resource2.QueryParameters("sort_key", "sort_dir", - min_disk="minDisk", - min_ram="minRam") + _query_mapping = resource.QueryParameters( + "sort_key", + "sort_dir", + "is_public", + min_disk="minDisk", + min_ram="minRam", + ) + + # extra_specs introduced in 2.61 + _max_microversion = '2.61' # Properties - #: Links pertaining to this flavor. This is a list of dictionaries, - #: each including keys ``href`` and ``rel``. - links = resource2.Body('links') #: The name of this flavor. - name = resource2.Body('name') + name = resource.Body('name', alias='original_name') + #: The name of this flavor when returned by server list/show + original_name = resource.Body('original_name') + #: The description of the flavor. + description = resource.Body('description') #: Size of the disk this flavor offers. 
*Type: int* - disk = resource2.Body('disk', type=int) + disk = resource.Body('disk', type=int, default=0) #: ``True`` if this is a publicly visible flavor. ``False`` if this is #: a private image. *Type: bool* - is_public = resource2.Body('os-flavor-access:is_public', type=bool) + is_public = resource.Body( + 'os-flavor-access:is_public', type=bool, default=True + ) #: The amount of RAM (in MB) this flavor offers. *Type: int* - ram = resource2.Body('ram', type=int) + ram = resource.Body('ram', type=int, default=0) #: The number of virtual CPUs this flavor offers. *Type: int* - vcpus = resource2.Body('vcpus', type=int) + vcpus = resource.Body('vcpus', type=int, default=0) #: Size of the swap partitions. - swap = resource2.Body('swap') + swap = resource.Body('swap', type=int, default=0) #: Size of the ephemeral data disk attached to this server. *Type: int* - ephemeral = resource2.Body('OS-FLV-EXT-DATA:ephemeral', type=int) + ephemeral = resource.Body('OS-FLV-EXT-DATA:ephemeral', type=int, default=0) #: ``True`` if this flavor is disabled, ``False`` if not. *Type: bool* - is_disabled = resource2.Body('OS-FLV-DISABLED:disabled', type=bool) + is_disabled = resource.Body('OS-FLV-DISABLED:disabled', type=bool) #: The bandwidth scaling factor this flavor receives on the network. - rxtx_factor = resource2.Body('rxtx_factor', type=float) + rxtx_factor = resource.Body('rxtx_factor', type=float) + # TODO(mordred) extra_specs can historically also come from + # OS-FLV-WITH-EXT-SPECS:extra_specs. Do we care? + #: A dictionary of the flavor's extra-specs key-and-value pairs. + extra_specs = resource.Body('extra_specs', type=dict, default={}) + def __getattribute__(self, name): + """Return an attribute on this instance -class FlavorDetail(Flavor): - base_path = '/flavors/detail' + This is mostly a pass-through except for a specialization on + the 'id' name, as this can exist under a different name via the + `alternate_id` argument to resource.Body. 
+ """ + if name == "id": + # ID handling in flavor is very tricky. Sometimes we get ID back, + # sometimes we get only name (but it is same as id), sometimes we + # get original_name back, but it is still id. + # To get this handled try sequentially to access it from various + # places until we find first non-empty value. + for xname in ["id", "name", "original_name"]: + if self._body.get(xname): + return self._body[xname] + else: + return super().__getattribute__(name) - allow_create = False - allow_get = False - allow_update = False - allow_delete = False - allow_list = True + @classmethod + def list( + cls, + session: adapter.Adapter, + paginated: bool = True, + base_path: str | None = '/flavors/detail', + allow_unknown_params: bool = False, + *, + microversion: str | None = None, + headers: dict[str, str] | None = None, + max_items: int | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: + # Find will invoke list when name was passed. Since we want to return + # flavor with details (same as direct get) we need to swap default here + # and list with "/flavors" if no details explicitely requested + if 'is_public' not in params or params['is_public'] is None: + # is_public is ternary - None means give all flavors. + # Force it to string to avoid requests skipping it. 
+ params['is_public'] = 'None' + return super().list( + session, + paginated=paginated, + base_path=base_path, + allow_unknown_params=allow_unknown_params, + microversion=microversion, + headers=headers, + max_items=max_items, + **params, + ) + + def _action(self, session, body, microversion=None): + """Preform flavor actions given the message body.""" + url = utils.urljoin(Flavor.base_path, self.id, 'action') + headers = {'Accept': ''} + attrs = {} + if microversion: + # Do not reset microversion if it is set on a session level + attrs['microversion'] = microversion + response = session.post(url, json=body, headers=headers, **attrs) + exceptions.raise_from_response(response) + return response + + def add_tenant_access(self, session, tenant): + """Adds flavor access to a tenant and flavor. + + :param session: The session to use for making this request. + :param tenant: + :returns: None + """ + body = {'addTenantAccess': {'tenant': tenant}} + self._action(session, body) + + def remove_tenant_access(self, session, tenant): + """Removes flavor access to a tenant and flavor. + + :param session: The session to use for making this request. + :param tenant: + :returns: None + """ + body = {'removeTenantAccess': {'tenant': tenant}} + self._action(session, body) + + def get_access(self, session): + """Lists tenants who have access to a private flavor + + By default, only administrators can manage private flavor access. A + private flavor has ``is_public`` set to false while a public flavor has + ``is_public`` set to true. + + :param session: The session to use for making this request. 
+ :return: List of dicts with flavor_id and tenant_id attributes + """ + url = utils.urljoin(Flavor.base_path, self.id, 'os-flavor-access') + response = session.get(url) + exceptions.raise_from_response(response) + return response.json().get('flavor_access', []) + + def fetch_extra_specs(self, session): + """Fetch extra specs of the flavor + + Starting with 2.61 extra specs are returned with the flavor details, + before that a separate call is required. + + :param session: The session to use for making this request. + :returns: The updated flavor. + """ + url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs') + microversion = self._get_microversion(session) + response = session.get(url, microversion=microversion) + exceptions.raise_from_response(response) + specs = response.json().get('extra_specs', {}) + self._update(extra_specs=specs) + return self + + def create_extra_specs(self, session, specs): + """Creates extra specs for a flavor. + + :param session: The session to use for making this request. + :param specs: + :returns: The updated flavor. + """ + url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs') + microversion = self._get_microversion(session) + response = session.post( + url, json={'extra_specs': specs}, microversion=microversion + ) + exceptions.raise_from_response(response) + specs = response.json().get('extra_specs', {}) + self._update(extra_specs=specs) + return self + + def get_extra_specs_property(self, session, prop): + """Get an individual extra spec property. + + :param session: The session to use for making this request. + :param prop: The property to fetch. + :returns: The value of the property if it exists, else ``None``. 
+ """ + url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop) + microversion = self._get_microversion(session) + response = session.get(url, microversion=microversion) + exceptions.raise_from_response(response) + val = response.json().get(prop) + return val + + def update_extra_specs_property(self, session, prop, val): + """Update an extra spec for a flavor. + + :param session: The session to use for making this request. + :param prop: The property to update. + :param val: The value to update with. + :returns: The updated value of the property. + """ + url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop) + microversion = self._get_microversion(session) + response = session.put( + url, json={prop: val}, microversion=microversion + ) + exceptions.raise_from_response(response) + val = response.json().get(prop) + return val + + def delete_extra_specs_property(self, session, prop): + """Delete an extra spec for a flavor. + + :param session: The session to use for making this request. + :param prop: The property to delete. + :returns: None + """ + url = utils.urljoin(Flavor.base_path, self.id, 'os-extra_specs', prop) + microversion = self._get_microversion(session) + response = session.delete(url, microversion=microversion) + exceptions.raise_from_response(response) + + +# TODO(stephenfin): Deprecate this for removal in 2.0 +FlavorDetail = Flavor diff --git a/openstack/compute/v2/hypervisor.py b/openstack/compute/v2/hypervisor.py index 25293f7e91..c80016941b 100644 --- a/openstack/compute/v2/hypervisor.py +++ b/openstack/compute/v2/hypervisor.py @@ -10,58 +10,95 @@ # License for the specific language governing permissions and limitations # under the License. 
+import warnings -from openstack.compute import compute_service -from openstack import resource2 +from openstack import exceptions +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings -class Hypervisor(resource2.Resource): +class Hypervisor(resource.Resource): resource_key = 'hypervisor' resources_key = 'hypervisors' base_path = '/os-hypervisors' - service = compute_service.ComputeService() - # capabilities - allow_get = True + allow_fetch = True allow_list = True + _query_mapping = resource.QueryParameters( + 'hypervisor_hostname_pattern', 'with_servers' + ) + + # Lot of attributes are dropped in 2.88 + _max_microversion = '2.88' + # Properties - #: Status of hypervisor - status = resource2.Body('status') - #: State of hypervisor - state = resource2.Body('state') - #: Name of hypervisor - name = resource2.Body('hypervisor_hostname') - #: Service details - service_details = resource2.Body('service') - #: Count of the VCPUs in use - vcpus_used = resource2.Body('vcpus_used') - #: Count of all VCPUs - vcpus = resource2.Body('vcpus') - #: Count of the running virtual machines - running_vms = resource2.Body('running_vms') + #: Information about the hypervisor's CPU. Up to 2.28 it was string. 
+ cpu_info = resource.Body('cpu_info') + #: IP address of the host + host_ip = resource.Body('host_ip') #: The type of hypervisor - hypervisor_type = resource2.Body('hypervisor_type') + hypervisor_type = resource.Body('hypervisor_type') #: Version of the hypervisor - hypervisor_version = resource2.Body('hypervisor_version') + hypervisor_version = resource.Body('hypervisor_version') + #: Name of hypervisor + name = resource.Body('hypervisor_hostname') + #: Service details + service_details = resource.Body('service', type=dict) + #: List of Servers + servers = resource.Body('servers', type=list, list_type=dict) + #: State of hypervisor + state = resource.Body('state') + #: Status of hypervisor + status = resource.Body('status') + #: The total uptime of the hypervisor and information about average load. + #: This attribute is set only when querying uptime explicitly. + uptime = resource.Body('uptime') + + # Attributes deprecated with 2.88 + #: Measurement of the hypervisor's current workload + current_workload = resource.Body('current_workload', deprecated=True) + #: Disk space available to the scheduler + disk_available = resource.Body("disk_available_least", deprecated=True) #: The amount, in gigabytes, of local storage used - local_disk_used = resource2.Body('local_gb_used') + local_disk_used = resource.Body('local_gb_used', deprecated=True) #: The amount, in gigabytes, of the local storage device - local_disk_size = resource2.Body('local_gb') + local_disk_size = resource.Body('local_gb', deprecated=True) #: The amount, in gigabytes, of free space on the local storage device - local_disk_free = resource2.Body('free_disk_gb') + local_disk_free = resource.Body('free_disk_gb', deprecated=True) #: The amount, in megabytes, of memory - memory_used = resource2.Body('memory_mb_used') + memory_used = resource.Body('memory_mb_used', deprecated=True) #: The amount, in megabytes, of total memory - memory_size = resource2.Body('memory_mb') + memory_size = 
resource.Body('memory_mb', deprecated=True) #: The amount, in megabytes, of available memory - memory_free = resource2.Body('free_ram_mb') - #: Measurement of the hypervisor's current workload - current_workload = resource2.Body('current_workload') - #: Information about the hypervisor's CPU - cpu_info = resource2.Body('cpu_info') - #: IP address of the host - host_ip = resource2.Body('host_ip') - #: Disk space available to the scheduler - disk_available = resource2.Body("disk_available_least") + memory_free = resource.Body('free_ram_mb', deprecated=True) + #: Count of the running virtual machines + running_vms = resource.Body('running_vms', deprecated=True) + #: Count of the VCPUs in use + vcpus_used = resource.Body('vcpus_used', deprecated=True) + #: Count of all VCPUs + vcpus = resource.Body('vcpus', deprecated=True) + + def get_uptime(self, session): + """Get uptime information for the hypervisor + + Updates uptime attribute of the hypervisor object + """ + warnings.warn( + "This call is deprecated and is only available until Nova 2.88", + os_warnings.LegacyAPIWarning, + ) + if utils.supports_microversion(session, '2.88'): + raise exceptions.SDKException( + 'Hypervisor.get_uptime is not supported anymore' + ) + url = utils.urljoin(self.base_path, self.id, 'uptime') + microversion = self._get_microversion(session) + response = session.get(url, microversion=microversion) + self._translate_response(response) + return self + + +HypervisorDetail = Hypervisor diff --git a/openstack/compute/v2/image.py b/openstack/compute/v2/image.py index a0cd539b8f..24a3e5a2d4 100644 --- a/openstack/compute/v2/image.py +++ b/openstack/compute/v2/image.py @@ -9,57 +9,51 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
+from openstack.common import metadata +from openstack import resource -from openstack.compute import compute_service -from openstack.compute.v2 import metadata -from openstack import resource2 - -class Image(resource2.Resource, metadata.MetadataMixin): +class Image(resource.Resource, metadata.MetadataMixin): resource_key = 'image' resources_key = 'images' base_path = '/images' - service = compute_service.ComputeService() # capabilities - allow_get = True + allow_fetch = True allow_delete = True allow_list = True - _query_mapping = resource2.QueryParameters("server", "name", - "status", "type", - min_disk="minDisk", - min_ram="minRam", - changes_since="changes-since") + _query_mapping = resource.QueryParameters( + "server", + "name", + "status", + "type", + min_disk="minDisk", + min_ram="minRam", + changes_since="changes-since", + ) # Properties #: Links pertaining to this image. This is a list of dictionaries, #: each including keys ``href`` and ``rel``, and optionally ``type``. - links = resource2.Body('links') + links = resource.Body('links') #: The name of this image. - name = resource2.Body('name') + name = resource.Body('name') #: Timestamp when the image was created. - created_at = resource2.Body('created') - #: Metadata pertaining to this image. *Type: dict* - metadata = resource2.Body('metadata', type=dict) + created_at = resource.Body('created') #: The mimimum disk size. *Type: int* - min_disk = resource2.Body('minDisk', type=int) + min_disk = resource.Body('minDisk', type=int) #: The minimum RAM size. *Type: int* - min_ram = resource2.Body('minRam', type=int) + min_ram = resource.Body('minRam', type=int) #: If this image is still building, its progress is represented here. #: Once an image is created, progres will be 100. *Type: int* - progress = resource2.Body('progress', type=int) + progress = resource.Body('progress', type=int) #: The status of this image. 
- status = resource2.Body('status') + status = resource.Body('status') #: Timestamp when the image was updated. - updated_at = resource2.Body('updated') + updated_at = resource.Body('updated') #: Size of the image in bytes. *Type: int* - size = resource2.Body('OS-EXT-IMG-SIZE:size', type=int) - + size = resource.Body('OS-EXT-IMG-SIZE:size', type=int) -class ImageDetail(Image): - base_path = '/images/detail' - allow_get = False - allow_delete = False - allow_list = True +ImageDetail = Image diff --git a/openstack/compute/v2/keypair.py b/openstack/compute/v2/keypair.py index 26580068eb..3a7bb958e8 100644 --- a/openstack/compute/v2/keypair.py +++ b/openstack/compute/v2/keypair.py @@ -10,48 +10,75 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service -from openstack import resource2 +from openstack import resource -class Keypair(resource2.Resource): +class Keypair(resource.Resource): resource_key = 'keypair' resources_key = 'keypairs' base_path = '/os-keypairs' - service = compute_service.ComputeService() + + _query_mapping = resource.QueryParameters('user_id') # capabilities allow_create = True - allow_get = True + allow_fetch = True allow_delete = True allow_list = True + _max_microversion = '2.10' + # Properties + #: The date and time when the resource was created. + created_at = resource.Body('created_at') + #: A boolean indicates whether this keypair is deleted or not. + is_deleted = resource.Body('deleted', type=bool) #: The short fingerprint associated with the ``public_key`` for #: this keypair. - fingerprint = resource2.Body('fingerprint') + fingerprint = resource.Body('fingerprint') # NOTE: There is in fact an 'id' field. However, it's not useful # because all operations use the 'name' as an identifier. # Additionally, the 'id' field only appears *after* creation, # so suddenly you have an 'id' field filled in after the fact, - # and it just gets in the way. 
We need to cover this up by having - # the name be both our id and name. + # and it just gets in the way. We need to cover this up by listing + # name as alternate_id and listing id as coming from name. #: The id identifying the keypair - id = resource2.Body('name') + id = resource.Body('name') #: A name identifying the keypair - name = resource2.Body('name', alternate_id=True) + name = resource.Body('name', alternate_id=True) #: The private key for the keypair - private_key = resource2.Body('private_key') + private_key = resource.Body('private_key') #: The SSH public key that is paired with the server. - public_key = resource2.Body('public_key') + public_key = resource.Body('public_key') + #: The type of the keypair. + type = resource.Body('type', default='ssh') + #: The user_id for a keypair. + user_id = resource.Body('user_id') + + def _consume_attrs(self, mapping, attrs): + # TODO(mordred) This should not be required. However, without doing + # it **SOMETIMES** keypair picks up id and not name. This is a hammer. + if 'id' in attrs: + attrs.setdefault('name', attrs.pop('id')) + return super()._consume_attrs(mapping, attrs) @classmethod - def list(cls, session, paginated=False): - resp = session.get(cls.base_path, endpoint_filter=cls.service, - headers={"Accept": "application/json"}) - resp = resp.json() - resp = resp[cls.resources_key] - - for data in resp: - value = cls.existing(**data[cls.resource_key]) - yield value + def existing(cls, connection=None, **kwargs): + """Create an instance of an existing remote resource. + + When creating the instance set the ``_synchronized`` parameter + of :class:`Resource` to ``True`` to indicate that it represents the + state of an existing server-side resource. As such, all attributes + passed in ``**kwargs`` are considered "clean", such that an immediate + :meth:`update` call would not generate a body of attributes to be + modified on the server. 
+ + :param dict kwargs: Each of the named arguments will be set as + attributes on the resulting Resource object. + """ + # Listing KPs return list with resource_key structure. Instead of + # overriding whole list just try to create object smart. + if cls.resource_key in kwargs: + args = kwargs.pop(cls.resource_key) + kwargs.update(**args) + return cls(_synchronized=True, connection=connection, **kwargs) diff --git a/openstack/compute/v2/limits.py b/openstack/compute/v2/limits.py index 113e2e85e0..ef223e06d1 100644 --- a/openstack/compute/v2/limits.py +++ b/openstack/compute/v2/limits.py @@ -10,100 +10,137 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service -from openstack import resource2 +from openstack import resource -class AbsoluteLimits(resource2.Resource): +class AbsoluteLimits(resource.Resource): + _max_microversion = '2.57' + # Properties #: The number of key-value pairs that can be set as image metadata. - image_meta = resource2.Body("maxImageMeta") + image_meta = resource.Body("maxImageMeta", aka="max_image_meta") #: The maximum number of personality contents that can be supplied. - personality = resource2.Body("maxPersonality") + personality = resource.Body("maxPersonality", deprecated=True) #: The maximum size, in bytes, of a personality. - personality_size = resource2.Body("maxPersonalitySize") + personality_size = resource.Body("maxPersonalitySize", deprecated=True) #: The maximum amount of security group rules allowed. - security_group_rules = resource2.Body("maxSecurityGroupRules") + security_group_rules = resource.Body( + "maxSecurityGroupRules", aka="max_security_group_rules" + ) #: The maximum amount of security groups allowed. - security_groups = resource2.Body("maxSecurityGroups") + security_groups = resource.Body( + "maxSecurityGroups", aka="max_security_groups" + ) #: The amount of security groups currently in use. 
- security_groups_used = resource2.Body("totalSecurityGroupsUsed") - #: The number of key-value pairs that can be set as sever metadata. - server_meta = resource2.Body("maxServerMeta") + security_groups_used = resource.Body( + "totalSecurityGroupsUsed", aka="total_security_groups_used" + ) + #: The number of key-value pairs that can be set as server metadata. + server_meta = resource.Body("maxServerMeta", aka="max_server_meta") #: The maximum amount of cores. - total_cores = resource2.Body("maxTotalCores") + total_cores = resource.Body("maxTotalCores", aka="max_total_cores") #: The amount of cores currently in use. - total_cores_used = resource2.Body("totalCoresUsed") + total_cores_used = resource.Body("totalCoresUsed", aka="total_cores_used") #: The maximum amount of floating IPs. - floating_ips = resource2.Body("maxTotalFloatingIps") + floating_ips = resource.Body( + "maxTotalFloatingIps", aka="max_total_floating_ips" + ) #: The amount of floating IPs currently in use. - floating_ips_used = resource2.Body("totalFloatingIpsUsed") + floating_ips_used = resource.Body( + "totalFloatingIpsUsed", aka="total_floating_ips_used" + ) #: The maximum amount of instances. - instances = resource2.Body("maxTotalInstances") + instances = resource.Body("maxTotalInstances", aka="max_total_instances") #: The amount of instances currently in use. - instances_used = resource2.Body("totalInstancesUsed") + instances_used = resource.Body( + "totalInstancesUsed", aka="total_instances_used" + ) #: The maximum amount of keypairs. - keypairs = resource2.Body("maxTotalKeypairs") + keypairs = resource.Body("maxTotalKeypairs", aka="max_total_keypairs") #: The maximum RAM size in megabytes. - total_ram = resource2.Body("maxTotalRAMSize") + total_ram = resource.Body("maxTotalRAMSize", aka="max_total_ram_size") #: The RAM size in megabytes currently in use. 
- total_ram_used = resource2.Body("totalRAMUsed") + total_ram_used = resource.Body("totalRAMUsed", aka="total_ram_used") #: The maximum amount of server groups. - server_groups = resource2.Body("maxServerGroups") + server_groups = resource.Body("maxServerGroups", aka="max_server_groups") #: The amount of server groups currently in use. - server_groups_used = resource2.Body("totalServerGroupsUsed") + server_groups_used = resource.Body( "totalServerGroupsUsed", aka="total_server_groups_used" ) #: The maximum number of members in a server group. - server_group_members = resource2.Body("maxServerGroupMembers") - - -class RateLimit(resource2.Resource): - + server_group_members = resource.Body( "maxServerGroupMembers", aka="max_server_group_members" ) + + +class RateLimit(resource.Resource): + # Properties + #: Rate limits next available time. + next_available = resource.Body("next-available") + #: Integer for rate limits remaining. + remaining = resource.Body("remaining", type=int) + #: Unit of measurement for the value parameter. + unit = resource.Body("unit") + #: Integer number of requests which can be made. + value = resource.Body("value", type=int) + #: An HTTP verb (POST, PUT, etc.). + verb = resource.Body("verb") + + +class RateLimits(resource.Resource): + # Properties #: A list of the specific limits that apply to the ``regex`` and ``uri``. - limits = resource2.Body("limit", type=list) + limits = resource.Body("limit", type=list, list_type=RateLimit) #: A regex representing which routes this rate limit applies to. - regex = resource2.Body("regex") + regex = resource.Body("regex") #: A URI representing which routes this rate limit applies to. 
- uri = resource2.Body("uri") + uri = resource.Body("uri") -class Limits(resource2.Resource): +class Limits(resource.Resource): base_path = "/limits" resource_key = "limits" - service = compute_service.ComputeService() - allow_get = True - - absolute = resource2.Body("absolute", type=AbsoluteLimits) - rate = resource2.Body("rate", type=list) - - def get(self, session, requires_id=False): + allow_fetch = True + + _query_mapping = resource.QueryParameters( + 'tenant_id', + 'reserved', + project_id='tenant_id', + ) + + # Properties + #: An absolute limits object. + absolute = resource.Body("absolute", type=AbsoluteLimits) + #: Rate-limit compute resources. This is only populated when using the + #: legacy v2 API which was removed in Nova 14.0.0 (Newton). In v2.1 it will + #: always be an empty list. + rate = resource.Body("rate", type=list, list_type=RateLimits) + + def fetch( + self, + session, + requires_id=False, + base_path=None, + error_message=None, + skip_cache=False, + **params, + ): """Get the Limits resource. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: A Limits instance :rtype: :class:`~openstack.compute.v2.limits.Limits` """ - request = self._prepare_request(requires_id=False, prepend_key=False) - - response = session.get(request.uri, endpoint_filter=self.service) - - body = response.json() - body = body[self.resource_key] - - absolute_body = self._filter_component( - body["absolute"], AbsoluteLimits._body_mapping()) - self.absolute = AbsoluteLimits.existing(**absolute_body) - - rates_body = body["rate"] - - rates = [] - for rate_body in rates_body: - rate_body = self._filter_component(rate_body, - RateLimit._body_mapping()) - rates.append(RateLimit(**rate_body)) - - self.rate = rates - - return self + # TODO(mordred) We shouldn't have to subclass just to declare + # requires_id = False. 
+ return super().fetch( + session, + requires_id, + error_message, + base_path, + skip_cache, + **params, + ) diff --git a/openstack/compute/v2/metadata.py b/openstack/compute/v2/metadata.py deleted file mode 100644 index e611fd98b6..0000000000 --- a/openstack/compute/v2/metadata.py +++ /dev/null @@ -1,98 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from openstack import utils - - -class MetadataMixin(object): - - def _metadata(self, method, key=None, clear=False, delete=False, - **metadata): - for k, v in metadata.items(): - if not isinstance(v, six.string_types): - raise ValueError("The value for %s (%s) must be " - "a text string" % (k, v)) - - # If we're in a ServerDetail, we need to pop the "detail" portion - # of the URL off and then everything else will work the same. - pos = self.base_path.find("detail") - if pos != -1: - base = self.base_path[:pos] - else: - base = self.base_path - - if key is not None: - url = utils.urljoin(base, self.id, "metadata", key) - else: - url = utils.urljoin(base, self.id, "metadata") - - kwargs = {"endpoint_filter": self.service} - if metadata or clear: - # 'meta' is the key for singular modifications. - # 'metadata' is the key for mass modifications. - key = "meta" if key is not None else "metadata" - kwargs["json"] = {key: metadata} - - headers = {"Accept": ""} if delete else {} - - response = method(url, headers=headers, **kwargs) - - # DELETE doesn't return a JSON body while everything else does. 
- return response.json() if not delete else None - - def get_metadata(self, session): - """Retrieve metadata - - :param session: The session to use for this request. - - :returns: A dictionary of the requested metadata. All keys and values - are Unicode text. - :rtype: dict - """ - result = self._metadata(session.get) - return result["metadata"] - - def set_metadata(self, session, **metadata): - """Update metadata - - This call will replace only the metadata with the same keys - given here. Metadata with other keys will not be modified. - - :param session: The session to use for this request. - :param kwargs metadata: key/value metadata pairs to be update on - this server instance. All keys and values - are stored as Unicode. - - :returns: A dictionary of the metadata after being updated. - All keys and values are Unicode text. - :rtype: dict - """ - if not metadata: - return dict() - - result = self._metadata(session.post, **metadata) - return result["metadata"] - - def delete_metadata(self, session, keys): - """Delete metadata - - Note: This method will do a HTTP DELETE request for every key in keys. - - :param session: The session to use for this request. - :param list keys: The keys to delete. - - :rtype: ``None`` - """ - for key in keys: - self._metadata(session.delete, key=key, delete=True) diff --git a/openstack/compute/v2/migration.py b/openstack/compute/v2/migration.py new file mode 100644 index 0000000000..5fc8c8f5c2 --- /dev/null +++ b/openstack/compute/v2/migration.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Migration(resource.Resource): + resources_key = 'migrations' + base_path = '/os-migrations' + + # capabilities + allow_list = True + + _query_mapping = resource.QueryParameters( + 'host', + 'status', + 'migration_type', + 'source_compute', + 'user_id', + 'project_id', + changes_since='changes-since', + changes_before='changes-before', + server_id='instance_uuid', + ) + + #: The date and time when the resource was created. + created_at = resource.Body('created_at') + #: The target compute of the migration. + dest_compute = resource.Body('dest_compute') + #: The target host of the migration. + dest_host = resource.Body('dest_host') + #: The target node of the migration. + dest_node = resource.Body('dest_node') + #: The type of the migration. One of 'migration', 'resize', + #: 'live-migration' or 'evacuation' + migration_type = resource.Body('migration_type') + #: The ID of the new flavor. This value corresponds to the ID of the flavor + #: in the database. This will be the same as old_flavor_id except for + #: resize operations. + new_flavor_id = resource.Body('new_instance_type_id') + #: The ID of the old flavor. This value corresponds to the ID of the flavor + #: in the database. + old_flavor_id = resource.Body('old_instance_type_id') + #: The ID of the project that initiated the server migration (since + #: microversion 2.80) + project_id = resource.Body('project_id') + #: The UUID of the server + server_id = resource.Body('instance_uuid') + #: The source compute of the migration. + source_compute = resource.Body('source_compute') + #: The source node of the migration. + source_node = resource.Body('source_node') + #: The current status of the migration. + status = resource.Body('status') + #: The date and time when the resource was last updated. 
+ updated_at = resource.Body('updated_at') + #: The ID of the user that initiated the server migration (since + #: microversion 2.80) + user_id = resource.Body('user_id') + #: The UUID of the migration (since microversion 2.59) + uuid = resource.Body('uuid', alternate_id=True) + + _max_microversion = '2.80' diff --git a/openstack/compute/v2/quota_class_set.py b/openstack/compute/v2/quota_class_set.py new file mode 100644 index 0000000000..8f09a5512c --- /dev/null +++ b/openstack/compute/v2/quota_class_set.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class QuotaClassSet(resource.Resource): + resource_key = 'quota_class_set' + base_path = '/os-quota-class-sets' + + _max_microversion = '2.56' + + # capabilities + allow_fetch = True + allow_commit = True + + #: Properties + #: The number of allowed server cores for each tenant. + cores = resource.Body('cores', type=int) + #: The number of allowed fixed IP addresses for each tenant. Must be + #: equal to or greater than the number of allowed servers. + fixed_ips = resource.Body('fixed_ips', type=int) + #: The number of allowed floating IP addresses for each tenant. + floating_ips = resource.Body('floating_ips', type=int) + #: The number of allowed bytes of content for each injected file. + injected_file_content_bytes = resource.Body( + 'injected_file_content_bytes', type=int + ) + #: The number of allowed bytes for each injected file path. 
+ injected_file_path_bytes = resource.Body( + 'injected_file_path_bytes', type=int + ) + #: The number of allowed injected files for each tenant. + injected_files = resource.Body('injected_files', type=int) + #: The number of allowed servers for each tenant. + instances = resource.Body('instances', type=int) + #: The number of allowed key pairs for each user. + key_pairs = resource.Body('key_pairs', type=int) + #: The number of allowed metadata items for each server. + metadata_items = resource.Body('metadata_items', type=int) + #: The number of private networks that can be created per project. + networks = resource.Body('networks', type=int) + #: The amount of allowed server RAM, in MiB, for each tenant. + ram = resource.Body('ram', type=int) + #: The number of allowed rules for each security group. + security_group_rules = resource.Body('security_group_rules', type=int) + #: The number of allowed security groups for each tenant. + security_groups = resource.Body('security_groups', type=int) + #: The number of allowed server groups for each tenant. + server_groups = resource.Body('server_groups', type=int) + #: The number of allowed members for each server group. + server_group_members = resource.Body('server_group_members', type=int) diff --git a/openstack/compute/v2/quota_set.py b/openstack/compute/v2/quota_set.py new file mode 100644 index 0000000000..482cc7bf38 --- /dev/null +++ b/openstack/compute/v2/quota_set.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.common import quota_set +from openstack import resource + + +class QuotaSet(quota_set.QuotaSet): + # We generally only want compute QS support max_microversion. Otherwise be + # explicit and list all the attributes + _max_microversion = '2.56' + + #: Properties + #: The number of allowed server cores for each tenant. + cores = resource.Body('cores', type=int) + #: The number of allowed fixed IP addresses for each tenant. Must be + #: equal to or greater than the number of allowed servers. + fixed_ips = resource.Body('fixed_ips', type=int) + #: The number of allowed floating IP addresses for each tenant. + floating_ips = resource.Body('floating_ips', type=int) + #: You can force the update even if the quota has already been used and + #: the reserved quota exceeds the new quota. + force = resource.Body('force', type=bool) + #: The number of allowed bytes of content for each injected file. + injected_file_content_bytes = resource.Body( + 'injected_file_content_bytes', type=int + ) + #: The number of allowed bytes for each injected file path. + injected_file_path_bytes = resource.Body( + 'injected_file_path_bytes', type=int + ) + #: The number of allowed injected files for each tenant. + injected_files = resource.Body('injected_files', type=int) + #: The number of allowed servers for each tenant. + instances = resource.Body('instances', type=int) + #: The number of allowed key pairs for each user. + key_pairs = resource.Body('key_pairs', type=int) + #: The number of allowed metadata items for each server. + metadata_items = resource.Body('metadata_items', type=int) + #: The number of private networks that can be created per project. + networks = resource.Body('networks', type=int) + #: The amount of allowed server RAM, in MiB, for each tenant. + ram = resource.Body('ram', type=int) + #: The number of allowed rules for each security group. 
+ security_group_rules = resource.Body('security_group_rules', type=int) + #: The number of allowed security groups for each tenant. + security_groups = resource.Body('security_groups', type=int) + #: The number of allowed server groups for each tenant. + server_groups = resource.Body('server_groups', type=int) + #: The number of allowed members for each server group. + server_group_members = resource.Body('server_group_members', type=int) diff --git a/openstack/compute/v2/server.py b/openstack/compute/v2/server.py index 24a579a763..267cac23c6 100644 --- a/openstack/compute/v2/server.py +++ b/openstack/compute/v2/server.py @@ -10,172 +10,459 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service -from openstack.compute.v2 import metadata -from openstack import resource2 +import typing as ty + +from openstack.common import metadata +from openstack.common import tag +from openstack.compute.v2 import flavor +from openstack.compute.v2 import volume_attachment +from openstack import exceptions +from openstack.image.v2 import image +from openstack import resource +from openstack import types from openstack import utils -class Server(resource2.Resource, metadata.MetadataMixin): +CONSOLE_TYPE_ACTION_MAPPING = { + 'novnc': 'os-getVNCConsole', + 'xvpvnc': 'os-getVNCConsole', + 'spice-html5': 'os-getSPICEConsole', + 'spice-direct': 'os-getSPICEConsole', + 'rdp-html5': 'os-getRDPConsole', + 'serial': 'os-getSerialConsole', +} + + +class Server(resource.Resource, metadata.MetadataMixin, tag.TagMixin): resource_key = 'server' resources_key = 'servers' base_path = '/servers' - service = compute_service.ComputeService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - _query_mapping = resource2.QueryParameters("image", "flavor", "name", - "status", "host", "all_tenants", - "sort_key", 
"sort_dir", - "reservation_id", "tags", - tags_any="tags-any", - not_tags="not-tags", - not_tags_any="not-tags-any", - is_deleted="deleted", - ipv4_address="ip", - ipv6_address="ip6", - changes_since="changes-since") + _query_mapping = resource.QueryParameters( + "auto_disk_config", + "availability_zone", + "created_at", + "description", + "flavor", + "hostname", + "image", + "kernel_id", + "key_name", + "launch_index", + "launched_at", + "locked", + "locked_by", + "name", + "node", + "power_state", + "progress", + "project_id", + "ramdisk_id", + "reservation_id", + "root_device_name", + "status", + "task_state", + "terminated_at", + "user_id", + "vm_state", + "sort_key", + "sort_dir", + "pinned_availability_zone", + access_ipv4="access_ip_v4", + access_ipv6="access_ip_v6", + has_config_drive="config_drive", + deleted_only="deleted", + compute_host="host", + is_soft_deleted="soft_deleted", + ipv4_address="ip", + ipv6_address="ip6", + changes_since="changes-since", + changes_before="changes-before", + id="uuid", + all_projects="all_tenants", + **tag.TagMixin._tag_query_parameters, + ) + + _max_microversion = '2.100' #: A list of dictionaries holding links relevant to this server. - links = resource2.Body('links') + links = resource.Body('links') - access_ipv4 = resource2.Body('accessIPv4') - access_ipv6 = resource2.Body('accessIPv6') + access_ipv4 = resource.Body('accessIPv4') + access_ipv6 = resource.Body('accessIPv6') #: A dictionary of addresses this server can be accessed through. #: The dictionary contains keys such as ``private`` and ``public``, #: each containing a list of dictionaries for addresses of that type. #: The addresses are contained in a dictionary with keys ``addr`` #: and ``version``, which is either 4 or 6 depending on the protocol #: of the IP address. *Type: dict* - addresses = resource2.Body('addresses', type=dict) + addresses = resource.Body('addresses', type=dict) + #: When a server is first created, it provides the administrator password. 
+ admin_password = resource.Body('adminPass') + #: A list of an attached volumes. Each item in the list contains at least + #: an "id" key to identify the specific volumes. + attached_volumes = resource.Body( + 'os-extended-volumes:volumes_attached', + aka='volumes', + type=list, + list_type=volume_attachment.VolumeAttachment, + default=[], + ) + #: The name of the availability zone this server is a part of. + availability_zone = resource.Body('OS-EXT-AZ:availability_zone') + #: Enables fine grained control of the block device mapping for an + #: instance. This is typically used for booting servers from volumes. + block_device_mapping = resource.Body('block_device_mapping_v2') + #: Indicates whether or not a config drive was used for this server. + config_drive = resource.Body('config_drive') + #: The name of the compute host on which this instance is running. + #: Appears in the response for administrative users only. + compute_host = resource.Body('OS-EXT-SRV-ATTR:host') #: Timestamp of when the server was created. - created_at = resource2.Body('created') + created_at = resource.Body('created') + #: The description of the server. Before microversion + #: 2.19 this was set to the server name. + description = resource.Body('description') + #: The disk configuration. Either AUTO or MANUAL. + disk_config = resource.Body('OS-DCF:diskConfig') #: The flavor reference, as a ID or full URL, for the flavor to use for #: this server. - flavor_id = resource2.Body('flavorRef') + flavor_id = resource.Body('flavorRef') #: The flavor property as returned from server. - flavor = resource2.Body('flavor', type=dict) + flavor = resource.Body('flavor', type=flavor.Flavor) + #: Indicates whether a configuration drive enables metadata injection. + #: Not all cloud providers enable this feature. + has_config_drive = resource.Body('config_drive') #: An ID representing the host of this server. - host_id = resource2.Body('hostId') + host_id = resource.Body('hostId') + #: A fault object. 
Only available when the server status + #: is ERROR or DELETED and a fault occurred. + fault = resource.Body('fault') + #: The host to boot the server on. + host = resource.Body('host') + #: The host status. + host_status = resource.Body('host_status') + #: The hostname set on the instance when it is booted. + #: By default, it appears in the response for administrative users only. + hostname = resource.Body('OS-EXT-SRV-ATTR:hostname') + #: The hypervisor host name. Appears in the response for administrative + #: users only. + hypervisor_hostname = resource.Body('OS-EXT-SRV-ATTR:hypervisor_hostname') #: The image reference, as a ID or full URL, for the image to use for #: this server. - image_id = resource2.Body('imageRef') + image_id = resource.Body('imageRef') #: The image property as returned from server. - image = resource2.Body('image', type=dict) - #: Metadata stored for this server. *Type: dict* - metadata = resource2.Body('metadata', type=dict) + image = resource.Body('image', type=image.Image) + #: The instance name. The Compute API generates the instance name from the + #: instance name template. Appears in the response for administrative users + #: only. + instance_name = resource.Body('OS-EXT-SRV-ATTR:instance_name') + #: The address to use to connect to this server from the current calling + #: context. This will be set to public_ipv6 if the calling host has + #: routable ipv6 addresses, and to private_ipv4 if the Connection was + #: created with private=True. Otherwise it will be set to public_ipv4. + interface_ip = resource.Computed('interface_ip', default='') + # The locked status of the server + is_locked = resource.Body('locked', type=bool) + #: The UUID of the kernel image when using an AMI. Will be null if not. + #: By default, it appears in the response for administrative users only. 
+ kernel_id = resource.Body('OS-EXT-SRV-ATTR:kernel_id') + #: The name of an associated keypair + key_name = resource.Body('key_name') + #: When servers are launched via multiple create, this is the + #: sequence in which the servers were launched. By default, it + #: appears in the response for administrative users only. + launch_index = resource.Body('OS-EXT-SRV-ATTR:launch_index', type=int) + #: The timestamp when the server was launched. + launched_at = resource.Body('OS-SRV-USG:launched_at') + #: The reason the server was locked, if any. + locked_reason = resource.Body('locked_reason') + #: The maximum number of servers to create. + max_count = resource.Body('max_count') + #: The minimum number of servers to create. + min_count = resource.Body('min_count') + #: A networks object. Required parameter when there are multiple + #: networks defined for the tenant. When you do not specify the + #: networks parameter, the server attaches to the only network + #: created for the current tenant. + networks = resource.Body('networks') + #: Personality files. This should be a list of dicts with each dict + #: containing a file name ('name') and a base64-encoded file contents + #: ('contents') + personality = resource.Body('personality', type=list) + #: The availability zone requested during server creation OR pinned + #: availability zone, which is configured using default_schedule_zone + #: config option. + pinned_availability_zone = resource.Body('pinned_availability_zone') + #: The power state of this server. + power_state = resource.Body('OS-EXT-STS:power_state') #: While the server is building, this value represents the percentage #: of completion. Once it is completed, it will be 100. *Type: int* - progress = resource2.Body('progress', type=int) + progress = resource.Body('progress', type=int) #: The ID of the project this server is associated with. 
- project_id = resource2.Body('tenant_id') + project_id = resource.Body('tenant_id') + + #: The private IPv4 address of this server + private_v4 = resource.Computed('private_v4', default='') + #: The private IPv6 address of this server + private_v6 = resource.Computed('private_v6', default='') + + #: The public IPv4 address of this server + public_v4 = resource.Computed('public_v4', default='') + #: The public IPv6 address of this server + public_v6 = resource.Computed('public_v6', default='') + + #: The UUID of the ramdisk image when using an AMI. Will be null if not. + #: By default, it appears in the response for administrative users only. + ramdisk_id = resource.Body('OS-EXT-SRV-ATTR:ramdisk_id') + #: The reservation id for the server. This is an id that can be + #: useful in tracking groups of servers created with multiple create, + #: that will all have the same reservation_id. By default, it appears + #: in the response for administrative users only. + reservation_id = resource.Body('OS-EXT-SRV-ATTR:reservation_id') + #: The root device name for the instance By default, it appears in the + #: response for administrative users only. + root_device_name = resource.Body('OS-EXT-SRV-ATTR:root_device_name') + #: The dictionary of data to send to the scheduler. + scheduler_hints = resource.Body('OS-SCH-HNT:scheduler_hints', type=dict) + #: A list of applicable security groups. Each group contains keys for + #: description, name, id, and rules. + security_groups = resource.Body( + 'security_groups', type=list, list_type=dict + ) + #: The UUIDs of the server groups to which the server belongs. + #: Currently this can contain at most one entry. + server_groups = resource.Body('server_groups', type=list) #: The state this server is in. 
Valid values include ``ACTIVE``, #: ``BUILDING``, ``DELETED``, ``ERROR``, ``HARD_REBOOT``, ``PASSWORD``, #: ``PAUSED``, ``REBOOT``, ``REBUILD``, ``RESCUED``, ``RESIZED``, #: ``REVERT_RESIZE``, ``SHUTOFF``, ``SOFT_DELETED``, ``STOPPED``, #: ``SUSPENDED``, ``UNKNOWN``, or ``VERIFY_RESIZE``. - status = resource2.Body('status') - #: Timestamp of when this server was last updated. - updated_at = resource2.Body('updated') - #: The ID of the owners of this server. - user_id = resource2.Body('user_id') - #: The name of an associated keypair - key_name = resource2.Body('key_name') - #: The disk configuration. Either AUTO or MANUAL. - disk_config = resource2.Body('OS-DCF:diskConfig') - #: Indicates whether a configuration drive enables metadata injection. - #: Not all cloud providers enable this feature. - has_config_drive = resource2.Body('config_drive') - #: The name of the availability zone this server is a part of. - availability_zone = resource2.Body('OS-EXT-AZ:availability_zone') - #: The power state of this server. - power_state = resource2.Body('OS-EXT-STS:power_state') + status = resource.Body('status') #: The task state of this server. - task_state = resource2.Body('OS-EXT-STS:task_state') - #: The VM state of this server. - vm_state = resource2.Body('OS-EXT-STS:vm_state') - #: A list of an attached volumes. Each item in the list contains at least - #: an "id" key to identify the specific volumes. - attached_volumes = resource2.Body( - 'os-extended-volumes:volumes_attached') - #: The timestamp when the server was launched. - launched_at = resource2.Body('OS-SRV-USG:launched_at') + task_state = resource.Body('OS-EXT-STS:task_state') #: The timestamp when the server was terminated (if it has been). - terminated_at = resource2.Body('OS-SRV-USG:terminated_at') - #: A list of applicable security groups. Each group contains keys for - #: description, name, id, and rules. 
- security_groups = resource2.Body('security_groups') - #: When a server is first created, it provides the administrator password. - admin_password = resource2.Body('adminPass') - #: The file path and contents, text only, to inject into the server at - #: launch. The maximum size of the file path data is 255 bytes. - #: The maximum limit is The number of allowed bytes in the decoded, - #: rather than encoded, data. - personality = resource2.Body('personality') + terminated_at = resource.Body('OS-SRV-USG:terminated_at') + #: A list of trusted certificate IDs, that were used during image + #: signature verification to verify the signing certificate. + trusted_image_certificates = resource.Body( + 'trusted_image_certificates', type=list + ) + #: Timestamp of when this server was last updated. + updated_at = resource.Body('updated') #: Configuration information or scripts to use upon launch. #: Must be Base64 encoded. - user_data = resource2.Body('user_data') - #: Enables fine grained control of the block device mapping for an - #: instance. This is typically used for booting servers from volumes. - block_device_mapping = resource2.Body('block_device_mapping_v2', type=dict) - #: The dictionary of data to send to the scheduler. - scheduler_hints = resource2.Body('os:scheduler_hints', type=dict) - #: A networks object. Required parameter when there are multiple - #: networks defined for the tenant. When you do not specify the - #: networks parameter, the server attaches to the only network - #: created for the current tenant. - networks = resource2.Body('networks') + user_data = resource.Body('OS-EXT-SRV-ATTR:user_data') + #: The ID of the owners of this server. + user_id = resource.Body('user_id') + #: The VM state of this server. 
+ vm_state = resource.Body('OS-EXT-STS:vm_state') + + def _prepare_request( + self, + requires_id=True, + prepend_key=True, + patch=False, + base_path=None, + params=None, + **kwargs, + ): + request = super()._prepare_request( + requires_id=requires_id, + prepend_key=prepend_key, + patch=patch, + base_path=base_path, + params=params, + **kwargs, + ) + + server_body = request.body[self.resource_key] + + # Some names exist without prefix on requests but with a prefix + # on responses. If we find that we've populated one of these + # attributes with something and then go to make a request, swap out + # the name to the bare version. + + # Availability Zones exist with a prefix on response, but not request + az_key = "OS-EXT-AZ:availability_zone" + if az_key in server_body: + server_body["availability_zone"] = server_body.pop(az_key) + + # User Data exists with a prefix on response, but not request + ud_key = "OS-EXT-SRV-ATTR:user_data" + if ud_key in server_body: + server_body["user_data"] = server_body.pop(ud_key) + + # Scheduler hints are sent in a top-level scope, not within the + # resource_key scope like everything else. If we try to send + # scheduler_hints, pop them out of the resource_key scope and into + # their own top-level scope. + hint_key = "OS-SCH-HNT:scheduler_hints" + if hint_key in server_body: + request.body[hint_key] = server_body.pop(hint_key) - def _action(self, session, body): + # hostname exists with a prefix on response, but not request + hostname_key = "OS-EXT-SRV-ATTR:hostname" + if hostname_key in server_body: + server_body["hostname"] = server_body.pop(hostname_key) + + return request + + def _action(self, session, body, microversion=None): """Preform server actions given the message body.""" # NOTE: This is using Server.base_path instead of self.base_path # as both Server and ServerDetail instances can be acted on, but # the URL used is sans any additional /detail/ part. 
url = utils.urljoin(Server.base_path, self.id, 'action') headers = {'Accept': ''} - return session.post( - url, endpoint_filter=self.service, json=body, headers=headers) - def change_password(self, session, new_password): - """Change the administrator password to the given password.""" - body = {'changePassword': {'adminPass': new_password}} - self._action(session, body) + # these aren't all necessary "commit" actions (i.e. updates) but it's + # good enough... + if microversion is None: + microversion = self._get_microversion(session) + + response = session.post( + url, + json=body, + headers=headers, + microversion=microversion, + ) + exceptions.raise_from_response(response) + return response + + def change_password(self, session, password, *, microversion=None): + """Change the administrator password to the given password. + + :param session: The session to use for making this request. + :param password: The new password. + :returns: None + """ + body = {'changePassword': {'adminPass': password}} + self._action(session, body, microversion=microversion) + + def get_password(self, session, *, microversion=None): + """Get the encrypted administrator password. + + :param session: The session to use for making this request. + :returns: The encrypted administrator password. + """ + url = utils.urljoin(Server.base_path, self.id, 'os-server-password') + if microversion is None: + microversion = self._get_microversion(session) + + response = session.get(url, microversion=microversion) + exceptions.raise_from_response(response) + + data = response.json() + return data.get('password') + + def clear_password(self, session, *, microversion=None): + """Clear the administrator password. + + This removes the password from the database. It does not actually + change the server password. + + :param session: The session to use for making this request. 
+ :returns: None + """ + url = utils.urljoin(Server.base_path, self.id, 'os-server-password') + if microversion is None: + microversion = self._get_microversion(session) + + response = session.delete(url, microversion=microversion) + exceptions.raise_from_response(response) def reboot(self, session, reboot_type): - """Reboot server where reboot_type might be 'SOFT' or 'HARD'.""" + """Reboot server where reboot_type might be 'SOFT' or 'HARD'. + + :param session: The session to use for making this request. + :param reboot_type: The type of reboot. One of: ``SOFT``, ``HARD``. + :returns: None + """ body = {'reboot': {'type': reboot_type}} self._action(session, body) def force_delete(self, session): - """Force delete a server.""" + """Force delete the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {'forceDelete': None} self._action(session, body) - def rebuild(self, session, name, admin_password, - preserve_ephemeral=False, image=None, - access_ipv4=None, access_ipv6=None, - metadata=None, personality=None): - """Rebuild the server with the given arguments.""" - action = { - 'name': name, - 'adminPass': admin_password, - 'preserve_ephemeral': preserve_ephemeral - } - if image is not None: - action['imageRef'] = resource2.Resource._get_id(image) - if access_ipv4 is not None: + def rebuild( + self, + session, + image, + name=types.UNSET, + admin_password=types.UNSET, + preserve_ephemeral=types.UNSET, + access_ipv4=types.UNSET, + access_ipv6=types.UNSET, + metadata=types.UNSET, + user_data=types.UNSET, + key_name=types.UNSET, + description=types.UNSET, + trusted_image_certificates=types.UNSET, + hostname=types.UNSET, + ): + """Rebuild the server with the given arguments. + + :param session: The session to use for making this request. + :param image: The image to rebuild to. Either an ID or a + :class:`~openstack.image.v1.image.Image` instance. + :param name: A name to set on the rebuilt server. 
(Optional) + :param admin_password: An admin password to set on the rebuilt server. + (Optional) + :param preserve_ephemeral: Whether to preserve the ephemeral drive + during the rebuild. (Optional) + :param access_ipv4: An IPv4 address that will be used to access the + rebuilt server. (Optional) + :param access_ipv6: An IPv6 address that will be used to access the + rebuilt server. (Optional) + :param metadata: Metadata to set on the updated server. (Optional) + :param user_data: User data to set on the updated server. (Optional) + :param key_name: A key name to set on the updated server. (Optional) + :param description: The description to set on the updated server. + (Optional) (Requires API microversion 2.19) + :param trusted_image_certificates: The trusted image certificates to + set on the updated server. (Optional) (Requires API microversion + 2.78) + :param hostname: The hostname to set on the updated server. (Optional) + (Requires API microversion 2.90) + :returns: The updated server. 
+ """ + action = {'imageRef': resource.Resource._get_id(image)} + if preserve_ephemeral is not types.UNSET: + action['preserve_ephemeral'] = preserve_ephemeral + if name is not types.UNSET: + action['name'] = name + if admin_password is not types.UNSET: + action['adminPass'] = admin_password + if access_ipv4 is not types.UNSET: action['accessIPv4'] = access_ipv4 - if access_ipv6 is not None: + if access_ipv6 is not types.UNSET: action['accessIPv6'] = access_ipv6 - if metadata is not None: + if metadata is not types.UNSET: action['metadata'] = metadata - if personality is not None: - action['personality'] = personality + if user_data is not types.UNSET: + action['user_data'] = user_data + if key_name is not types.UNSET: + action['key_name'] = key_name + if description is not types.UNSET: + action['description'] = description + if trusted_image_certificates is not types.UNSET: + action['trusted_image_certificates'] = trusted_image_certificates + if hostname is not types.UNSET: + action['hostname'] = hostname body = {'rebuild': action} response = self._action(session, body) @@ -183,84 +470,254 @@ def rebuild(self, session, name, admin_password, return self def resize(self, session, flavor): - """Resize server to flavor reference.""" + """Resize server to flavor reference. + + :param session: The session to use for making this request. + :param flavor: The server to resize to. + :returns: None + """ body = {'resize': {'flavorRef': flavor}} self._action(session, body) def confirm_resize(self, session): - """Confirm the resize of the server.""" + """Confirm the resize of the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {'confirmResize': None} self._action(session, body) def revert_resize(self, session): - """Revert the resize of the server.""" + """Revert the resize of the server. + + :param session: The session to use for making this request. 
+ :returns: None + """ body = {'revertResize': None} self._action(session, body) def create_image(self, session, name, metadata=None): - """Create image from server.""" + """Create image from server. + + :param session: The session to use for making this request. + :param name: The name to use for the created image. + :param metadata: Metadata to set on the created image. (Optional) + :returns: None + """ action = {'name': name} if metadata is not None: action['metadata'] = metadata body = {'createImage': action} - self._action(session, body) - def add_security_group(self, session, security_group): - body = {"addSecurityGroup": {"name": security_group}} + # You won't believe it - wait, who am I kidding - of course you will! + # Nova returns the URL of the image created in the Location + # header of the response. (what?) But, even better, the URL it responds + # with has a very good chance of being wrong (it is built from + # nova.conf values that point to internal API servers in any cloud + # large enough to have both public and internal endpoints. + # However, nobody has ever noticed this because novaclient doesn't + # actually use that URL - it extracts the id from the end of + # the url, then returns the id. This leads us to question: + # a) why Nova is going to return a value in a header + # b) why it's going to return data that probably broken + # c) indeed the very nature of the fabric of reality + # Although it fills us with existential dread, we have no choice but + # to follow suit like a lemming being forced over a cliff by evil + # producers from Disney. 
+ microversion = None + if utils.supports_microversion(session, '2.45'): + microversion = '2.45' + response = self._action(session, body, microversion) + + try: + # There might be a body, there might not be + response_body = response.json() + except Exception: + response_body = None + if response_body and 'image_id' in response_body: + image_id = response_body['image_id'] + else: + image_id = response.headers['Location'].rsplit('/', 1)[1] + + return image_id + + def add_security_group(self, session, security_group_name): + """Add a security group to the server. + + :param session: The session to use for making this request. + :param security_group_name: The security group to add to the server. + :returns: None + """ + body = {"addSecurityGroup": {"name": security_group_name}} self._action(session, body) - def remove_security_group(self, session, security_group): - body = {"removeSecurityGroup": {"name": security_group}} + def remove_security_group(self, session, security_group_name): + """Remove a security group from the server. + + :param session: The session to use for making this request. + :param security_group_name: The security group to remove from the + server. + :returns: None + """ + body = {"removeSecurityGroup": {"name": security_group_name}} self._action(session, body) def reset_state(self, session, state): + """Reset server state. + + This is admin-only by default. + + :param session: The session to use for making this request. + :param state: The state to set on the server. + :returns: None + """ body = {"os-resetState": {"state": state}} self._action(session, body) def add_fixed_ip(self, session, network_id): + """Add a fixed IP to the server. + + This is effectively an alias for adding a network. + + :param session: The session to use for making this request. + :param network_id: The network to connect the server to. 
+ :returns: None + """ + body = {"addFixedIp": {"networkId": network_id}} + self._action(session, body) def remove_fixed_ip(self, session, address): + """Remove a fixed IP from the server. + + This is effectively an alias for removing a port from the server. + + :param session: The session to use for making this request. + :param address: The address to remove from the server. + :returns: None + """ body = {"removeFixedIp": {"address": address}} self._action(session, body) def add_floating_ip(self, session, address, fixed_address=None): + """Add a floating IP to the server. + + :param session: The session to use for making this request. + :param address: The floating IP address to associate with the server. + :param fixed_address: A fixed IP address with which to associate the + floating IP. (Optional) + :returns: None + """ body = {"addFloatingIp": {"address": address}} if fixed_address is not None: body['addFloatingIp']['fixed_address'] = fixed_address self._action(session, body) def remove_floating_ip(self, session, address): + """Remove a floating IP from the server. + + :param session: The session to use for making this request. + :param address: The floating IP address to disassociate from the + server. + :returns: None + """ body = {"removeFloatingIp": {"address": address}} self._action(session, body) + def backup(self, session, name, backup_type, rotation): + """Create a backup of the server. + + :param session: The session to use for making this request. + :param name: The name to use for the backup image. + :param backup_type: The type of backup. The value and meaning of this + attribute is user-defined and can be used to separate backups of + different types. For example, this could be used to distinguish + between ``daily`` and ``weekly`` backups. + :param rotation: The number of backups to retain. All images older than + the rotation'th image will be deleted.
+ :returns: None + """ + body = { + "createBackup": { + "name": name, + "backup_type": backup_type, + "rotation": rotation, + } + } + self._action(session, body) + def pause(self, session): + """Pause the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"pause": None} self._action(session, body) def unpause(self, session): + """Unpause the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"unpause": None} self._action(session, body) def suspend(self, session): + """Suspend the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"suspend": None} self._action(session, body) def resume(self, session): + """Resume the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"resume": None} self._action(session, body) - def lock(self, session): - body = {"lock": None} + def lock(self, session, locked_reason=None): + """Lock the server. + + :param session: The session to use for making this request. + :param locked_reason: The reason for locking the server. + :returns: None + """ + body: dict[str, ty.Any] = {"lock": None} + if locked_reason is not None: + body["lock"] = { + "locked_reason": locked_reason, + } self._action(session, body) def unlock(self, session): + """Unlock the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"unlock": None} self._action(session, body) def rescue(self, session, admin_pass=None, image_ref=None): - body = {"rescue": {}} + """Rescue the server. + + This is admin-only by default. + + :param session: The session to use for making this request. + :param admin_pass: A new admin password to set on the rescued server. + (Optional) + :param image_ref: The image to use when rescuing the server. If not + provided, the server will use the existing image. 
(Optional) + :returns: None + """ + body: dict[str, ty.Any] = {"rescue": {}} if admin_pass is not None: body["rescue"]["adminPass"] = admin_pass if image_ref is not None: @@ -268,42 +725,330 @@ def rescue(self, session, admin_pass=None, image_ref=None): self._action(session, body) def unrescue(self, session): + """Unrescue the server. + + This is admin-only by default. + + :param session: The session to use for making this request. + :returns: None + """ body = {"unrescue": None} self._action(session, body) - def evacuate(self, session, host=None, admin_pass=None, force=None): - body = {"evacuate": {}} + def evacuate( + self, + session, + host=None, + admin_pass=None, + force=None, + on_shared_storage=None, + ): + """Evacuate the server. + + :param session: The session to use for making this request. + :param host: The host to evacuate the instance to. (Optional) + :param admin_pass: The admin password to set on the evacuated instance. + (Optional) + :param force: Whether to force evacuation. + :param on_shared_storage: Whether the host is using shared storage. + (Optional) (Only supported before microversion 2.14) + :returns: None + """ + body: dict[str, ty.Any] = {"evacuate": {}} if host is not None: body["evacuate"]["host"] = host if admin_pass is not None: body["evacuate"]["adminPass"] = admin_pass if force is not None: body["evacuate"]["force"] = force + if on_shared_storage is not None: + body["evacuate"]["onSharedStorage"] = on_shared_storage self._action(session, body) def start(self, session): + """Start the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"os-start": None} self._action(session, body) def stop(self, session): + """Stop the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"os-stop": None} self._action(session, body) + def restore(self, session): + """Restore the server. + + This is only supported if the server is soft-deleted. 
This is + cloud-specific. + + :param session: The session to use for making this request. + :returns: None + """ + body = {"restore": None} + self._action(session, body) + def shelve(self, session): + """Shelve the server. + + :param session: The session to use for making this request. + :returns: None + """ body = {"shelve": None} self._action(session, body) - def unshelve(self, session): - body = {"unshelve": None} + def shelve_offload(self, session): + """Shelve-offload the server. + + :param session: The session to use for making this request. + :returns: None + """ + body = {"shelveOffload": None} self._action(session, body) + def unshelve(self, session, availability_zone=types.UNSET, host=None): + """Unshelve the server. -class ServerDetail(Server): - base_path = '/servers/detail' + :param session: The session to use for making this request. + :param availability_zone: If specified the instance will be unshelved + to the availability_zone. + If None is passed the instance defined availability_zone is unpin + and the instance will be scheduled to any availability_zone (free + scheduling). + If not specified the instance will be unshelved to either its + defined availability_zone or any availability_zone + (free scheduling). + :param host: If specified the host to unshelve the instance. + """ + data = {} + if host: + data["host"] = host + if availability_zone is None or isinstance(availability_zone, str): + data["availability_zone"] = availability_zone + body = {'unshelve': data or None} + self._action(session, body) - # capabilities - allow_create = False - allow_get = False - allow_update = False - allow_delete = False - allow_list = True + def migrate(self, session, *, host=None): + """Migrate the server. + + :param session: The session to use for making this request. + :param host: The host to migrate the server to. 
(Optional) + :returns: None + """ + if host and not utils.supports_microversion(session, '2.56'): + raise ValueError( + "The 'host' option is only supported on microversion 2.56 or " + "greater." + ) + + body: dict[str, ty.Any] = {"migrate": None} + if host: + body["migrate"] = {"host": host} + self._action(session, body) + + def trigger_crash_dump(self, session): + """Trigger a crash dump for the server. + + :param session: The session to use for making this request. + :returns: None + """ + body = {"trigger_crash_dump": None} + self._action(session, body) + + def get_console_output(self, session, length=None): + """Get console output for the server. + + :param session: The session to use for making this request. + :param length: The max length of the console output to return. + (Optional) + :returns: None + """ + body: dict[str, ty.Any] = {"os-getConsoleOutput": {}} + if length is not None: + body["os-getConsoleOutput"]["length"] = length + resp = self._action(session, body) + return resp.json() + + def get_console_url(self, session, console_type): + """Get the console URL for the server. + + :param session: The session to use for making this request. + :param console_type: The type of console to return. This is + cloud-specific. One of: ``novnc``, ``xvpvnc``, ``spice-html5``, + ``spice-direct`` (after Nova microversion 2.99), ``rdp-html5``, + or ``serial``. + :returns: None + """ + action = CONSOLE_TYPE_ACTION_MAPPING.get(console_type) + if not action: + raise ValueError(f"Unsupported console type {console_type}") + body = {action: {'type': console_type}} + resp = self._action(session, body) + return resp.json().get('console') + + def live_migrate( + self, + session, + host, + force, + block_migration, + disk_over_commit=False, + ): + """Live migrate the server. + + :param session: The session to use for making this request. + :param host: The host to live migrate the server to. (Optional) + :param force: Whether to force the migration. 
(Optional) + :param block_migration: Whether to do block migration. One of: + ``True``, ``False``, ``'auto'``. (Optional) + :param disk_over_commit: Whether to allow disk over-commit on the + destination host. (Optional) + :returns: None + """ + if utils.supports_microversion(session, '2.30'): + return self._live_migrate_30( + session, + host, + force=force, + block_migration=block_migration, + ) + elif utils.supports_microversion(session, '2.25'): + return self._live_migrate_25( + session, + host, + force=force, + block_migration=block_migration, + ) + else: + return self._live_migrate( + session, + host, + force=force, + block_migration=block_migration, + disk_over_commit=disk_over_commit, + ) + + def _live_migrate_30(self, session, host, force, block_migration): + microversion = '2.30' + body = {'host': None} + if block_migration is None: + block_migration = 'auto' + body['block_migration'] = block_migration + if host: + body['host'] = host + if force: + body['force'] = force + self._action( + session, + {'os-migrateLive': body}, + microversion=microversion, + ) + + def _live_migrate_25(self, session, host, force, block_migration): + microversion = '2.25' + body = {'host': None} + if block_migration is None: + block_migration = 'auto' + body['block_migration'] = block_migration + if host: + body['host'] = host + if not force: + raise ValueError( + "Live migration on this cloud implies 'force' " + "if the 'host' option has been given and it is not " + "possible to disable. It is recommended to not use 'host' " + "at all on this cloud as it is inherently unsafe, but if " + "it is unavoidable, please supply 'force=True' so that it " + "is clear you understand the risks." 
+ ) + self._action( + session, + {'os-migrateLive': body}, + microversion=microversion, + ) + + def _live_migrate( + self, + session, + host, + force, + block_migration, + disk_over_commit, + ): + microversion = None + body: dict[str, ty.Any] = { + 'host': None, + } + if block_migration == 'auto': + raise ValueError( + "Live migration on this cloud does not support 'auto' as " + "a parameter to block_migration, but only True and False." + ) + body['block_migration'] = block_migration or False + body['disk_over_commit'] = disk_over_commit or False + if host: + body['host'] = host + if not force: + raise ValueError( + "Live migration on this cloud implies 'force' " + "if the 'host' option has been given and it is not " + "possible to disable. It is recommended to not use 'host' " + "at all on this cloud as it is inherently unsafe, but if " + "it is unavoidable, please supply 'force=True' so that it " + "is clear you understand the risks." + ) + self._action( + session, + {'os-migrateLive': body}, + microversion=microversion, + ) + + def fetch_topology(self, session): + """Fetch the topology information for the server. + + :param session: The session to use for making this request. + :returns: None + """ + utils.require_microversion(session, '2.78') + + url = utils.urljoin(Server.base_path, self.id, 'topology') + + response = session.get(url) + + exceptions.raise_from_response(response) + + try: + return response.json() + except ValueError: + pass + + def fetch_security_groups(self, session): + """Fetch security groups of the server. + + :param session: The session to use for making this request. + :returns: Updated Server instance. 
+ """ + url = utils.urljoin(Server.base_path, self.id, 'os-security-groups') + + response = session.get(url) + + exceptions.raise_from_response(response) + + try: + data = response.json() + if 'security_groups' in data: + self.security_groups = data['security_groups'] + except ValueError: + pass + + return self + + +ServerDetail = Server diff --git a/openstack/compute/v2/server_action.py b/openstack/compute/v2/server_action.py new file mode 100644 index 0000000000..07e45d4d79 --- /dev/null +++ b/openstack/compute/v2/server_action.py @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ServerActionEvent(resource.Resource): + # Added the 'details' field in 2.84 + _max_microversion = '2.84' + + #: The name of the event + event = resource.Body('event') + #: The date and time when the event was started. The date and time stamp + #: format is ISO 8601 + start_time = resource.Body('start_time') + #: The date and time when the event finished. The date and time stamp + #: format is ISO 8601 + finish_time = resource.Body('finish_time') + #: The result of the event + result = resource.Body('result') + #: The traceback stack if an error occurred in this event. + #: This is only visible to cloud admins by default. + traceback = resource.Body('traceback') + #: The name of the host on which the event occurred. + #: This is only visible to cloud admins by default. 
+ host = resource.Body('host') + #: An obfuscated hashed host ID string, or the empty string if there is no + #: host for the event. This is a hashed value so will not actually look + #: like a hostname, and is hashed with data from the project_id, so the + #: same physical host as seen by two different project_ids will be + #: different. This is useful when within the same project you need to + #: determine if two events occurred on the same or different physical + #: hosts. + host_id = resource.Body('hostId') + #: Details of the event. May be unset. + details = resource.Body('details') + + +class ServerAction(resource.Resource): + resource_key = 'instanceAction' + resources_key = 'instanceActions' + base_path = '/servers/%(server_id)s/os-instance-actions' + + # capabilities + allow_fetch = True + allow_list = True + + # Properties + + #: The ID of the server that this action relates to. + server_id = resource.URI('server_id') + + #: The name of the action. + action = resource.Body('action') + # FIXME(stephenfin): This conflicts since there is a server ID in the URI + # *and* in the body. We need a field that handles both or we need to use + # different names. + # #: The ID of the server that this action relates to. + # server_id = resource.Body('instance_uuid') + #: The ID of the request that this action related to. + request_id = resource.Body('request_id', alternate_id=True) + #: The ID of the user which initiated the server action. + user_id = resource.Body('user_id') + #: The ID of the project that this server belongs to. + project_id = resource.Body('project_id') + start_time = resource.Body('start_time') + #: The related error message for when an action fails. 
+ message = resource.Body('message') + #: Events + events = resource.Body('events', type=list, list_type=ServerActionEvent) + + # events.details field added in 2.84 + _max_microversion = '2.84' + + _query_mapping = resource.QueryParameters( + changes_since="changes-since", + changes_before="changes-before", + ) diff --git a/openstack/compute/v2/server_diagnostics.py b/openstack/compute/v2/server_diagnostics.py new file mode 100644 index 0000000000..1208639eda --- /dev/null +++ b/openstack/compute/v2/server_diagnostics.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ServerDiagnostics(resource.Resource): + resource_key = 'diagnostics' + base_path = '/servers/%(server_id)s/diagnostics' + + # capabilities + allow_fetch = True + + requires_id = False + + _max_microversion = '2.48' + + #: Indicates whether or not a config drive was used for this server. + has_config_drive = resource.Body('config_drive') + #: The current state of the VM. + state = resource.Body('state') + #: The driver on which the VM is running. + driver = resource.Body('driver') + #: The hypervisor on which the VM is running. + hypervisor = resource.Body('hypervisor') + #: The hypervisor OS. + hypervisor_os = resource.Body('hypervisor_os') + #: The amount of time in seconds that the VM has been running. + uptime = resource.Body('uptime') + #: The number of vCPUs. + num_cpus = resource.Body('num_cpus') + #: The number of disks. 
+ num_disks = resource.Body('num_disks') + #: The number of vNICs. + num_nics = resource.Body('num_nics') + #: The dictionary with information about VM memory usage. + memory_details = resource.Body('memory_details') + #: The list of dictionaries with detailed information about VM CPUs. + cpu_details = resource.Body('cpu_details') + #: The list of dictionaries with detailed information about VM disks. + disk_details = resource.Body('disk_details') + #: The list of dictionaries with detailed information about VM NICs. + nic_details = resource.Body('nic_details') + #: The ID for the server. + server_id = resource.URI('server_id') diff --git a/openstack/compute/v2/server_group.py b/openstack/compute/v2/server_group.py index f57c8e58aa..597453a4d5 100644 --- a/openstack/compute/v2/server_group.py +++ b/openstack/compute/v2/server_group.py @@ -10,30 +10,130 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service -from openstack import resource2 +from openstack import exceptions +from openstack import resource +from openstack import utils -class ServerGroup(resource2.Resource): +class ServerGroup(resource.Resource): resource_key = 'server_group' resources_key = 'server_groups' base_path = '/os-server-groups' - service = compute_service.ComputeService() - _query_mapping = resource2.QueryParameters("all_projects") + _query_mapping = resource.QueryParameters("all_projects") + + _max_microversion = '2.64' # capabilities allow_create = True - allow_get = True + allow_fetch = True allow_delete = True allow_list = True # Properties #: A name identifying the server group - name = resource2.Body('name') - #: The list of policies supported by the server group - policies = resource2.Body('policies') + name = resource.Body('name') + #: The list of policies supported by the server group (till 2.63) + policies = resource.Body('policies') + #: The policy field represents the name of the policy 
(from 2.64) + policy = resource.Body('policy') #: The list of members in the server group - member_ids = resource2.Body('members') - #: The metadata associated with the server group - metadata = resource2.Body('metadata') + member_ids = resource.Body('members') + #: The metadata associated with the server group. This is always empty and + #: only used for preserving compatibility. + metadata = resource.Body('metadata') + #: The project ID who owns the server group. + project_id = resource.Body('project_id') + #: The rules field, which is a dict, can be applied to the policy. + #: Currently, only the max_server_per_host rule is supported for the + #: anti-affinity policy. The max_server_per_host rule allows specifying how + #: many members of the anti-affinity group can reside on the same compute + #: host. If not specified, only one member from the same anti-affinity + #: group can reside on a given host. + rules = resource.Body('rules', type=dict) + #: The user ID who owns the server group + user_id = resource.Body('user_id') + + # TODO(stephenfin): It would be nice to have a hookpoint to do this + # microversion-based request manipulation, but we don't have anything like + # that right now + def create(self, session, prepend_key=True, base_path=None, **params): + """Create a remote resource based on this instance. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation request. Default to + True. + :param str base_path: Base part of the URI for creating resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param dict params: Additional params to pass. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_create` is not set to ``True``. 
+ """ + if not self.allow_create: + raise exceptions.MethodNotSupported(self, 'create') + + session = self._get_session(session) + microversion = self._get_microversion(session) + requires_id = ( + self.create_requires_id + if self.create_requires_id is not None + else self.create_method == 'PUT' + ) + + if self.create_exclude_id_from_body: + self._body._dirty.discard("id") + + # `policy` and `rules` are added with mv=2.64. In it also + # `policies` are removed. + if utils.supports_microversion(session, '2.64'): + if self.policies: + if not self.policy and isinstance(self.policies, list): + self.policy = self.policies[0] + self._body.clean(only={'policies'}) + microversion = self._max_microversion + else: # microversion < 2.64 + if self.rules: + msg = ( + "API version 2.64 is required to set rules, but " + "it is not available." + ) + raise exceptions.NotSupported(msg) + + if self.policy: + if not self.policies: + self.policies = [self.policy] + self._body.clean(only={'policy'}) + + if self.create_method == 'POST': + request = self._prepare_request( + requires_id=requires_id, + prepend_key=prepend_key, + base_path=base_path, + ) + response = session.post( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + params=params, + ) + else: + raise exceptions.ResourceFailure( + f"Invalid create method: {self.create_method}" + ) + + has_body = ( + self.has_body + if self.create_returns_body is None + else self.create_returns_body + ) + self.microversion = microversion + self._translate_response(response, has_body=has_body) + # direct comparison to False since we need to rule out None + if self.has_body and self.create_returns_body is False: + # fetch the body if it's required but not returned by create + return self.fetch(session) + return self diff --git a/openstack/compute/v2/server_interface.py b/openstack/compute/v2/server_interface.py index 951f6e422f..52d3bffa83 100644 --- a/openstack/compute/v2/server_interface.py +++ 
b/openstack/compute/v2/server_interface.py @@ -10,32 +10,34 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service -from openstack import resource2 +from openstack import resource -class ServerInterface(resource2.Resource): +class ServerInterface(resource.Resource): resource_key = 'interfaceAttachment' resources_key = 'interfaceAttachments' base_path = '/servers/%(server_id)s/os-interface' - service = compute_service.ComputeService() # capabilities allow_create = True - allow_get = True - allow_update = False + allow_fetch = True + allow_commit = False allow_delete = True allow_list = True #: Fixed IP addresses with subnet IDs. - fixed_ips = resource2.Body('fixed_ips') + fixed_ips = resource.Body('fixed_ips') #: The MAC address. - mac_addr = resource2.Body('mac_addr') + mac_addr = resource.Body('mac_addr') #: The network ID. - net_id = resource2.Body('net_id') + net_id = resource.Body('net_id') #: The ID of the port for which you want to create an interface. - port_id = resource2.Body('port_id', alternate_id=True) + port_id = resource.Body('port_id', alternate_id=True) #: The port state. - port_state = resource2.Body('port_state') + port_state = resource.Body('port_state') #: The ID for the server. - server_id = resource2.URI('server_id') + server_id = resource.URI('server_id') + #: Tags for the virtual interfaces. + tag = resource.Body('tag') + # tag introduced in 2.70 + _max_microversion = '2.70' diff --git a/openstack/compute/v2/server_ip.py b/openstack/compute/v2/server_ip.py index bc90d645de..519a48fc63 100644 --- a/openstack/compute/v2/server_ip.py +++ b/openstack/compute/v2/server_ip.py @@ -10,38 +10,56 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.compute import compute_service -from openstack import resource2 +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack import resource from openstack import utils -class ServerIP(resource2.Resource): +class ServerIP(resource.Resource): resources_key = 'addresses' base_path = '/servers/%(server_id)s/ips' - service = compute_service.ComputeService() # capabilities allow_list = True # Properties #: The IP address. The format of the address depends on :attr:`version` - address = resource2.Body('addr') + address = resource.Body('addr') #: The network label, such as public or private. - network_label = resource2.URI('network_label') + network_label = resource.URI('network_label') #: The ID for the server. - server_id = resource2.URI('server_id') + server_id = resource.URI('server_id') # Version of the IP protocol. Currently either 4 or 6. - version = resource2.Body('version') + version = resource.Body('version') @classmethod - def list(cls, session, paginated=False, server_id=None, - network_label=None, **params): - url = cls.base_path % {"server_id": server_id} + def list( + cls, + session: adapter.Adapter, + paginated: bool = False, + base_path: str | None = None, + allow_unknown_params: bool = False, + *, + microversion: str | None = None, + headers: dict[str, str] | None = None, + max_items: int | None = None, + server_id: str | None = None, + network_label: str | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: + if base_path is None: + base_path = cls.base_path + + url = base_path % {"server_id": server_id} if network_label is not None: url = utils.urljoin(url, network_label) - resp = session.get(url, endpoint_filter=cls.service) + resp = session.get(url) resp = resp.json() if network_label is None: @@ -49,6 +67,8 @@ def list(cls, session, paginated=False, server_id=None, for label, addresses in resp.items(): for address in addresses: - yield 
cls.existing(network_label=label, - address=address["addr"], - version=address["version"]) + yield cls.existing( + network_label=label, + address=address["addr"], + version=address["version"], + ) diff --git a/openstack/compute/v2/server_migration.py b/openstack/compute/v2/server_migration.py new file mode 100644 index 0000000000..fbc6a2ccce --- /dev/null +++ b/openstack/compute/v2/server_migration.py @@ -0,0 +1,91 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class ServerMigration(resource.Resource): + resource_key = 'migration' + resources_key = 'migrations' + base_path = '/servers/%(server_id)s/migrations' + + # capabilities + allow_fetch = True + allow_list = True + allow_delete = True + + #: The ID for the server from the URI of the resource + server_id = resource.URI('server_id') + + #: The date and time when the resource was created. + created_at = resource.Body('created_at') + #: The target host of the migration. + dest_host = resource.Body('dest_host') + #: The target compute of the migration. + dest_compute = resource.Body('dest_compute') + #: The target node of the migration. + dest_node = resource.Body('dest_node') + #: The amount of disk, in bytes, that has been processed during the + #: migration. + disk_processed_bytes = resource.Body('disk_processed_bytes') + #: The amount of disk, in bytes, that still needs to be migrated. 
+ disk_remaining_bytes = resource.Body('disk_remaining_bytes') + #: The total amount of disk, in bytes, that needs to be migrated. + disk_total_bytes = resource.Body('disk_total_bytes') + #: The amount of memory, in bytes, that has been processed during the + #: migration. + memory_processed_bytes = resource.Body('memory_processed_bytes') + #: The amount of memory, in bytes, that still needs to be migrated. + memory_remaining_bytes = resource.Body('memory_remaining_bytes') + #: The total amount of memory, in bytes, that needs to be migrated. + memory_total_bytes = resource.Body('memory_total_bytes') + #: The ID of the project that initiated the server migration (since + #: microversion 2.80) + project_id = resource.Body('project_id') + #: The UUID of the server from the response body + server_uuid = resource.Body('server_uuid') + #: The source compute of the migration. + source_compute = resource.Body('source_compute') + #: The source node of the migration. + source_node = resource.Body('source_node') + #: The current status of the migration. + status = resource.Body('status') + #: The date and time when the resource was last updated. 
+ updated_at = resource.Body('updated_at') + #: The ID of the user that initiated the server migration (since + #: microversion 2.80) + user_id = resource.Body('user_id') + #: The UUID of the migration (since microversion 2.59) + uuid = resource.Body('uuid', alternate_id=True) + + _max_microversion = '2.80' + + def _action(self, session, body): + """Perform server migration actions given the message body.""" + session = self._get_session(session) + microversion = self._get_microversion(session) + + url = utils.urljoin( + self.base_path % {'server_id': self.server_id}, + self.id, + 'action', + ) + response = session.post(url, microversion=microversion, json=body) + exceptions.raise_from_response(response) + return response + + def force_complete(self, session): + """Force on-going live migration to complete.""" + body = {'force_complete': None} + self._action(session, body) diff --git a/openstack/compute/v2/server_remote_console.py b/openstack/compute/v2/server_remote_console.py new file mode 100644 index 0000000000..7d62ea6dd9 --- /dev/null +++ b/openstack/compute/v2/server_remote_console.py @@ -0,0 +1,68 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource +from openstack import utils + +CONSOLE_TYPE_PROTOCOL_MAPPING = { + 'novnc': 'vnc', + 'xvpvnc': 'vnc', + 'spice-html5': 'spice', + 'spice-direct': 'spice', + 'rdp-html5': 'rdp', + 'serial': 'serial', + 'webmks': 'mks', +} + + +class ServerRemoteConsole(resource.Resource): + resource_key = 'remote_console' + base_path = '/servers/%(server_id)s/remote-consoles' + + # capabilities + allow_create = True + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = False + + _max_microversion = '2.99' + + #: Protocol of the remote console. + protocol = resource.Body('protocol') + #: Type of the remote console. + type = resource.Body('type') + #: URL used to connect to the console. + url = resource.Body('url') + #: The ID for the server. + server_id = resource.URI('server_id') + + def create(self, session, prepend_key=True, base_path=None, **params): + if not self.protocol: + self.protocol = CONSOLE_TYPE_PROTOCOL_MAPPING.get(self.type) + if ( + not utils.supports_microversion(session, '2.8') + and self.type == 'webmks' + ): + raise ValueError( + 'Console type webmks is not supported on server side' + ) + if ( + not utils.supports_microversion(session, '2.99') + and self.type == 'spice-direct' + ): + raise ValueError( + 'Console type spice-direct is not supported on server side' + ) + return super().create( + session, prepend_key=prepend_key, base_path=base_path, **params + ) diff --git a/openstack/compute/v2/service.py b/openstack/compute/v2/service.py index 9a48a3921b..68b0ee5bc0 100644 --- a/openstack/compute/v2/service.py +++ b/openstack/compute/v2/service.py @@ -10,53 +10,192 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.compute import compute_service -from openstack import resource2 +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack import resource from openstack import utils -class Service(resource2.Resource): +class Service(resource.Resource): resource_key = 'service' resources_key = 'services' base_path = '/os-services' - service = compute_service.ComputeService() - # capabilities - allow_get = True allow_list = True - allow_update = True + allow_commit = True + allow_delete = True + + _query_mapping = resource.QueryParameters( + 'name', + 'binary', + 'host', + name='binary', + ) # Properties - #: Status of service - status = resource2.Body('status') - #: State of service - state = resource2.Body('state') - #: Name of service - binary = resource2.Body('binary') - #: Id of service - id = resource2.Body('id') - #: Disabled reason of service - disables_reason = resource2.Body('disabled_reason') - #: Host where service runs - host = resource2.Body('host') #: The availability zone of service - zone = resource2.Body("zone") + availability_zone = resource.Body("zone") + #: Binary name of service + binary = resource.Body('binary') + #: Disabled reason of service + disabled_reason = resource.Body('disabled_reason') + #: Whether or not this service was forced down manually by an administrator + #: after the service was fenced + is_forced_down = resource.Body('forced_down', type=bool) + #: The name of the host where service runs + host = resource.Body('host') + #: Service name + name = resource.Body('name', alias='binary') + #: State of service + state = resource.Body('state') + #: Status of service + status = resource.Body('status') + #: The date and time when the resource was updated + updated_at = resource.Body('updated_at') - def _action(self, session, action, body): - url = utils.urljoin(Service.base_path, action) - return session.put(url, endpoint_filter=self.service, 
json=body) + _max_microversion = '2.69' + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... - def force_down(self, session, host, binary): - """Force a service down.""" + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: + # No direct request possible, thus go directly to list + if list_base_path: + params['base_path'] = list_base_path + + # all_projects is a special case that is used by multiple services. 
We + # handle it here since it doesn't make sense to pass it to the .fetch + # call above + if all_projects is not None: + params['all_projects'] = all_projects + + data = cls.list( + session, + base_path=list_base_path, + ) + + result = None + for maybe_result in data: + # Since ID might be both int and str force cast + id_value = str(cls._get_id(maybe_result)) + name_value = maybe_result.name + + if str(name_or_id) in (id_value, name_value): + if 'host' in params and maybe_result['host'] != params['host']: + continue + # Only allow one resource to be found. If we already + # found a match, raise an exception to show it. + if result is None: + result = maybe_result + else: + msg = "More than one %s exists with the name '%s'." + msg = msg % (cls.__name__, name_or_id) + raise exceptions.DuplicateResource(msg) + + if result is not None: + return result + + if ignore_missing: + return None + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) + + def commit(self, session, prepend_key=False, *args, **kwargs): + # we need to set prepend_key to false + return super().commit(session, prepend_key, *args, **kwargs) + + def _action(self, session, action, body, microversion=None): + if not microversion: + microversion = session.default_microversion + url = utils.urljoin(Service.base_path, action) + response = session.put(url, json=body, microversion=microversion) + self._translate_response(response) + return self + + def set_forced_down(self, session, host=None, binary=None, forced=False): + """Update forced_down information of a service.""" + microversion = session.default_microversion + body = {} + if not host: + host = self.host + if not binary: + binary = self.binary body = { 'host': host, 'binary': binary, - 'forced_down': True, } + if utils.supports_microversion(session, '2.11'): + body['forced_down'] = forced + # Using forced_down works only 2.11-2.52, therefore pin it + microversion = '2.11' + + # This will not work with newest 
microversions + return self._action( + session, + 'force-down', + body, + microversion=microversion, + ) - return self._action(session, 'force-down', body) + force_down = set_forced_down def enable(self, session, host, binary): """Enable service.""" diff --git a/openstack/compute/v2/usage.py b/openstack/compute/v2/usage.py new file mode 100644 index 0000000000..976a65012c --- /dev/null +++ b/openstack/compute/v2/usage.py @@ -0,0 +1,104 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ServerUsage(resource.Resource): + resource_key = None + resources_key = None + + # Capabilities + allow_create = False + allow_fetch = False + allow_delete = False + allow_list = False + allow_commit = False + + # Properties + #: The duration that the server exists (in hours). + hours = resource.Body('hours') + #: The display name of a flavor. + flavor = resource.Body('flavor') + #: The UUID of the server. + instance_id = resource.Body('instance_id') + #: The server name. + name = resource.Body('name') + #: The UUID of the project in a multi-tenancy cloud. + project_id = resource.Body('tenant_id') + #: The memory size of the server (in MiB). + memory_mb = resource.Body('memory_mb') + #: The sum of the root disk size of the server and the ephemeral disk size + #: of it (in GiB). + local_gb = resource.Body('local_gb') + #: The number of virtual CPUs that the server uses. 
+ vcpus = resource.Body('vcpus') + #: The date and time when the server was launched. + started_at = resource.Body('started_at') + #: The date and time when the server was deleted. + ended_at = resource.Body('ended_at') + #: The VM state. + state = resource.Body('state') + #: The uptime of the server. + uptime = resource.Body('uptime') + + +class Usage(resource.Resource): + resource_key = 'tenant_usage' + resources_key = 'tenant_usages' + base_path = '/os-simple-tenant-usage' + + # Capabilities + allow_create = False + allow_fetch = True + allow_delete = False + allow_list = True + allow_commit = False + + # TODO(stephenfin): Add 'start', 'end'. These conflict with the body + # responses though. + _query_mapping = resource.QueryParameters( + "detailed", + "limit", + "marker", + "start", + "end", + ) + + # Properties + #: The UUID of the project in a multi-tenancy cloud. + project_id = resource.Body('tenant_id') + #: A list of the server usage objects. + server_usages = resource.Body( + 'server_usages', + type=list, + list_type=ServerUsage, + ) + #: Multiplying the server disk size (in GiB) by hours the server exists, + #: and then adding that all together for each server. + total_local_gb_usage = resource.Body('total_local_gb_usage') + #: Multiplying the number of virtual CPUs of the server by hours the server + #: exists, and then adding that all together for each server. + total_vcpus_usage = resource.Body('total_vcpus_usage') + #: Multiplying the server memory size (in MiB) by hours the server exists, + #: and then adding that all together for each server. + total_memory_mb_usage = resource.Body('total_memory_mb_usage') + #: The total duration that servers exist (in hours). + total_hours = resource.Body('total_hours') + #: The beginning time to calculate usage statistics on compute and storage + #: resources. + start = resource.Body('start') + #: The ending time to calculate usage statistics on compute and storage + #: resources. 
+ stop = resource.Body('stop') + + _max_microversion = '2.75' diff --git a/openstack/compute/v2/volume_attachment.py b/openstack/compute/v2/volume_attachment.py new file mode 100644 index 0000000000..322c553f7d --- /dev/null +++ b/openstack/compute/v2/volume_attachment.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class VolumeAttachment(resource.Resource): + resource_key = 'volumeAttachment' + resources_key = 'volumeAttachments' + base_path = '/servers/%(server_id)s/os-volume_attachments' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters("limit", "offset") + + #: The ID for the server. + server_id = resource.URI('server_id') + #: Name of the device such as, /dev/vdb. + device = resource.Body('device') + #: The ID of the attachment. + id = resource.Body('id') + # FIXME(stephenfin): This conflicts since there is a server ID in the URI + # *and* in the body. We need a field that handles both or we need to use + # different names. + # #: The UUID of the server + # server_id = resource.Body('server_uuid') + #: The ID of the attached volume. + volume_id = resource.Body('volumeId', alternate_id=True) + #: The UUID of the associated volume attachment in Cinder. 
+ attachment_id = resource.Body('attachment_id') + #: The ID of the block device mapping record for the attachment + bdm_id = resource.Body('bdm_uuid') + #: Virtual device tags for the attachment. + tag = resource.Body('tag') + #: Indicates whether to delete the volume when server is destroyed + delete_on_termination = resource.Body('delete_on_termination') + # attachment_id (in responses) and bdm_id introduced in 2.89 + _max_microversion = '2.89' diff --git a/openstack/compute/version.py b/openstack/compute/version.py index 23a2398f51..f78d3addd9 100644 --- a/openstack/compute/version.py +++ b/openstack/compute/version.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.compute import compute_service from openstack import resource @@ -18,14 +17,11 @@ class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' - service = compute_service.ComputeService( - version=compute_service.ComputeService.UNVERSIONED - ) # capabilities allow_list = True # Properties - links = resource.prop('links') - status = resource.prop('status') - updated = resource.prop('updated') + links = resource.Body('links') + status = resource.Body('status') + updated = resource.Body('updated') diff --git a/openstack/config/__init__.py b/openstack/config/__init__.py new file mode 100644 index 0000000000..30cf605f09 --- /dev/null +++ b/openstack/config/__init__.py @@ -0,0 +1,69 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import argparse +import sys +import typing as ty + +from openstack.config.loader import OpenStackConfig + +if ty.TYPE_CHECKING: + from openstack.config import cloud_region + + +__all__ = [ + 'OpenStackConfig', + 'cloud_region', + 'get_cloud_region', +] + + +# TODO(stephenfin): Expand kwargs once we've typed OpenstackConfig.get_one +def get_cloud_region( + service_key: str | None = None, + options: argparse.ArgumentParser | None = None, + app_name: str | None = None, + app_version: str | None = None, + load_yaml_config: bool = True, + load_envvars: bool = True, + **kwargs: ty.Any, +) -> 'cloud_region.CloudRegion': + """Retrieve a single CloudRegion and merge additional options + + :param service_key: Service this argparse should be specialized for, if + known. This will be used as the default value for service_type. + :param options: Parser to attach additional options to + :param app_name: Name of the application to be added to User Agent. + :param app_version: Version of the application to be added to User Agent. + :param load_yaml_config: Whether to load configuration from clouds.yaml and + related configuration files. + :param load_envvars: Whether to load configuration from environment + variables + :returns: A populated + :class:`~openstack.config.cloud.cloud_region.CloudRegion` object. 
+ """ + config = OpenStackConfig( + load_yaml_config=load_yaml_config, + load_envvars=load_envvars, + app_name=app_name, + app_version=app_version, + ) + if options: + service_keys = [service_key] if service_key is not None else [] + config.register_argparse_arguments(options, sys.argv, service_keys) + parsed_options, _ = options.parse_known_args(sys.argv) + else: + parsed_options = None + + return config.get_one(argparse=parsed_options, **kwargs) diff --git a/openstack/config/_util.py b/openstack/config/_util.py new file mode 100644 index 0000000000..3f6e8a9a5f --- /dev/null +++ b/openstack/config/_util.py @@ -0,0 +1,65 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + + +def normalize_keys(config: dict[str, ty.Any]) -> dict[str, ty.Any]: + new_config: dict[str, ty.Any] = {} + for key, value in config.items(): + key = key.replace('-', '_') + if isinstance(value, dict): + new_config[key] = normalize_keys(value) + elif isinstance(value, bool): + new_config[key] = value + elif isinstance(value, int) and key not in ( + 'verbose_level', + 'api_timeout', + ): + new_config[key] = str(value) + elif isinstance(value, float): + new_config[key] = str(value) + else: + new_config[key] = value + return new_config + + +def merge_clouds( + old_dict: dict[str, ty.Any], new_dict: dict[str, ty.Any] +) -> dict[str, ty.Any]: + """Like dict.update, except handling nested dicts.""" + ret = old_dict.copy() + for k, v in new_dict.items(): + if isinstance(v, dict): + if k in ret: + ret[k] = merge_clouds(ret[k], v) + else: + ret[k] = v.copy() + else: + ret[k] = v + return ret + + +class VersionRequest: + def __init__( + self, + version: str | None = None, + min_api_version: str | None = None, + max_api_version: str | None = None, + default_microversion: str | None = None, + ) -> None: + self.version = version + self.min_api_version = min_api_version + self.max_api_version = max_api_version + self.default_microversion = default_microversion diff --git a/openstack/config/cloud_config.py b/openstack/config/cloud_config.py new file mode 100644 index 0000000000..3c695a8f56 --- /dev/null +++ b/openstack/config/cloud_config.py @@ -0,0 +1,95 @@ +# Copyright (c) 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty +import warnings +# TODO(mordred) This is only here to ease the OSC transition + +from openstack.config import cloud_region +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + from keystoneauth1 import discover + from keystoneauth1 import plugin + from keystoneauth1 import session as ks_session + import prometheus_client + + from openstack.config import loader + + +class CloudConfig(cloud_region.CloudRegion): + def __init__( + self, + name: str | None, + region: str | None, + config: dict[str, ty.Any] | None, + force_ipv4: bool = False, + auth_plugin: ty.Optional['plugin.BaseAuthPlugin'] = None, + openstack_config: ty.Optional['loader.OpenStackConfig'] = None, + session_constructor: type['ks_session.Session'] | None = None, + app_name: str | None = None, + app_version: str | None = None, + session: ty.Optional['ks_session.Session'] = None, + discovery_cache: dict[str, 'discover.Discover'] | None = None, + extra_config: dict[str, ty.Any] | None = None, + cache_expiration_time: int = 0, + cache_expirations: dict[str, int] | None = None, + cache_path: str | None = None, + cache_class: str = 'dogpile.cache.null', + cache_arguments: dict[str, ty.Any] | None = None, + password_callback: cloud_region._PasswordCallback | None = None, + statsd_host: str | None = None, + statsd_port: str | None = None, + statsd_prefix: str | None = None, + # TODO(stephenfin): Add better types + influxdb_config: dict[str, ty.Any] | None = None, + collector_registry: ty.Optional[ + 'prometheus_client.CollectorRegistry' + ] = None, + cache_auth: bool = False, + ) -> None: + warnings.warn( + 'The CloudConfig class has been deprecated in favour of ' + 'CloudRegion. 
Please update your references.', + os_warnings.RemovedInSDK60Warning, + ) + + self.region = region + + super().__init__( + name, + region, + config, + force_ipv4=force_ipv4, + auth_plugin=auth_plugin, + openstack_config=openstack_config, + session_constructor=session_constructor, + app_name=app_name, + app_version=app_version, + session=session, + discovery_cache=discovery_cache, + extra_config=extra_config, + cache_expiration_time=cache_expiration_time, + cache_expirations=cache_expirations, + cache_path=cache_path, + cache_class=cache_class, + cache_arguments=cache_arguments, + password_callback=password_callback, + statsd_host=statsd_host, + statsd_port=statsd_port, + statsd_prefix=statsd_prefix, + influxdb_config=influxdb_config, + collector_registry=collector_registry, + cache_auth=cache_auth, + ) diff --git a/openstack/config/cloud_region.py b/openstack/config/cloud_region.py new file mode 100644 index 0000000000..48011922c6 --- /dev/null +++ b/openstack/config/cloud_region.py @@ -0,0 +1,1543 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import collections.abc +import copy +import os.path +import typing as ty +from urllib import parse +import warnings + +try: + import keyring +except ImportError: + keyring = None # type: ignore[assignment] + +from keystoneauth1.access import service_catalog as ks_service_catalog +from keystoneauth1 import discover +import keystoneauth1.exceptions.catalog +from keystoneauth1.identity import base as ks_identity_base +from keystoneauth1.loading import adapter as ks_load_adap +from keystoneauth1 import plugin +from keystoneauth1 import session as ks_session +import os_service_types +import urllib3.exceptions + +try: + import statsd as statsd_client +except ImportError: + statsd_client = None +try: + import prometheus_client +except ImportError: + prometheus_client = None # type: ignore[assignment] +try: + # NOTE(stephenfin): This library is EOL so we explicitly don't have it in + # our dependencies + import influxdb as influxdb_client # type: ignore[import-not-found] +except ImportError: + influxdb_client = None + +from openstack import _log +from openstack.config import _util +from openstack.config import defaults as config_defaults +from openstack import exceptions +from openstack import proxy +from openstack import version as openstack_version +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + from oslo_config import cfg + + from openstack.config import loader + + +_T = ty.TypeVar('_T') + +_logger = _log.setup_logging('openstack') + +SCOPE_KEYS = { + 'domain_id', + 'domain_name', + 'project_id', + 'project_name', + 'system_scope', +} + +# Sentinel for nonexistence +_ENOENT = object() + + +class _PasswordCallback(ty.Protocol): + def __call__(self, prompt: str | None = None) -> str: ... 
+ + +def _make_key(key: str, service_type: str | None) -> str: + if not service_type: + return key + else: + service_type = service_type.lower().replace('-', '_') + return "_".join([service_type, key]) + + +def _disable_service( + config: dict[str, ty.Any], + service_type: str, + reason: str | None = None, +) -> None: + service_type = service_type.lower().replace('-', '_') + key = f'has_{service_type}' + config[key] = False + if reason: + d_key = _make_key('disabled_reason', service_type) + config[d_key] = reason + + +def _get_implied_microversion(version: str | None) -> str | None: + if version and '.' in version: + # Some services historically had a .0 in their normal api version. + # Neutron springs to mind with version "2.0". If a user has "2.0" + # set in a variable or config file just because history, we don't + # need to send any microversion headers. + if version.split('.')[1] != "0": + return version + + return None + + +def from_session( + session: ks_session.Session, + name: str | None = None, + region_name: str | None = None, + force_ipv4: bool = False, + app_name: str | None = None, + app_version: str | None = None, + **kwargs: ty.Any, +) -> 'CloudRegion': + """Construct a CloudRegion from an existing `keystoneauth1.session.Session` + + When a Session already exists, we don't actually even need to go through + the OpenStackConfig.get_one_cloud dance. We have a Session with Auth info. + The only parameters that are really needed are adapter/catalog related. + + :param keystoneauth1.session.session session: + An existing authenticated Session to use. + :param str name: + A name to use for this cloud region in logging. If left empty, the + hostname of the auth_url found in the Session will be used. + :param str region_name: + The region name to connect to. + :param bool force_ipv4: + Whether or not to disable IPv6 support. Defaults to False. + :param str app_name: + Name of the application to be added to User Agent. 
+ :param str app_version: + Version of the application to be added to User Agent. + :param kwargs: + Config settings for this cloud region. + """ + config_dict = config_defaults.get_defaults() + config_dict.update(**kwargs) + return CloudRegion( + name=name, + session=session, + config=config_dict, + region_name=region_name, + force_ipv4=force_ipv4, + app_name=app_name, + app_version=app_version, + ) + + +def from_conf( + conf: 'cfg.ConfigOpts', + session: ks_session.Session | None = None, + service_types: list[str] | None = None, + **kwargs: ty.Any, +) -> 'CloudRegion': + """Create a CloudRegion from oslo.config ConfigOpts. + + :param oslo_config.cfg.ConfigOpts conf: + An oslo.config ConfigOpts containing keystoneauth1.Adapter options in + sections named according to project (e.g. [nova], not [compute]). + TODO: Current behavior is to use defaults if no such section exists, + which may not be what we want long term. + :param keystoneauth1.session.Session session: + An existing authenticated Session to use. This is currently required. + TODO: Load this (and auth) from the conf. + :param service_types: + A list/set of service types for which to look for and process config + opts. If None, all known service types are processed. Note that we will + not error if a supplied service type can not be processed successfully + (unless you try to use the proxy, of course). This tolerates uses where + the consuming code has paths for a given service, but those paths are + not exercised for given end user setups, and we do not want to generate + errors for e.g. missing/invalid conf sections in those cases. We also + don't check to make sure your service types are spelled correctly - + caveat implementor. + :param kwargs: + Additional keyword arguments to be passed directly to the CloudRegion + constructor. + :raise openstack.exceptions.ConfigException: + If session is not specified. + :return: + An openstack.config.cloud_region.CloudRegion. 
+ """ + if not session: + # TODO(mordred) Fill this in - not needed for first stab with nova + raise exceptions.ConfigException("A Session must be supplied.") + config_dict = kwargs.pop('config', config_defaults.get_defaults()) + stm = os_service_types.ServiceTypes() + for st in stm.all_types_by_service_type: + if service_types is not None and st not in service_types: + _disable_service( + config_dict, + st, + reason="Not in the list of requested service_types.", + ) + continue + project_name = stm.get_project_name(st) + if project_name is None: + _disable_service( + config_dict, + st, + reason=f"No project name found for service type '{st}'.", + ) + continue + if project_name not in conf: + if '-' in project_name: + project_name = project_name.replace('-', '_') + + if project_name not in conf: + _disable_service( + config_dict, + st, + reason=( + f"No section for project '{project_name}' (service " + f"type '{st}') was present in the config." + ), + ) + continue + opt_dict: dict[str, str] = {} + # Populate opt_dict with (appropriately processed) Adapter conf opts + try: + ks_load_adap.process_conf_options(conf[project_name], opt_dict) + except Exception as e: + # NOTE(efried): This is for (at least) a couple of scenarios: + # (1) oslo_config.cfg.NoSuchOptError when ksa adapter opts are not + # registered in this section. + # (2) TypeError, when opts are registered but bogus (e.g. + # 'interface' and 'valid_interfaces' are both present). + # We may want to consider (providing a kwarg giving the caller the + # option of) blowing up right away for (2) rather than letting them + # get all the way to the point of trying the service and having + # *that* blow up. 
+ reason = ( + "Encountered an exception attempting to process config " + f"for project '{project_name}' (service type " + f"'{st}'): {e}" + ) + _logger.warning(f"Disabling service '{st}': {reason}") + _disable_service(config_dict, st, reason=reason) + continue + # Load them into config_dict under keys prefixed by ${service_type}_ + for raw_name, opt_val in opt_dict.items(): + config_name = _make_key(raw_name, st) + config_dict[config_name] = opt_val + return CloudRegion(session=session, config=config_dict, **kwargs) + + +class CloudRegion: + # TODO(efried): Doc the rest of the kwargs + """The configuration for a Region of an OpenStack Cloud. + + A CloudRegion encapsulates the config information needed for connections + to all of the services in a Region of a Cloud. + + :param name: + :param region_name: + The default region name for all services in this CloudRegion. If + both ``region_name`` and ``config['region_name']`` are specified, the + kwarg takes precedence. May be overridden for a given ${service} + via a ${service}_region_name key in the ``config`` dict. + :param config: + A dict of configuration values for the CloudRegion and its + services. The key for a ${config_option} for a specific ${service} + should be ${service}_${config_option}. For example, to configure + the endpoint_override for the block_storage service, the ``config`` + dict should contain:: + + 'block_storage_endpoint_override': 'http://...' 
+ + To provide a default to be used if no service-specific override is + present, just use the unprefixed ${config_option} as the service + key, e.g.:: + + 'interface': 'public' + :param force_ipv4: + :param auth_plugin: + :param openstack_config: + :param session_constructor: + :param app_name: + :param app_version: + :param session: + :param discovery_cache: + :param extra_config: + :param cache_expiration_time: + :param cache_expirations: + :param cache_path: + :param cache_class: + :param cache_arguments: + :param password_callback: + :param statsd_host: + :param statsd_port: + :param statsd_prefix: + :param influxdb_config: + :param collector_registry: + :param cache_auth: + """ + + def __init__( + self, + name: str | None = None, + region_name: str | None = None, + config: dict[str, ty.Any] | None = None, + force_ipv4: bool = False, + auth_plugin: plugin.BaseAuthPlugin | None = None, + openstack_config: ty.Optional['loader.OpenStackConfig'] = None, + session_constructor: type[ks_session.Session] | None = None, + app_name: str | None = None, + app_version: str | None = None, + session: ks_session.Session | None = None, + discovery_cache: dict[str, discover.Discover] | None = None, + extra_config: dict[str, ty.Any] | None = None, + cache_expiration_time: int = 0, + cache_expirations: dict[str, int] | None = None, + cache_path: str | None = None, + cache_class: str = 'dogpile.cache.null', + cache_arguments: dict[str, ty.Any] | None = None, + password_callback: _PasswordCallback | None = None, + statsd_host: str | None = None, + statsd_port: str | None = None, + statsd_prefix: str | None = None, + # TODO(stephenfin): Add better types + influxdb_config: dict[str, ty.Any] | None = None, + collector_registry: ty.Optional[ + 'prometheus_client.CollectorRegistry' + ] = None, + cache_auth: bool = False, + ) -> None: + self._name = name + self.config = _util.normalize_keys(config or {}) + # NOTE(efried): For backward compatibility: a) continue to accept the + # 
region_name kwarg; b) make it take precedence over (non-service_type- + # specific) region_name set in the config dict. + if region_name is not None: + self.config['region_name'] = region_name + self._extra_config = extra_config or {} + self.log = _log.setup_logging('openstack.config') + self._force_ipv4 = force_ipv4 + self._auth = auth_plugin + self._cache_auth = cache_auth + self.load_auth_from_cache() + self._openstack_config = openstack_config + self._keystone_session = session + self._session_constructor = session_constructor or ks_session.Session + self._app_name = app_name + self._app_version = app_version + self._discovery_cache = discovery_cache or None + self._cache_expiration_time = cache_expiration_time + self._cache_expirations = cache_expirations or {} + self._cache_path = cache_path + self._cache_class = cache_class + self._cache_arguments = cache_arguments + self._password_callback = password_callback + self._statsd_host = statsd_host + self._statsd_port = statsd_port + self._statsd_prefix = statsd_prefix + self._statsd_client = None + self._influxdb_config = influxdb_config + self._influxdb_client = None + + if influxdb_config is not None: + # NOTE(stephenfin): If you are a user and care about InfluxDB, + # please propose patches to migrate this to the influxdb3-python + # library [1]. Any migration should include tests. + # + # [1] https://github.com/InfluxCommunity/influxdb3-python + warnings.warn( + 'Support for InfluxDB requires the influxdb library which ' + 'only supports InfluxDB 1.x and is deprecated. As a result, ' + 'influxdb is also deprecated and will be removed in a future ' + 'release.', + os_warnings.RemovedInSDK60Warning, + ) + + self._collector_registry = collector_registry + + self._service_type_manager = os_service_types.ServiceTypes() + + def __getattr__(self, key: str) -> ty.Any: + """Return arbitrary attributes. + + This method accesses config via __dict__ to avoid infinite recursion + during copy.deepcopy(). 
When deepcopy creates a new instance without + calling __init__, self.config doesn't exist, and accessing it directly + would trigger __getattr__ again, causing a RecursionError. + + Dunder methods must raise AttributeError so Python can find inherited + implementations (e.g., __reduce_ex__ from object for pickling/copying). + """ + # Don't handle dunder methods - let Python find inherited impls + if key.startswith('__') and key.endswith('__'): + raise AttributeError(key) + + # Use __dict__.get() to safely check if config exists + config = self.__dict__.get('config') + if config is None: + raise AttributeError(key) + + if key.startswith('os_'): + key = key[3:] + + if key in [attr.replace('-', '_') for attr in config]: + return config[key] + else: + return None + + def __iter__(self) -> collections.abc.Iterator[ty.Any]: + return self.config.__iter__() + + def __eq__(self, other: object) -> bool: + if not isinstance(other, CloudRegion): + return NotImplemented + + return self.name == other.name and self.config == other.config + + def __ne__(self, other: object) -> bool: + return not self == other + + @property + def name(self) -> str: + if self._name is not None: + return self._name + + auth = self.get_session().auth + # not all auth plugins are identity plugins + if ( + auth + and isinstance(auth, ks_identity_base.BaseIdentityPlugin) + and auth.auth_url + ): + name = parse.urlparse(auth.auth_url).hostname or '' + else: + name = self._app_name or '' + + self._name = name + + return name + + @property + def full_name(self) -> str: + """Return a string that can be used as an identifier. + + Always returns a valid string. It will have name and region_name + or just one of the two if only one is set, or else 'unknown'. 
+ """ + region_name = self.get_region_name() + if self.name and region_name: + return ":".join([self.name, region_name]) + elif self.name and not region_name: + return self.name + elif not self.name and region_name: + return region_name + else: + return 'unknown' + + def set_service_value( + self, key: str, service_type: str, value: ty.Any + ) -> None: + key = _make_key(key, service_type) + self.config[key] = value + + def set_session_constructor( + self, session_constructor: type[ks_session.Session] + ) -> None: + """Sets the Session constructor.""" + self._session_constructor = session_constructor + + def get_requests_verify_args( + self, + ) -> tuple[bool | str | None, str | tuple[str, str] | None]: + """Return the verify and cert values for the requests library.""" + insecure = self.config.get('insecure', False) + verify = self.config.get('verify', True) + cacert = self.config.get('cacert') + # Insecure is the most aggressive setting, so it wins + if insecure: + verify = False + if verify and cacert: + verify = os.path.expanduser(cacert) + else: + if cacert: + warnings.warn( + f"You are specifying a cacert for the cloud " + f"{self.full_name} but also to ignore the host " + f"verification. 
The host SSL cert will not be verified.", + os_warnings.ConfigurationWarning, + ) + + cert = self.config.get('cert') + if cert: + cert = os.path.expanduser(cert) + if self.config.get('key'): + cert = (cert, os.path.expanduser(self.config['key'])) + return (verify, cert) + + def get_services(self) -> list[str]: + """Return a list of service types we know something about.""" + services = [] + for key, val in self.config.items(): + if ( + key.endswith('api_version') + or key.endswith('service_type') + or key.endswith('service_name') + ): + services.append("_".join(key.split('_')[:-2])) + return list(set(services)) + + def get_enabled_services(self) -> set[str]: + services = set() + + all_services = [ + k['service_type'] for k in self._service_type_manager.services + ] + all_services.extend( + k[4:] for k in self.config.keys() if k.startswith('has_') + ) + + for srv in all_services: + ep = self.get_endpoint_from_catalog(srv) + if ep: + services.add(srv.replace('-', '_')) + + return services + + def get_auth_args(self) -> dict[str, ty.Any]: + return ty.cast(dict[str, ty.Any], self.config.get('auth', {})) + + @ty.overload + def _get_config( + self, + key: str, + service_type: str | None, + default: _T, + fallback_to_unprefixed: bool = False, + ) -> _T: ... + + @ty.overload + def _get_config( + self, + key: str, + service_type: str | None, + default: None = None, + fallback_to_unprefixed: bool = False, + ) -> ty.Any | None: ... + + def _get_config( + self, + key: str, + service_type: str | None, + default: _T | None = None, + fallback_to_unprefixed: bool = False, + ) -> _T | ty.Any | None: + """Get a config value for a service_type. + + Finds the config value for a key, looking first for it prefixed by + the given service_type, then by any known aliases of that service_type. + Finally, if fallback_to_unprefixed is True, a value will be looked + for without a prefix to support the config values where a global + default makes sense. 
+ + For instance, ``_get_config('example', 'block-storage')`` would + first look for ``block_storage_example``, then ``volumev3_example``, + ``volumev2_example`` and ``volume_example``. If no value was found, it + would look for ``example``. + + If none of that works, it returns the value in ``default``. + """ + if service_type is None: + value = self.config.get(key) + else: + for st in self._service_type_manager.get_all_types(service_type): + _key = _make_key(key, st) + value = self.config.get(_key) + if value is not None: + key = _key + break + else: + if fallback_to_unprefixed: + value = self.config.get(key) + + if value is None: + return default + else: + return value + + def _get_service_config( + self, key: str, service_type: str + ) -> ty.Any | None: + config_dict = self.config.get(key) + if not config_dict: + return None + + if not isinstance(config_dict, dict): + raise RuntimeError( + f'invalid configuration for service type {service_type!r}' + ) + + for st in self._service_type_manager.get_all_types(service_type): + if st in config_dict: + return config_dict[st] + + return None + + def get_region_name(self, service_type: str | None = None) -> str | None: + # If a region_name for the specific service_type is configured, use it; + # else use the one configured for the CloudRegion as a whole. 
+ value = self._get_config( + 'region_name', service_type, fallback_to_unprefixed=True + ) + return str(value) if value is not None else value + + def get_interface( + self, service_type: str | None = None + ) -> list[str] | str | None: + value = self._get_config( + 'interface', service_type, fallback_to_unprefixed=True + ) + if value is None: + return value + if isinstance(value, str): + return value + if isinstance(value, list) and all( + {isinstance(x, str) or x is None for x in value} + ): + return value + + raise exceptions.ConfigException( + f'interface should be str, list of str or None but is ' + f'{type(value)}' + ) + + def get_api_version(self, service_type: str) -> str | None: + version = self._get_config('api_version', service_type) + if not version: + return None + + try: + float(version) + except ValueError: + if 'latest' in version: + warnings.warn( + "You have a configured API_VERSION with 'latest' in " + "it. In the context of openstacksdk this doesn't make " + "any sense.", + os_warnings.ConfigurationWarning, + ) + return None + + return str(version) + + def get_default_microversion(self, service_type: str) -> str | None: + value = self._get_config('default_microversion', service_type) + return str(value) if value is not None else value + + def get_service_type(self, service_type: str) -> str: + # People requesting 'volume' are doing so because os-client-config + # let them. What they want is block-storage, not explicitly the + # v1 of cinder. If someone actually wants v1, they'll have api_version + # set to 1, in which case block-storage will still work properly. + # Use service-types-manager to grab the official type name. _get_config + # will still look for config by alias, but starting with the official + # type will get us things in the right order. 
+ if self._service_type_manager.is_known(service_type): + official_type = self._service_type_manager.get_service_type( + service_type + ) + if official_type is not None: + service_type = official_type + value = self._get_config( + 'service_type', service_type, default=service_type + ) + return str(value) if value is not None else value + + def get_service_name(self, service_type: str) -> str | None: + value = self._get_config('service_name', service_type) + return str(value) if value is not None else value + + def get_endpoint(self, service_type: str) -> str | None: + auth = self.config.get('auth', {}) + value = self._get_config('endpoint_override', service_type) + if not value: + value = self._get_config('endpoint', service_type) + + if not value and self.config.get('auth_type') == 'none': + # If endpoint is given and we're using the none auth type, + # then the endpoint value is the endpoint_override for every + # service. + value = auth.get('endpoint') + + if ( + not value + and service_type == 'identity' + and SCOPE_KEYS.isdisjoint(set(auth.keys())) + ): + # There are a small number of unscoped identity operations. + # Specifically, looking up a list of projects/domains/system to + # scope to. + value = auth.get('auth_url') + + # Because of course. Seriously. + # We have to override the Rackspace block-storage endpoint because + # only v1 is in the catalog but the service actually does support + # v2. But the endpoint needs the project_id. 
+ official_type = self._service_type_manager.get_service_type( + service_type + ) + if official_type is not None: + service_type = official_type + if ( + value + and self.config.get('profile') == 'rackspace' + and service_type == 'block-storage' + ): + value = value + auth.get('project_id') + + return str(value) if value else None + + def get_endpoint_from_catalog( + self, + service_type: str, + interface: list[str] | str | None = None, + region_name: str | None = None, + ) -> str | None: + """Return the endpoint for a given service as found in the catalog. + + For values respecting endpoint overrides, see + :meth:`~openstack.connection.Connection.endpoint_for` + + :param service_type: Service Type of the endpoint to search for. + :param interface: + Interface of the endpoint to search for. Optional, defaults to + the configured value for interface for this Connection. + :param region_name: + Region Name of the endpoint to search for. Optional, defaults to + the configured value for region_name for this Connection. + + :returns: The endpoint of the service, or None if not found. 
+ """ + interface = interface or self.get_interface(service_type) + region_name = region_name or self.get_region_name(service_type) + session = self.get_session() + + auth = session.auth + if not isinstance(auth, ks_identity_base.BaseIdentityPlugin): + return None + + catalog = auth.get_access(session).service_catalog + try: + return catalog.url_for( + service_type=service_type, + interface=interface, + region_name=region_name, + ) + except (keystoneauth1.exceptions.catalog.EndpointNotFound, ValueError): + return None + + def get_connect_retries(self, service_type: str) -> int | None: + value = self._get_config( + 'connect_retries', service_type, fallback_to_unprefixed=True + ) + return int(value) if value is not None else value + + def get_connect_retry_delay(self, service_type: str) -> float | None: + value = self._get_config( + 'connect_retry_delay', service_type, fallback_to_unprefixed=True + ) + return float(value) if value is not None else value + + def get_status_code_retries(self, service_type: str) -> int | None: + value = self._get_config( + 'status_code_retries', service_type, fallback_to_unprefixed=True + ) + return int(value) if value is not None else value + + @property + def prefer_ipv6(self) -> bool: + return not self._force_ipv4 + + @property + def force_ipv4(self) -> bool: + return self._force_ipv4 + + def get_auth(self) -> plugin.BaseAuthPlugin | None: + """Return a keystoneauth plugin from the auth credentials.""" + return self._auth + + def skip_auth_cache(self) -> bool: + return not keyring or not self._auth or not self._cache_auth + + def load_auth_from_cache(self) -> None: + if self.skip_auth_cache(): + return + + assert self._auth is not None # narrow type + + cache_id = self._auth.get_cache_id() + + # skip if the plugin does not support caching + if not cache_id: + return + + try: + state = keyring.get_password('openstacksdk', cache_id) + except RuntimeError: # the fail backend raises this + state = None + + if not state: + 
self.log.debug('Failed to fetch auth from keyring') + return + + self.log.debug('Reusing authentication from keyring') + self._auth.set_auth_state(state) + + def set_auth_cache(self) -> None: + if self.skip_auth_cache(): + return + + assert self._auth is not None # narrow type + + cache_id = self._auth.get_cache_id() + # NOTE(stephenfin): The actual return type of this is a serialized JSON + # object + state = ty.cast(str, self._auth.get_auth_state()) + + try: + if cache_id and state: + # NOTE: under some conditions the method may be invoked when + # auth is empty. This may lead to exception in the keyring lib, + # thus do nothing. + keyring.set_password('openstacksdk', cache_id, state) + except RuntimeError: # the fail backend raises this + self.log.debug('Failed to set auth into keyring') + + def insert_user_agent(self) -> None: + """Set sdk information into the user agent of the Session. + + .. warning:: + This method is here to be used by os-client-config. It exists + as a hook point so that os-client-config can provice backwards + compatibility and still be in the User Agent for people using + os-client-config directly. + + Normal consumers of SDK should use app_name and app_version. However, + if someone else writes a subclass of + :class:`~openstack.config.cloud_region.CloudRegion` it may be + desirable. 
+ """ + if not self._keystone_session: + return + + self._keystone_session.additional_user_agent.append( + ('openstacksdk', openstack_version.__version__) + ) + + def get_session(self) -> ks_session.Session: + """Return a keystoneauth session based on the auth credentials.""" + if self._keystone_session is not None: + return self._keystone_session + + if not self._auth: + raise exceptions.ConfigException("Problem with auth parameters") + + verify, cert = self.get_requests_verify_args() + + warnings.filterwarnings( + 'ignore', category=urllib3.exceptions.InsecurePlatformWarning + ) + # Turn off urllib3 warnings about insecure certs if we have + # explicitly configured requests to tell it we do not want + # cert verification + if not verify: + self.log.debug( + f"Turning off SSL warnings for {self.full_name} " + f"since verify=False" + ) + warnings.filterwarnings( + 'ignore', + category=urllib3.exceptions.InsecureRequestWarning, + ) + + self._keystone_session = self._session_constructor( + auth=self._auth, + verify=verify, + cert=cert, + timeout=self.config.get('api_timeout'), + collect_timing=bool(self.config.get('timing')), + discovery_cache=self._discovery_cache, + ) + self.insert_user_agent() + # Using old keystoneauth with new os-client-config fails if + # we pass in app_name and app_version. Those are not essential, + # nor a reason to bump our minimum, so just test for the session + # having the attribute post creation and set them then. 
+ if hasattr(self._keystone_session, 'app_name'): + self._keystone_session.app_name = self._app_name + if hasattr(self._keystone_session, 'app_version'): + self._keystone_session.app_version = self._app_version + + return self._keystone_session + + def get_service_catalog( + self, + ) -> ks_service_catalog.ServiceCatalog | None: + """Helper method to grab the service catalog.""" + # not all auth plugins are identity plugins + if not isinstance(self._auth, ks_identity_base.BaseIdentityPlugin): + return None + return self._auth.get_access(self.get_session()).service_catalog + + def _get_version_request( + self, service_type: str, version: str | None + ) -> _util.VersionRequest: + """Translate OCC version args to those needed by ksa adapter. + + If no version is requested explicitly and we have a configured version, + set the version parameter and let ksa deal with expanding that to + min=ver.0, max=ver.latest. + + If version is set, pass it through. + + If version is not set and we don't have a configured version, default + to latest. + + If version is set, contains a '.', and default_microversion is not + set, also pass it as a default microversion. + """ + version_request = _util.VersionRequest() + if version == 'latest': + version_request.max_api_version = 'latest' + return version_request + + if not version: + version = self.get_api_version(service_type) + + # Octavia doens't have a version discovery document. Hard-code an + # exception to this logic for now. 
+ if not version and service_type not in ('load-balancer',): + version_request.max_api_version = 'latest' + else: + version_request.version = version + + default_microversion = self.get_default_microversion(service_type) + implied_microversion = _get_implied_microversion(version) + if ( + implied_microversion + and default_microversion + and implied_microversion != default_microversion + ): + raise exceptions.ConfigException( + f"default_microversion of {default_microversion} was given " + f"for {service_type}, but api_version looks like a " + f"microversion as well. Please set api_version to just the " + f"desired major version, or omit default_microversion" + ) + if implied_microversion: + assert version is not None # type narrow + default_microversion = implied_microversion + # If we're inferring a microversion, don't pass the whole + # string in as api_version, since that tells keystoneauth + # we're looking for a major api version. + version_request.version = version[0] + + version_request.default_microversion = default_microversion + + return version_request + + def get_all_version_data( + self, service_type: str + ) -> list[discover.VersionData]: + """Retrieve version data for the given service. + + :param service_type: The service to fetch version data for. + :returns: A `~keystoneauth1.discover.VersionData` object containing the + version data for the requested service. + """ + # Seriously. Don't think about the existential crisis + # that is the next line. You'll wind up in cthulhu's lair. 
+ service_type = self.get_service_type(service_type) + region_name = self.get_region_name(service_type) + assert region_name is not None # narrow type + interface = self.get_interface(service_type) + assert interface is not None # narrow type + + interfaces = interface if isinstance(interface, list) else [interface] + + versions = self.get_session().get_all_version_data( + service_type=service_type, + interface=interface, + region_name=region_name, + ) + + region_versions = versions.get(region_name, {}) + for interface in interfaces: + interface_versions = region_versions.get(interface, {}) + service_version_data = interface_versions.get(service_type) + if service_version_data is not None: + return service_version_data + + return [] + + def _get_endpoint_from_catalog( + self, + service_type: str, + constructor: type[proxy.Proxy], + ) -> str: + adapter = constructor( + session=self.get_session(), + service_type=self.get_service_type(service_type), + service_name=self.get_service_name(service_type), + # https://review.opendev.org/c/openstack/keystoneauth/+/951183 + interface=self.get_interface(service_type), # type: ignore + region_name=self.get_region_name(service_type), + ) + endpoint = adapter.get_endpoint() + assert endpoint is not None # narrow type + return endpoint + + def _get_hardcoded_endpoint( + self, service_type: str, constructor: type[proxy.Proxy] + ) -> str: + endpoint = self._get_endpoint_from_catalog(service_type, constructor) + if not endpoint.rstrip().rsplit('/')[-1] == 'v2.0': + if not endpoint.endswith('/'): + endpoint += '/' + endpoint = parse.urljoin(endpoint, 'v2.0') + return endpoint + + @ty.overload + def get_session_client( + self, + service_type: str, + version: str | None = None, + constructor: None = None, + **kwargs: ty.Any, + ) -> proxy.Proxy: ... + + @ty.overload + def get_session_client( + self, + service_type: str, + version: str | None = None, + constructor: type[proxy.ProxyT] = ..., + **kwargs: ty.Any, + ) -> proxy.ProxyT: ... 
+ + def get_session_client( + self, + service_type: str, + version: str | None = None, + constructor: type[proxy.Proxy] | None = None, + **kwargs: ty.Any, + ) -> proxy.Proxy: + """Return a prepped keystoneauth Adapter for a given service. + + This is useful for making direct requests calls against a + 'mounted' endpoint. That is, if you do: + + client = get_session_client('compute') + + then you can do: + + client.get('/flavors') + + and it will work like you think. + """ + if constructor is None: + constructor = proxy.Proxy + + version_request = self._get_version_request(service_type, version) + + kwargs.setdefault('region_name', self.get_region_name(service_type)) + kwargs.setdefault( + 'connect_retries', self.get_connect_retries(service_type) + ) + kwargs.setdefault( + 'status_code_retries', self.get_status_code_retries(service_type) + ) + kwargs.setdefault('statsd_prefix', self.get_statsd_prefix()) + kwargs.setdefault('statsd_client', self.get_statsd_client()) + kwargs.setdefault('prometheus_counter', self.get_prometheus_counter()) + kwargs.setdefault( + 'prometheus_histogram', self.get_prometheus_histogram() + ) + kwargs.setdefault('influxdb_config', self._influxdb_config) + kwargs.setdefault('influxdb_client', self.get_influxdb_client()) + endpoint_override = self.get_endpoint(service_type) + version = version_request.version + min_api_version = ( + kwargs.pop('min_version', None) or version_request.min_api_version + ) + max_api_version = ( + kwargs.pop('max_version', None) or version_request.max_api_version + ) + + # Older neutron has inaccessible discovery document. Nobody noticed + # because neutronclient hard-codes an append of v2.0. YAY! + # Also, older octavia has a similar issue. 
+ if service_type in ('network', 'load-balancer'): + version = None + min_api_version = None + max_api_version = None + if endpoint_override is None: + endpoint_override = self._get_hardcoded_endpoint( + service_type, constructor + ) + + client = constructor( + session=self.get_session(), + service_type=self.get_service_type(service_type), + service_name=self.get_service_name(service_type), + # https://review.opendev.org/c/openstack/keystoneauth/+/951183 + interface=self.get_interface(service_type), # type: ignore + version=version, + min_version=min_api_version, + max_version=max_api_version, + endpoint_override=endpoint_override, + default_microversion=version_request.default_microversion, + rate_limit=self.get_rate_limit(service_type), + concurrency=self.get_concurrency(service_type), + **kwargs, + ) + if version_request.default_microversion: + default_microversion = version_request.default_microversion + info = client.get_endpoint_data() + if info and not discover.version_between( + info.min_microversion, + info.max_microversion, + default_microversion, + ): + if self.get_default_microversion(service_type): + raise exceptions.ConfigException( + "A default microversion for service {service_type} of " + "{default_microversion} was requested, but the cloud " + "only supports a minimum of {min_microversion} and " + "a maximum of {max_microversion}.".format( + service_type=service_type, + default_microversion=default_microversion, + min_microversion=discover.version_to_string( + info.min_microversion or (0,) + ), + max_microversion=discover.version_to_string( + info.max_microversion or (0,) + ), + ) + ) + else: + raise exceptions.ConfigException( + "A default microversion for service {service_type} of " + "{default_microversion} was requested, but the cloud " + "only supports a minimum of {min_microversion} and " + "a maximum of {max_microversion}. 
The default " + "microversion was set because a microversion " + "formatted version string, '{api_version}', was " + "passed for the api_version of the service. If it " + "was not intended to set a default microversion " + "please remove anything other than an integer major " + "version from the version setting for " + "the service.".format( + service_type=service_type, + api_version=self.get_api_version(service_type), + default_microversion=default_microversion, + min_microversion=discover.version_to_string( + info.min_microversion or (0,) + ), + max_microversion=discover.version_to_string( + info.max_microversion or (0,) + ), + ) + ) + return client + + def get_session_endpoint( + self, + service_type: str, + min_version: str | None = None, + max_version: str | None = None, + ) -> str | None: + """Return the endpoint from config or the catalog. + + If a configuration lists an explicit endpoint for a service, + return that. Otherwise, fetch the service catalog from the + keystone session and return the appropriate endpoint. 
+ + :param service_type: Official service type of service + """ + + override_endpoint = self.get_endpoint(service_type) + if override_endpoint: + return override_endpoint + + region_name = self.get_region_name(service_type) + service_name = self.get_service_name(service_type) + interface = self.get_interface(service_type) + session = self.get_session() + try: + # Return the highest version we find that matches + # the request + endpoint = session.get_endpoint( + service_type=service_type, + region_name=region_name, + interface=interface, + service_name=service_name, + min_version=min_version, + max_version=max_version, + ) + except keystoneauth1.exceptions.catalog.EndpointNotFound: + endpoint = None + if not endpoint: + self.log.warning( + "Keystone catalog entry not found (" + "service_type=%s,service_name=%s," + "interface=%s,region_name=%s)", + service_type, + service_name, + interface, + region_name, + ) + return endpoint + + def get_cache_expiration_time(self) -> int: + # TODO(mordred) We should be validating/transforming this on input + return int(self._cache_expiration_time) + + def get_cache_path(self) -> str | None: + return self._cache_path + + def get_cache_class(self) -> str: + return self._cache_class + + def get_cache_arguments(self) -> dict[str, ty.Any] | None: + return copy.deepcopy(self._cache_arguments) + + def get_cache_expirations(self) -> dict[str, int]: + return copy.deepcopy(self._cache_expirations) + + def get_cache_resource_expiration( + self, resource: str, default: float | None = None + ) -> float | None: + """Get expiration time for a resource + + :param resource: Name of the resource type + :param default: Default value to return if not found (optional, + defaults to None) + + :returns: Expiration time for the resource type as float or default + """ + if resource not in self._cache_expirations: + return default + return float(self._cache_expirations[resource]) + + def requires_floating_ip(self) -> bool | None: + """Return whether or not 
this cloud requires floating ips. + + + :returns: True or False if know, None if discovery is needed. + If requires_floating_ip is not configured but the cloud is + known to not provide floating ips, will return False. + """ + if self.config['floating_ip_source'] == "None": + return False + requires_floating_ip = self.config.get('requires_floating_ip') + if requires_floating_ip is None: + return None + return bool(requires_floating_ip) + + def get_external_networks(self) -> list[str]: + """Get list of network names for external networks.""" + return [ + net['name'] + for net in self.config.get('networks', []) + if net['routes_externally'] + ] + + def get_external_ipv4_networks(self) -> list[str]: + """Get list of network names for external IPv4 networks.""" + return [ + str(net['name']) + for net in self.config.get('networks', []) + if net['routes_ipv4_externally'] + ] + + def get_external_ipv6_networks(self) -> list[str]: + """Get list of network names for external IPv6 networks.""" + return [ + str(net['name']) + for net in self.config.get('networks', []) + if net['routes_ipv6_externally'] + ] + + def get_internal_networks(self) -> list[str]: + """Get list of network names for internal networks.""" + return [ + str(net['name']) + for net in self.config.get('networks', []) + if not net['routes_externally'] + ] + + def get_internal_ipv4_networks(self) -> list[str]: + """Get list of network names for internal IPv4 networks.""" + return [ + str(net['name']) + for net in self.config.get('networks', []) + if not net['routes_ipv4_externally'] + ] + + def get_internal_ipv6_networks(self) -> list[str]: + """Get list of network names for internal IPv6 networks.""" + return [ + str(net['name']) + for net in self.config.get('networks', []) + if not net['routes_ipv6_externally'] + ] + + def get_default_network(self) -> str | None: + """Get network used for default interactions.""" + for net in self.config.get('networks', []): + if net['default_interface']: + return 
str(net['name']) + return None + + def get_nat_destination(self) -> str | None: + """Get network used for NAT destination.""" + for net in self.config.get('networks', []): + if net['nat_destination']: + return str(net['name']) + return None + + def get_nat_source(self) -> str | None: + """Get network used for NAT source.""" + for net in self.config.get('networks', []): + if net.get('nat_source'): + return str(net['name']) + return None + + def _get_extra_config( + self, + key: str | None, + defaults: dict[str, ty.Any] | None = None, + ) -> dict[str, ty.Any]: + """Fetch an arbitrary extra chunk of config, laying in defaults. + + :param string key: name of the config section to fetch + :param dict defaults: (optional) default values to merge under the + found config + """ + defaults = _util.normalize_keys(defaults or {}) + if not key: + return defaults or {} + return _util.merge_clouds( + defaults, _util.normalize_keys(self._extra_config.get(key, {})) + ) + + def get_client_config( + self, + name: str | None = None, + defaults: dict[str, ty.Any] | None = None, + ) -> dict[str, ty.Any] | None: + """Get config settings for a named client. + + Settings will also be looked for in a section called 'client'. + If settings are found in both, they will be merged with the settings + from the named section winning over the settings from client section, + and both winning over provided defaults. + + :param string name: + Name of the config section to look for. + :param dict defaults: + Default settings to use. + + :returns: + A dict containing merged settings from the named section, the + client section and the defaults. 
+ """ + return self._get_extra_config( + name, self._get_extra_config('client', defaults) + ) + + def get_password_callback(self) -> _PasswordCallback | None: + return self._password_callback + + def get_rate_limit(self, service_type: str) -> float | None: + return self._get_service_config( + 'rate_limit', service_type=service_type + ) + + def get_concurrency(self, service_type: str) -> int | None: + return self._get_service_config( + 'concurrency', service_type=service_type + ) + + def get_statsd_client( + self, + ) -> ty.Optional['statsd_client.StatsClientBase']: + if not statsd_client: + if self._statsd_host: + self.log.warning( + 'StatsD python library is not available. ' + 'Reporting disabled' + ) + return None + statsd_args = {} + if self._statsd_host: + statsd_args['host'] = self._statsd_host + if self._statsd_port: + statsd_args['port'] = self._statsd_port + if statsd_args: + try: + return statsd_client.StatsClient(**statsd_args) + except Exception: + self.log.warning('Cannot establish connection to statsd') + return None + else: + return None + + def get_statsd_prefix(self) -> str: + return self._statsd_prefix or 'openstack.api' + + def get_prometheus_registry( + self, + ) -> ty.Optional['prometheus_client.CollectorRegistry']: + return self._collector_registry + + def get_prometheus_histogram( + self, + ) -> ty.Optional['prometheus_client.Histogram']: + registry = self.get_prometheus_registry() + if not registry or not prometheus_client: + return None + # We have to hide a reference to the histogram on the registry + # object, because it's collectors must be singletons for a given + # registry but register at creation time. 
+ hist = getattr(registry, '_openstacksdk_histogram', None) + if not hist: + hist = prometheus_client.Histogram( + 'openstack_http_response_time', + 'Time taken for an http response to an OpenStack service', + labelnames=[ + 'method', + 'endpoint', + 'service_type', + 'status_code', + ], + registry=registry, + ) + setattr(registry, '_openstacksdk_histogram', hist) + return hist + + def get_prometheus_counter( + self, + ) -> ty.Optional['prometheus_client.Counter']: + registry = self.get_prometheus_registry() + if not registry or not prometheus_client: + return None + counter = getattr(registry, '_openstacksdk_counter', None) + if not counter: + counter = prometheus_client.Counter( + 'openstack_http_requests', + 'Number of HTTP requests made to an OpenStack service', + labelnames=[ + 'method', + 'endpoint', + 'service_type', + 'status_code', + ], + registry=registry, + ) + setattr(registry, '_openstacksdk_counter', counter) + return counter + + def has_service(self, service_type: str) -> bool: + service_type = service_type.lower().replace('-', '_') + key = f'has_{service_type}' + value = self.config.get( + key, self._service_type_manager.is_official(service_type) + ) + assert isinstance(value, bool) + return value + + def disable_service( + self, service_type: str, reason: str | None = None + ) -> None: + _disable_service(self.config, service_type, reason=reason) + + def enable_service(self, service_type: str) -> None: + service_type = service_type.lower().replace('-', '_') + key = f'has_{service_type}' + self.config[key] = True + + def get_disabled_reason(self, service_type: str) -> str | None: + service_type = service_type.lower().replace('-', '_') + d_key = _make_key('disabled_reason', service_type) + return self.config.get(d_key) + + def get_influxdb_client( + self, + ) -> ty.Optional['influxdb_client.InfluxDBClient']: + influx_args: dict[str, ty.Any] = {} + if not self._influxdb_config: + return None + + warnings.warn( + 'Support for InfluxDB requires the 
influxdb library which ' + 'only supports InfluxDB 1.x and is deprecated. As a result, ' + 'influxdb is also deprecated and will be removed in a future ' + 'release.', + os_warnings.RemovedInSDK60Warning, + ) + + use_udp = bool(self._influxdb_config.get('use_udp', False)) + port = self._influxdb_config.get('port') + if use_udp: + influx_args['use_udp'] = True + if 'port' in self._influxdb_config: + if use_udp: + influx_args['udp_port'] = port + else: + influx_args['port'] = port + for key in ['host', 'username', 'password', 'database', 'timeout']: + if key in self._influxdb_config: + influx_args[key] = self._influxdb_config[key] + if influxdb_client and influx_args: + try: + return influxdb_client.InfluxDBClient(**influx_args) + except Exception: + self.log.warning('Cannot establish connection to InfluxDB') + else: + self.log.warning( + 'InfluxDB configuration is present, ' + 'but no client library is found.' + ) + return None diff --git a/openstack/config/defaults.json b/openstack/config/defaults.json new file mode 100644 index 0000000000..2d154e3a9d --- /dev/null +++ b/openstack/config/defaults.json @@ -0,0 +1,16 @@ +{ + "auth_type": "password", + "baremetal_status_code_retries": 5, + "baremetal_introspection_status_code_retries": 5, + "image_status_code_retries": 5, + "disable_vendor_agent": {}, + "interface": "public", + "floating_ip_source": "neutron", + "image_api_use_tasks": false, + "image_format": "qcow2", + "message": "", + "network_api_version": "2", + "object_store_api_version": "1", + "secgroup_source": "neutron", + "status": "active" +} diff --git a/openstack/config/defaults.py b/openstack/config/defaults.py new file mode 100644 index 0000000000..927a7f99cf --- /dev/null +++ b/openstack/config/defaults.py @@ -0,0 +1,55 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import json +import os +import threading +import typing as ty + +_json_path = os.path.join( + os.path.dirname(os.path.realpath(__file__)), 'defaults.json' +) +_defaults = None +_defaults_lock = threading.Lock() + + +# json_path argument is there for os-client-config +def get_defaults(json_path: str = _json_path) -> dict[str, ty.Any]: + global _defaults + if _defaults is not None: + return _defaults.copy() + with _defaults_lock: + if _defaults is not None: + # Did someone else just finish filling it? + return _defaults.copy() + # Python language specific defaults + # These are defaults related to use of python libraries, they are + # not qualities of a cloud. + # + # NOTE(harlowja): update a in-memory dict, before updating + # the global one so that other callers of get_defaults do not + # see the partially filled one. + tmp_defaults = { + 'api_timeout': None, + 'verify': True, + 'cacert': None, + 'cert': None, + 'key': None, + } + with open(json_path) as json_file: + updates = json.load(json_file) + if updates is not None: + tmp_defaults.update(updates) + _defaults = tmp_defaults + return tmp_defaults.copy() diff --git a/openstack/config/exceptions.py b/openstack/config/exceptions.py new file mode 100644 index 0000000000..932de73cf6 --- /dev/null +++ b/openstack/config/exceptions.py @@ -0,0 +1,17 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions + +OpenStackConfigException = exceptions.ConfigException diff --git a/openstack/config/loader.py b/openstack/config/loader.py new file mode 100644 index 0000000000..a4aad7f91c --- /dev/null +++ b/openstack/config/loader.py @@ -0,0 +1,1607 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +# alias because we already had an option named argparse +import argparse as argparse_mod +import collections +import copy +import errno +import json +import os +import re +import sys +import typing as ty +import warnings + +from keystoneauth1 import adapter +from keystoneauth1 import loading +from keystoneauth1 import session +import platformdirs +import yaml + +from openstack import _log +from openstack.config import _util +from openstack.config import cloud_region +from openstack.config import defaults +from openstack.config import vendors +from openstack import exceptions +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + from keystoneauth1.loading._plugins.identity import v3 as v3_loaders + from keystoneauth1.loading import opts + +PLATFORMDIRS = platformdirs.PlatformDirs( + 'openstack', 'OpenStack', multipath=True +) +CONFIG_HOME = PLATFORMDIRS.user_config_dir +CACHE_PATH = PLATFORMDIRS.user_cache_dir + +# snaps do set $HOME to something like +# /home/$USER/snap/openstackclients/$SNAP_VERSION +# the real home (usually /home/$USERNAME) is stored in $SNAP_REAL_HOME +# see https://snapcraft.io/docs/environment-variables +SNAP_REAL_HOME = os.getenv('SNAP_REAL_HOME') +if SNAP_REAL_HOME: + UNIX_CONFIG_HOME = os.path.join( + os.path.join(SNAP_REAL_HOME, '.config'), 'openstack' + ) +else: + UNIX_CONFIG_HOME = os.path.join( + os.path.expanduser(os.path.join('~', '.config')), 'openstack' + ) +UNIX_SITE_CONFIG_HOME = '/etc/openstack' + +SITE_CONFIG_HOME = PLATFORMDIRS.site_config_dir + +CONFIG_SEARCH_PATH = [ + os.getcwd(), + CONFIG_HOME, + UNIX_CONFIG_HOME, + SITE_CONFIG_HOME, + UNIX_SITE_CONFIG_HOME, +] +YAML_SUFFIXES = ('.yaml', '.yml') +JSON_SUFFIXES = ('.json',) +CONFIG_FILES = [ + os.path.join(d, 'clouds' + s) + for d in CONFIG_SEARCH_PATH + for s in YAML_SUFFIXES + JSON_SUFFIXES +] +SECURE_FILES = [ + os.path.join(d, 'secure' + s) + for d in CONFIG_SEARCH_PATH + for s in YAML_SUFFIXES + JSON_SUFFIXES +] +VENDOR_FILES = [ + os.path.join(d, 
'clouds-public' + s) + for d in CONFIG_SEARCH_PATH + for s in YAML_SUFFIXES + JSON_SUFFIXES +] + +BOOL_KEYS = ('insecure', 'cache') +CSV_KEYS = ('auth_methods',) + +FORMAT_EXCLUSIONS = frozenset(['password']) + + +def get_boolean(value: ty.Any) -> bool: + if value is None: + return False + if type(value) is bool: + return value + if value.lower() == 'true': + return True + return False + + +def _auth_update( + old_dict: dict[str, ty.Any], new_dict_source: dict[str, ty.Any] +) -> dict[str, ty.Any]: + """Like dict.update, except handling the nested dict called auth.""" + new_dict = copy.deepcopy(new_dict_source) + for k, v in new_dict.items(): + if k == 'auth': + if k in old_dict: + old_dict[k].update(v) + else: + old_dict[k] = v.copy() + else: + old_dict[k] = v + return old_dict + + +def _fix_argv(argv: list[str]) -> None: + # Transform any _ characters in arg names to - so that we don't + # have to throw billions of compat argparse arguments around all + # over the place. + processed = collections.defaultdict(set) + for index in range(0, len(argv)): + # If the value starts with '--' and has '-' or '_' in it, then + # it's worth looking at it + if re.match('^--.*(_|-)+.*', argv[index]): + split_args = argv[index].split('=') + orig = split_args[0] + new = orig.replace('_', '-') + if orig != new: + split_args[0] = new + argv[index] = "=".join(split_args) + # Save both for later so we can throw an error about dupes + processed[new].add(orig) + overlap: list[str] = [] + for new, old in processed.items(): + if len(old) > 1: + overlap.extend(old) + if overlap: + raise exceptions.ConfigException( + "The following options were given: '{options}' which contain " + "duplicates except that one has _ and one has -. There is " + "no sane way for us to know what you're doing. 
Remove the " + "duplicate option and try again".format(options=','.join(overlap)) + ) + + +class OpenStackConfig: + # These two attribute are to allow os-client-config to plumb in its + # local versions for backwards compat. + # They should not be used by anyone else. + _cloud_region_class = cloud_region.CloudRegion + _defaults_module = defaults + + #: config_filename is the filename that configuration was loaded from, if + #: any. + config_filename: str | None + #: secure_config_filename is the filename that secure configuration was + #: loaded from, if any. + secure_config_filename: str | None + #: cloud_config contains the combined loaded configuration. + cloud_config: dict[str, ty.Any] + + def __init__( + self, + config_files: list[str] | None = None, + vendor_files: list[str] | None = None, + override_defaults: dict[str, ty.Any] | None = None, + force_ipv4: bool | None = None, + envvar_prefix: str | None = None, + secure_files: list[str] | None = None, + pw_func: cloud_region._PasswordCallback | None = None, + session_constructor: type[session.Session] | None = None, + app_name: str | None = None, + app_version: str | None = None, + load_yaml_config: bool = True, + load_envvars: bool = True, + statsd_host: str | None = None, + statsd_port: str | None = None, + statsd_prefix: str | None = None, + influxdb_config: dict[str, ty.Any] | None = None, + ): + self.log = _log.setup_logging('openstack.config') + self._session_constructor = session_constructor + self._app_name = app_name + self._app_version = app_version + self._load_envvars = load_envvars + + if load_yaml_config: + # "if config_files" is not sufficient to process empty list + if config_files is not None: + self._config_files = config_files + else: + self._config_files = CONFIG_FILES + if secure_files is not None: + self._secure_files = secure_files + else: + self._secure_files = SECURE_FILES + if vendor_files is not None: + self._vendor_files = vendor_files + else: + self._vendor_files = VENDOR_FILES + 
else: + self._config_files = [] + self._secure_files = [] + self._vendor_files = [] + + config_file_override = self._get_envvar('OS_CLIENT_CONFIG_FILE') + if config_file_override: + self._config_files.insert(0, config_file_override) + + secure_file_override = self._get_envvar('OS_CLIENT_SECURE_FILE') + if secure_file_override: + self._secure_files.insert(0, secure_file_override) + + self.defaults = self._defaults_module.get_defaults() + if override_defaults: + self.defaults.update(override_defaults) + + # First, use a config file if it exists where expected + config_filename, cloud_config = self._load_config_file() + if config_filename and cloud_config: + self._validate_config_file(config_filename, cloud_config) + + secure_config_filename, secure_config = self._load_secure_file() + if secure_config_filename and secure_config: + self._validate_config_file(secure_config_filename, secure_config) + cloud_config = _util.merge_clouds( + cloud_config or {}, secure_config + ) + + self.config_filename = config_filename + self.secure_config_filename = secure_config_filename + if not cloud_config: + self.cloud_config = {'clouds': {}} + else: + self.cloud_config = cloud_config + if 'clouds' not in self.cloud_config: + self.cloud_config['clouds'] = {} + + # Save the other config + self.extra_config = copy.deepcopy(self.cloud_config) + self.extra_config.pop('clouds', None) + + # Grab ipv6 preference settings from env + client_config = self.cloud_config.get('client', {}) + + if force_ipv4 is not None: + # If it's passed in to the constructor, honor it. 
+ self.force_ipv4 = force_ipv4 + else: + # Get the backwards compat value + prefer_ipv6 = get_boolean( + self._get_envvar( + 'OS_PREFER_IPV6', + client_config.get( + 'prefer_ipv6', client_config.get('prefer-ipv6', True) + ), + ) + ) + force_ipv4 = get_boolean( + self._get_envvar( + 'OS_FORCE_IPV4', + client_config.get( + 'force_ipv4', client_config.get('broken-ipv6', False) + ), + ) + ) + + self.force_ipv4 = force_ipv4 + if not prefer_ipv6: + # this will only be false if someone set it explicitly + # honor their wishes + self.force_ipv4 = True + + # Next, process environment variables and add them to the mix + self.envvar_key = self._get_envvar('OS_CLOUD_NAME', 'envvars') + if self.envvar_key in self.cloud_config['clouds']: + raise exceptions.ConfigException( + f'{self.config_filename!r} defines a cloud named ' + f'{self.envvar_key!r}, but OS_CLOUD_NAME is also set to ' + f'{self.envvar_key!r}. ' + f'Please rename either your environment-based cloud, ' + f'or one of your file-based clouds.' + ) + + self.default_cloud = self._get_envvar('OS_CLOUD') + + if load_envvars: + envvars = self._get_os_environ(envvar_prefix=envvar_prefix) + if envvars: + self.cloud_config['clouds'][self.envvar_key] = envvars + if not self.default_cloud: + self.default_cloud = self.envvar_key + + if not self.default_cloud and self.cloud_config['clouds']: + if len(self.cloud_config['clouds'].keys()) == 1: + # If there is only one cloud just use it. This matches envvars + # behavior and allows for much less typing. + # TODO(mordred) allow someone to mark a cloud as "default" in + # clouds.yaml. + # The next/iter thing is for python3 compat where dict.keys + # returns an iterator but in python2 it's a list. 
+ self.default_cloud = next( + iter(self.cloud_config['clouds'].keys()) + ) + + # Finally, fall through and make a cloud that starts with defaults + # because we need somewhere to put arguments, and there are neither + # config files or env vars + if not self.cloud_config['clouds']: + self.cloud_config = dict(clouds=dict(defaults=dict(self.defaults))) + self.default_cloud = 'defaults' + + self._cache_auth = False + self._cache_expiration_time = 0 + self._cache_path = CACHE_PATH + self._cache_class = 'dogpile.cache.null' + self._cache_arguments: dict[str, ty.Any] = {} + self._cache_expirations: dict[str, int] = {} + self._influxdb_config = {} + if 'cache' in self.cloud_config: + cache_settings = _util.normalize_keys(self.cloud_config['cache']) + + self._cache_auth = get_boolean( + cache_settings.get('auth', self._cache_auth) + ) + + # expiration_time used to be 'max_age' but the dogpile setting + # is expiration_time. Support max_age for backwards compat. + self._cache_expiration_time = cache_settings.get( + 'expiration_time', + cache_settings.get('max_age', self._cache_expiration_time), + ) + + # If cache class is given, use that. If not, but if cache time + # is given, default to memory. Otherwise, default to nothing. 
+ if self._cache_expiration_time: + self._cache_class = 'dogpile.cache.memory' + self._cache_class = self.cloud_config['cache'].get( + 'class', self._cache_class + ) + + self._cache_path = os.path.expanduser( + cache_settings.get('path', self._cache_path) + ) + self._cache_arguments = cache_settings.get( + 'arguments', self._cache_arguments + ) + self._cache_expirations = cache_settings.get( + 'expiration', self._cache_expirations + ) + + if load_yaml_config: + metrics_config = self.cloud_config.get('metrics', {}) + statsd_config = metrics_config.get('statsd', {}) + statsd_host = statsd_host or statsd_config.get('host') + statsd_port = statsd_port or statsd_config.get('port') + statsd_prefix = statsd_prefix or statsd_config.get('prefix') + + influxdb_cfg = metrics_config.get('influxdb', {}) + # Parse InfluxDB configuration + if not influxdb_config: + influxdb_config = influxdb_cfg + else: + influxdb_config.update(influxdb_cfg) + + if influxdb_config: + # NOTE(stephenfin): defer the warning to here so we catch config in + # both clouds.yaml and directly passed in + warnings.warn( + 'Support for InfluxDB requires the influxdb library which ' + 'only supports InfluxDB 1.x and is deprecated. As a result, ' + 'influxdb is also deprecated and will be removed in a future ' + 'release.', + os_warnings.RemovedInSDK60Warning, + ) + + config = {} + if 'use_udp' in influxdb_config: + use_udp = influxdb_config['use_udp'] + if isinstance(use_udp, str): + use_udp = use_udp.lower() in ('true', 'yes', '1') + elif not isinstance(use_udp, bool): + use_udp = False + self.log.warning( + 'InfluxDB.use_udp value type is not ' + 'supported. 
Use one of ' + '[true|false|yes|no|1|0]' + ) + config['use_udp'] = use_udp + for key in [ + 'host', + 'port', + 'username', + 'password', + 'database', + 'measurement', + 'timeout', + ]: + if key in influxdb_config: + config[key] = influxdb_config[key] + self._influxdb_config = config + + if load_envvars: + statsd_host = statsd_host or os.environ.get('STATSD_HOST') + statsd_port = statsd_port or os.environ.get('STATSD_PORT') + statsd_prefix = statsd_prefix or os.environ.get('STATSD_PREFIX') + + self._statsd_host = statsd_host + self._statsd_port = statsd_port + self._statsd_prefix = statsd_prefix + + # Flag location to hold the peeked value of an argparse timeout value + self._argv_timeout = False + + # Save the password callback + # password = self._pw_callback(prompt="Password: ") + self._pw_callback = pw_func + + def _get_os_environ( + self, envvar_prefix: str | None = None + ) -> dict[str, ty.Any] | None: + ret = self._defaults_module.get_defaults() + if not envvar_prefix: + # This makes the or below be OS_ or OS_ which is a no-op + envvar_prefix = 'OS_' + environkeys = [ + k + for k in os.environ.keys() + if (k.startswith('OS_') or k.startswith(envvar_prefix)) + and not k.startswith('OS_TEST') # infra CI var + and not k.startswith('OS_STD') # oslotest var + and not k.startswith('OS_LOG') # oslotest var + ] + for k in environkeys: + newkey = k.split('_', 1)[-1].lower() + ret[newkey] = os.environ[k] + # If the only environ keys are selectors or behavior modification, + # don't return anything + selectors = { + 'OS_CLOUD', + 'OS_REGION_NAME', + 'OS_CLIENT_CONFIG_FILE', + 'OS_CLIENT_SECURE_FILE', + 'OS_CLOUD_NAME', + } + if set(environkeys) - selectors: + return ret + return None + + def _get_envvar(self, key: str, default: str | None = None) -> str | None: + if not self._load_envvars: + return default + return os.environ.get(key, default) + + def get_extra_config( + self, key: str, defaults: dict[str, ty.Any] | None = None + ) -> dict[str, ty.Any]: + """Fetch an 
arbitrary extra chunk of config, laying in defaults. + + :param string key: name of the config section to fetch + :param dict defaults: (optional) default values to merge under the + found config + """ + defaults = _util.normalize_keys(defaults or {}) + assert defaults is not None # narrow type + if not key: + return defaults + return _util.merge_clouds( + defaults, _util.normalize_keys(self.cloud_config.get(key, {})) + ) + + def _load_config_file( + self, + ) -> tuple[str, dict[str, ty.Any]] | tuple[None, None]: + return self._load_yaml_json_file(self._config_files) + + def _load_secure_file( + self, + ) -> tuple[str, dict[str, ty.Any]] | tuple[None, None]: + return self._load_yaml_json_file(self._secure_files) + + def _load_vendor_file( + self, + ) -> tuple[str, dict[str, ty.Any]] | tuple[None, None]: + return self._load_yaml_json_file(self._vendor_files) + + def _load_yaml_json_file( + self, filelist: list[str] + ) -> tuple[str, dict[str, ty.Any]] | tuple[None, None]: + for path in filelist: + if os.path.exists(path): + try: + with open(path) as f: + if path.endswith('json'): + return path, json.load(f) + else: + return path, yaml.safe_load(f) + except OSError as e: + if e.errno == errno.EACCES: + # Can't access file so let's continue to the next + # file + continue + return (None, None) + + def _validate_config_file(self, path: str, data: ty.Any) -> bool: + """Validate config file contains a clouds entry. + + All config files should have a 'clouds' key at a minimum. 
+ """ + if not isinstance(data, dict): + raise exceptions.ConfigException( + f'Configuration file {path} is empty or not a valid mapping' + ) + + if 'clouds' not in data: + # TODO(stephenfin): This should probably be an error at some point + self.log.warning( + "Configuration file %s does not contain a 'clouds' key", path + ) + return False + + return True + + def _expand_region_name(self, region_name: str) -> dict[str, ty.Any]: + return {'name': region_name, 'values': {}} + + def _expand_regions( + self, regions: list[str | dict[str, ty.Any]] + ) -> list[dict[str, ty.Any]]: + ret = [] + for region in regions: + if isinstance(region, dict): + # i.e. must have name key, and only name,values keys + if 'name' not in region or not {'name', 'values'} >= set( + region + ): + raise exceptions.ConfigException( + f'Invalid region entry at: {region}' + ) + if 'values' not in region: + region['values'] = {} + ret.append(copy.deepcopy(region)) + else: + ret.append(self._expand_region_name(region)) + return ret + + def _get_regions(self, cloud: str) -> list[dict[str, ty.Any]]: + if cloud not in self.cloud_config['clouds']: + return [self._expand_region_name('')] + regions = self._get_known_regions(cloud) + if not regions: + # We don't know of any regions use a workable default. + regions = [self._expand_region_name('')] + return regions + + def _get_known_regions(self, cloud: str) -> list[dict[str, ty.Any]]: + config = _util.normalize_keys(self.cloud_config['clouds'][cloud]) + if 'regions' in config: + return self._expand_regions(config['regions']) + elif 'region_name' in config: + if isinstance(config['region_name'], list): + regions = config['region_name'] + else: + regions = config['region_name'].split(',') + if len(regions) > 1: + warnings.warn( + f"Comma separated lists in region_name are deprecated. 
" + f"Please use a yaml list in the regions " + f"parameter in {self.config_filename} instead.", + os_warnings.OpenStackDeprecationWarning, + ) + return self._expand_regions(regions) + else: + # crappit. we don't have a region defined. + new_cloud: dict[str, ty.Any] = {} + our_cloud = self.cloud_config['clouds'].get(cloud, {}) + self._expand_vendor_profile(cloud, new_cloud, our_cloud) + if new_cloud.get('regions'): + return self._expand_regions(new_cloud['regions']) + elif new_cloud.get('region_name'): + return [self._expand_region_name(new_cloud['region_name'])] + + return [] + + def _get_region( + self, cloud: str | None = None, region_name: str = '' + ) -> dict[str, ty.Any]: + if region_name is None: + region_name = '' + + if not cloud: + return self._expand_region_name(region_name) + + regions = self._get_known_regions(cloud) + if not regions: + return self._expand_region_name(region_name) + + if not region_name: + return regions[0] + + for region in regions: + if region['name'] == region_name: + return region + + raise exceptions.ConfigException( + 'Region {region_name} is not a valid region name for cloud ' + '{cloud}. Valid choices are {region_list}. 
Please note that ' + 'region names are case sensitive.'.format( + region_name=region_name, + region_list=','.join([r['name'] for r in regions]), + cloud=cloud, + ) + ) + + def get_cloud_names(self) -> list[str]: + return list(self.cloud_config['clouds'].keys()) + + def _get_base_cloud_config( + self, name: str | None, profile: str | None = None + ) -> dict[str, ty.Any]: + cloud = {} + + # Only validate cloud name if one was given + if name and name not in self.cloud_config['clouds']: + raise exceptions.ConfigException(f"Cloud {name} was not found.") + + our_cloud = self.cloud_config['clouds'].get(name, dict()) + if profile: + our_cloud['profile'] = profile + + # Get the defaults + cloud.update(self.defaults) + self._expand_vendor_profile(name, cloud, our_cloud) + + if 'auth' not in cloud: + cloud['auth'] = {} + + _auth_update(cloud, our_cloud) + cloud.pop('cloud', None) + + return cloud + + def _expand_vendor_profile( + self, + name: str | None, + cloud: dict[str, ty.Any], + our_cloud: dict[str, ty.Any], + ) -> None: + # Expand a profile if it exists. 'cloud' is an old confusing name + # for this. + profile_name = our_cloud.get('profile', our_cloud.get('cloud', None)) + if not profile_name or profile_name == self.envvar_key: + return + if 'cloud' in our_cloud: + warnings.warn( + f"{self.config_filename} uses the keyword 'cloud' to " + f"reference a known vendor profile. 
This has been deprecated " + f"in favor of the 'profile' keyword.", + os_warnings.OpenStackDeprecationWarning, + ) + + _, vendor_file = self._load_vendor_file() + if ( + vendor_file + and 'public-clouds' in vendor_file + and profile_name in vendor_file['public-clouds'] + ): + _auth_update(cloud, vendor_file['public-clouds'][profile_name]) + else: + profile_data = vendors.get_profile(profile_name) + if profile_data: + nested_profile = profile_data.pop('profile', None) + if nested_profile: + nested_profile_data = vendors.get_profile(nested_profile) + if nested_profile_data: + profile_data = nested_profile_data + status = profile_data.pop('status', 'active') + message = profile_data.pop('message', '') + if status == 'deprecated': + warnings.warn( + f"{profile_name} is deprecated: {message}", + os_warnings.OpenStackDeprecationWarning, + ) + elif status == 'shutdown': + raise exceptions.ConfigException( + f"{profile_name} references a cloud that no longer " + f"exists: {message}" + ) + _auth_update(cloud, profile_data) + else: + # Can't find the requested vendor config, go about business + warnings.warn( + f"Couldn't find the vendor profile {profile_name} for " + f"the cloud {name}", + os_warnings.ConfigurationWarning, + ) + + def _project_scoped(self, cloud: dict[str, ty.Any]) -> bool: + return ( + 'project_id' in cloud + or 'project_name' in cloud + or 'project_id' in cloud['auth'] + or 'project_name' in cloud['auth'] + ) + + def _validate_networks( + self, networks: list[dict[str, ty.Any]], key: str + ) -> None: + value = None + for net in networks: + if value and net[key]: + raise exceptions.ConfigException( + "Duplicate network entries for {key}: {net1} and {net2}. 
" + "Only one network can be flagged with {key}".format( + key=key, net1=value['name'], net2=net['name'] + ) + ) + if not value and net[key]: + value = net + + def _fix_backwards_networks( + self, cloud: dict[str, ty.Any] + ) -> dict[str, ty.Any]: + # Leave the external_network and internal_network keys in the + # dict because consuming code might be expecting them. + networks = [] + # Normalize existing network entries + for net in cloud.get('networks', []): + name = net.get('name') + if not name: + raise exceptions.ConfigException( + 'Entry in network list is missing required field "name".' + ) + network = dict( + name=name, + routes_externally=get_boolean(net.get('routes_externally')), + nat_source=get_boolean(net.get('nat_source')), + nat_destination=get_boolean(net.get('nat_destination')), + default_interface=get_boolean(net.get('default_interface')), + ) + # routes_ipv4_externally defaults to the value of routes_externally + network['routes_ipv4_externally'] = get_boolean( + net.get('routes_ipv4_externally', network['routes_externally']) + ) + # routes_ipv6_externally defaults to the value of routes_externally + network['routes_ipv6_externally'] = get_boolean( + net.get('routes_ipv6_externally', network['routes_externally']) + ) + networks.append(network) + + for key in ('external_network', 'internal_network'): + external = key.startswith('external') + if key in cloud and 'networks' in cloud: + raise exceptions.ConfigException( + f"Both {key} and networks were specified in the config. " + f"Please remove {key} from the config and use the network " + f"list to configure network behavior." + ) + if key in cloud: + warnings.warn( + f"{key} is deprecated. 
Please replace with an entry in " + f"a dict inside of the networks list with name: " + f"{cloud[key]} and routes_externally: {external}", + os_warnings.OpenStackDeprecationWarning, + ) + networks.append( + dict( + name=cloud[key], + routes_externally=external, + nat_destination=not external, + default_interface=external, + ) + ) + + # Validate that we don't have duplicates + self._validate_networks(networks, 'nat_destination') + self._validate_networks(networks, 'default_interface') + + cloud['networks'] = networks + return cloud + + def _handle_domain_id(self, cloud: dict[str, ty.Any]) -> dict[str, ty.Any]: + # Allow people to just specify domain once if it's the same + mappings = { + 'domain_id': ('user_domain_id', 'project_domain_id'), + 'domain_name': ('user_domain_name', 'project_domain_name'), + } + for target_key, possible_values in mappings.items(): + if not self._project_scoped(cloud): + if target_key in cloud and target_key not in cloud['auth']: + cloud['auth'][target_key] = cloud.pop(target_key) + continue + for key in possible_values: + if target_key in cloud['auth'] and key not in cloud['auth']: + cloud['auth'][key] = cloud['auth'][target_key] + cloud.pop(target_key, None) + cloud['auth'].pop(target_key, None) + return cloud + + def _fix_backwards_auth( + self, cloud: dict[str, ty.Any] + ) -> dict[str, ty.Any]: + mappings = { + 'domain_id': ('domain_id', 'domain-id'), + 'domain_name': ('domain_name', 'domain-name'), + 'user_domain_id': ('user_domain_id', 'user-domain-id'), + 'user_domain_name': ('user_domain_name', 'user-domain-name'), + 'project_domain_id': ('project_domain_id', 'project-domain-id'), + 'project_domain_name': ( + 'project_domain_name', + 'project-domain-name', + ), + 'token': ('auth-token', 'auth_token', 'token'), + 'passcode': ('passcode',), + } + if cloud.get('auth_type', None) == 'v2password': + # If v2password is explcitly requested, this is to deal with old + # clouds. 
That's fine - we need to map settings in the opposite + # direction + mappings['tenant_id'] = ( + 'project_id', + 'project-id', + 'tenant_id', + 'tenant-id', + ) + mappings['tenant_name'] = ( + 'project_name', + 'project-name', + 'tenant_name', + 'tenant-name', + ) + else: + mappings['project_id'] = ( + 'tenant_id', + 'tenant-id', + 'project_id', + 'project-id', + ) + mappings['project_name'] = ( + 'tenant_name', + 'tenant-name', + 'project_name', + 'project-name', + ) + for target_key, possible_values in mappings.items(): + target = None + for key in possible_values: + if key in cloud['auth']: + target = str(cloud['auth'][key]) + del cloud['auth'][key] + # Prefer values NOT from the 'auth' section + # as they may contain cli or environment overrides. + # See story 2010784 for context. + if key in cloud: + target = str(cloud[key]) + del cloud[key] + if target: + cloud['auth'][target_key] = target + return cloud + + def _fix_backwards_auth_plugin( + self, cloud: dict[str, ty.Any] + ) -> dict[str, ty.Any]: + # Do the lists backwards so that auth_type is the ultimate winner + mappings = { + 'auth_type': ('auth_plugin', 'auth_type'), + } + for target_key, possible_values in mappings.items(): + target = None + for key in possible_values: + if key in cloud: + target = cloud[key] + del cloud[key] + cloud[target_key] = target + # Because we force alignment to v3 nouns, we want to force + # use of the auth plugin that can do auto-selection and dealing + # with that based on auth parameters. v2password is basically + # completely broken + return cloud + + def register_argparse_arguments( + self, + parser: argparse_mod.ArgumentParser, + argv: list[str], + service_keys: list[str] | None = None, + ) -> None: + """Register all of the common argparse options needed. + + Given an argparse parser, register the keystoneauth Session arguments, + the keystoneauth Auth Plugin Options and os-cloud. 
Also, peek in the + argv to see if all of the auth plugin options should be registered + or merely the ones already configured. + + :param argparse.ArgumentParser: parser to attach argparse options to + :param argv: the arguments provided to the application + :param string service_keys: Service or list of services this argparse + should be specialized for, if known. + The first item in the list will be used + as the default value for service_type + (optional) + + :raises exceptions.ConfigException if an invalid auth-type is requested + """ + + if service_keys is None: + service_keys = [] + + # Fix argv in place - mapping any keys with embedded _ in them to - + _fix_argv(argv) + + local_parser = argparse_mod.ArgumentParser(add_help=False) + + for p in (parser, local_parser): + p.add_argument( + '--os-cloud', + metavar='', + default=self._get_envvar('OS_CLOUD', None), + help='Named cloud to connect to', + ) + + # we need to peek to see if timeout was actually passed, since + # the keystoneauth declaration of it has a default, which means + # we have no clue if the value we get is from the ksa default + # for from the user passing it explicitly. We'll stash it for later + local_parser.add_argument('--timeout', metavar='') + + # We need for get_one to be able to peek at whether a token + # was passed so that we can swap the default from password to + # token if it was. 
And we need to also peek for --os-auth-token + # for novaclient backwards compat + local_parser.add_argument('--os-token') + local_parser.add_argument('--os-auth-token') + + # Peek into the future and see if we have an auth-type set in + # config AND a cloud set, so that we know which command line + # arguments to register and show to the user (the user may want + # to say something like: + # openstack --os-cloud=foo --os-oidctoken=bar + # although I think that user is the cause of my personal pain + options, _args = local_parser.parse_known_args(argv) + if options.timeout: + self._argv_timeout = True + + # validate = False because we're not _actually_ loading here + # we're only peeking, so it's the wrong time to assert that + # the rest of the arguments given are invalid for the plugin + # chosen (for instance, --help may be requested, so that the + # user can see what options he may want to give + cloud_region = self.get_one(argparse=options, validate=False) + default_auth_type = cloud_region.config['auth_type'] + + try: + loading.register_auth_argparse_arguments( + parser, argv, default=default_auth_type + ) + except Exception: + # Hidiing the keystoneauth exception because we're not actually + # loading the auth plugin at this point, so the error message + # from it doesn't actually make sense to os-client-config users + options, _args = parser.parse_known_args(argv) + plugin_names = loading.get_available_plugin_names() + raise exceptions.ConfigException( + "An invalid auth-type was specified: {auth_type}. 
" + "Valid choices are: {plugin_names}.".format( + auth_type=options.os_auth_type, + plugin_names=",".join(plugin_names), + ) + ) + + if service_keys: + primary_service = service_keys[0] + else: + primary_service = None + loading.register_session_argparse_arguments(parser) + adapter.register_adapter_argparse_arguments( + parser, service_type=primary_service + ) + for service_key in service_keys: + # legacy clients have un-prefixed api-version options + parser.add_argument( + '--{service_key}-api-version'.format( + service_key=service_key.replace('_', '-') + ), + help=argparse_mod.SUPPRESS, + ) + adapter.register_service_adapter_argparse_arguments( + parser, service_type=service_key + ) + + # Backwards compat options for legacy clients + parser.add_argument('--http-timeout', help=argparse_mod.SUPPRESS) + parser.add_argument('--os-endpoint-type', help=argparse_mod.SUPPRESS) + parser.add_argument('--endpoint-type', help=argparse_mod.SUPPRESS) + + def _fix_backwards_interface( + self, cloud: dict[str, ty.Any] + ) -> dict[str, ty.Any]: + new_cloud = {} + for key in cloud.keys(): + if key.endswith('endpoint_type'): + target_key = key.replace('endpoint_type', 'interface') + else: + target_key = key + new_cloud[target_key] = cloud[key] + return new_cloud + + def _fix_backwards_api_timeout( + self, cloud: dict[str, ty.Any] + ) -> dict[str, ty.Any]: + new_cloud = {} + # requests can only have one timeout, which means that in a single + # cloud there is no point in different timeout values. However, + # for some reason many of the legacy clients decided to shove their + # service name in to the arg name for reasons surpassin sanity. 
If + # we find any values that are not api_timeout, overwrite api_timeout + # with the value + service_timeout = None + for key in cloud.keys(): + if key.endswith('timeout') and not ( + key == 'timeout' or key == 'api_timeout' + ): + service_timeout = cloud[key] + else: + new_cloud[key] = cloud[key] + if service_timeout is not None: + new_cloud['api_timeout'] = service_timeout + # The common argparse arg from keystoneauth is called timeout, but + # os-client-config expects it to be called api_timeout + if self._argv_timeout: + if new_cloud.get('timeout'): + new_cloud['api_timeout'] = new_cloud.pop('timeout') + return new_cloud + + def get_all(self) -> list[cloud_region.CloudRegion]: + clouds = [] + + for cloud in self.get_cloud_names(): + for region in self._get_regions(cloud): + if region: + clouds.append( + self.get_one(cloud, region_name=region['name']) + ) + return clouds + + def get_all_clouds(self) -> list[cloud_region.CloudRegion]: + warnings.warn( + "The 'get_all_clouds' method is a deprecated alias for " + "'get_clouds' and will be removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.get_all() + + def _fix_args( + self, + args: dict[str, ty.Any] | None = None, + argparse: argparse_mod.Namespace | None = None, + ) -> dict[str, ty.Any]: + """Massage the passed-in options + + Replace - with _ and strip os_ prefixes. + + Convert an argparse Namespace object to a dict, removing values + that are either None or ''. 
+ """ + if not args: + args = {} + + if argparse: + # Convert the passed-in Namespace + o_dict = vars(argparse) + parsed_args = dict() + for k in o_dict: + if o_dict[k] is not None and o_dict[k] != '': + parsed_args[k] = o_dict[k] + args.update(parsed_args) + + os_args = {} + new_args = {} + for key, val in iter(args.items()): + if isinstance(args[key], dict): + # dive into the auth dict + new_args[key] = self._fix_args(args[key]) + continue + + key = key.replace('-', '_') + if key.startswith('os_'): + os_args[key[3:]] = val + else: + new_args[key] = val + new_args.update(os_args) + return new_args + + def _find_winning_auth_value( + self, opt: 'opts.Opt', config: dict[str, dict[str, ty.Any]] + ) -> dict[str, ty.Any] | None: + opt_name = opt.name.replace('-', '_') + if opt_name in config: + return config[opt_name] + else: + deprecated = getattr( + opt, 'deprecated', getattr(opt, 'deprecated_opts', []) + ) + for d_opt in deprecated: + d_opt_name = d_opt.name.replace('-', '_') + if d_opt_name in config: + return config[d_opt_name] + + return None + + def auth_config_hook(self, config: dict[str, ty.Any]) -> dict[str, ty.Any]: + """Allow examination of config values before loading auth plugin + + OpenStackClient will override this to perform additional checks + on auth_type. + """ + return config + + def _get_auth_loader( + self, config: dict[str, ty.Any] + ) -> loading.BaseLoader[ty.Any]: + # Use the 'none' plugin for variants of None specified, + # since it does not look up endpoints or tokens but rather + # does a passthrough. 
This is useful for things like Ironic + # that have a keystoneless operational mode, but means we're + # still dealing with a keystoneauth Session object, so all the + # _other_ things (SSL arg handling, timeout) all work consistently + if config['auth_type'] in (None, "None", ''): + config['auth_type'] = 'none' + elif config['auth_type'] == 'token_endpoint': + # Humans have been trained to use a thing called token_endpoint + # That it does not exist in keystoneauth is irrelevant- it not + # doing what they want causes them sorrow. + config['auth_type'] = 'admin_token' + + loader: loading.BaseLoader[ty.Any] = loading.get_plugin_loader( + config['auth_type'] + ) + + # As the name would suggest, v3multifactor uses multiple factors for + # authentication. As a result, we need to register the configuration + # options for each required auth method. Normally, this is handled by + # the 'MultiFactor.load_from_options' method but there doesn't appear + # to be a way to "register" the auth methods without actually loading + # the plugin. As a result, if we encounter this auth type then we need + # to do this registration of extra options manually. + # FIXME(stephenfin): We need to provide a mechanism to extend the + # options in keystoneauth1.loading._plugins.identity.v3.MultiAuth + # without calling 'load_from_options'. 
+ if config['auth_type'] == 'v3multifactor': + if ty.TYPE_CHECKING: + # narrow types + assert isinstance(loader, v3_loaders.MultiFactor) + # We use '.get' since we can't be sure this key is set yet - + # validation happens later, in _validate_auth + loader._methods = config.get('auth_methods') + + return loader + + def _validate_auth( + self, config: dict[str, ty.Any], loader: loading.BaseLoader[ty.Any] + ) -> dict[str, ty.Any]: + # May throw a keystoneauth1.exceptions.NoMatchingPlugin + + plugin_options = loader.get_options() + + for p_opt in plugin_options: + # if it's in config.auth, win, kill it from config dict + # if it's in config and not in config.auth, move it + # deprecated loses to current + # provided beats default, deprecated or not + winning_value = self._find_winning_auth_value( + p_opt, + config['auth'], + ) + if not winning_value: + winning_value = self._find_winning_auth_value( + p_opt, + config, + ) + + config = self._clean_up_after_ourselves( + config, + p_opt, + winning_value, + ) + + if winning_value: + # Prefer the plugin configuration dest value if the value's key + # is marked as deprecated. 
if p_opt.dest is None: + good_name = p_opt.name.replace('-', '_') + config['auth'][good_name] = winning_value + else: + config['auth'][p_opt.dest] = winning_value + + # See if this needs a prompting + config = self.option_prompt(config, p_opt) + + return config + + def _validate_auth_correctly( + self, config: dict[str, ty.Any], loader: loading.BaseLoader[ty.Any] + ) -> dict[str, ty.Any]: + # May throw a keystoneauth1.exceptions.NoMatchingPlugin + + plugin_options = loader.get_options() + + for p_opt in plugin_options: + # if it's in config, win, move it and kill it from config dict + # if it's in config.auth but not in config it's good + # deprecated loses to current + # provided beats default, deprecated or not + winning_value = self._find_winning_auth_value( + p_opt, + config, + ) + if not winning_value: + winning_value = self._find_winning_auth_value( + p_opt, + config['auth'], + ) + + config = self._clean_up_after_ourselves( + config, + p_opt, + winning_value, + ) + + # See if this needs a prompting + config = self.option_prompt(config, p_opt) + + return config + + def option_prompt( + self, config: dict[str, ty.Any], p_opt: 'opts.Opt' + ) -> dict[str, ty.Any]: + """Prompt user for option that requires a value""" + if ( + getattr(p_opt, 'prompt', None) is not None + and p_opt.dest not in config['auth'] + and self._pw_callback is not None + ): + config['auth'][p_opt.dest] = self._pw_callback(p_opt.prompt) + return config + + def _clean_up_after_ourselves( + self, + config: dict[str, ty.Any], + p_opt: 'opts.Opt', + winning_value: ty.Any, + ) -> dict[str, ty.Any]: + # Clean up after ourselves + for opt in [p_opt.name] + [o.name for o in p_opt.deprecated]: + opt = opt.replace('-', '_') + config.pop(opt, None) + config['auth'].pop(opt, None) + + if winning_value: + # Prefer the plugin configuration dest value if the value's key + # is marked as deprecated. 
+ if p_opt.dest is None: + config['auth'][p_opt.name.replace('-', '_')] = winning_value + else: + config['auth'][p_opt.dest] = winning_value + return config + + def _handle_value_types( + self, config: dict[str, ty.Any] + ) -> dict[str, ty.Any]: + for key in BOOL_KEYS: + if key in config: + if not isinstance(config[key], bool): + config[key] = get_boolean(config[key]) + + for key in CSV_KEYS: + if key in config: + if isinstance(config[key], str): + config[key] = config[key].split(',') + return config + + def magic_fixes(self, config: dict[str, ty.Any]) -> dict[str, ty.Any]: + """Perform the set of magic argument fixups""" + + # These backwards compat values are only set via argparse. If it's + # there, it's because it was passed in explicitly, and should win + config = self._fix_backwards_api_timeout(config) + config = self._fix_backwards_auth_plugin(config) + config = self._fix_backwards_auth(config) + config = self._fix_backwards_interface(config) + config = self._fix_backwards_networks(config) + config = self._handle_domain_id(config) + config = self._handle_value_types(config) + + # TODO(mordred): Special casing auth_url here. We should + # come back to this betterer later so that it's + # more generalized + if 'auth' in config and 'auth_url' in config['auth']: + config['auth']['auth_url'] = config['auth']['auth_url'].format( + **config + ) + + return config + + def get_one( + self, + cloud: str | None = None, + validate: bool = True, + argparse: argparse_mod.Namespace | None = None, + **kwargs: ty.Any, + ) -> cloud_region.CloudRegion: + """Retrieve a single CloudRegion and merge additional options + + :param string cloud: + The name of the configuration to load from clouds.yaml + :param boolean validate: + Validate the config. Setting this to False causes no auth plugin + to be created. It's really only useful for testing. 
+ :param Namespace argparse: + An argparse Namespace object; allows direct passing in of + argparse options to be added to the cloud config. Values + of None and '' will be removed. + :param region_name: Name of the region of the cloud. + :param kwargs: Additional configuration options + + :returns: openstack.config.cloud_region.CloudRegion + :raises: keystoneauth1.exceptions.MissingRequiredOptions + on missing required auth parameters + """ + + profile = kwargs.pop('profile', None) + args = self._fix_args(kwargs, argparse=argparse) + + if cloud is None: + if 'cloud' in args: + cloud = args['cloud'] + else: + cloud = self.default_cloud + + config = self._get_base_cloud_config(cloud, profile) + + # Get region specific settings + if 'region_name' not in args: + args['region_name'] = '' + region = self._get_region(cloud=cloud, region_name=args['region_name']) + args['region_name'] = region['name'] + region_args = copy.deepcopy(region['values']) + + # Regions is a list that we can use to create a list of cloud/region + # objects. 
It does not belong in the single-cloud dict + config.pop('regions', None) + + # Can't just do update, because None values take over + for arg_list in region_args, args: + for key, val in iter(arg_list.items()): + if val is not None: + if key == 'auth' and config[key] is not None: + config[key] = _auth_update(config[key], val) + else: + config[key] = val + + config = self.magic_fixes(config) + config = _util.normalize_keys(config) + + # NOTE(dtroyer): OSC needs a hook into the auth args before the + # plugin is loaded in order to maintain backward- + # compatible behaviour + config = self.auth_config_hook(config) + + if validate: + loader = self._get_auth_loader(config) + config = self._validate_auth(config, loader) + auth_plugin = loader.load_from_options(**config['auth']) + else: + auth_plugin = None + + # If any of the defaults reference other values, we need to expand + for key, value in config.items(): + if hasattr(value, 'format') and key not in FORMAT_EXCLUSIONS: + config[key] = value.format(**config) + + force_ipv4 = config.pop('force_ipv4', self.force_ipv4) + prefer_ipv6 = config.pop('prefer_ipv6', True) + if not prefer_ipv6: + force_ipv4 = True + + # Override global metrics config with more specific per-cloud + # details. 
+ metrics_config = config.get('metrics', {}) + statsd_config = metrics_config.get('statsd', {}) + statsd_host = statsd_config.get('host') or self._statsd_host + statsd_port = statsd_config.get('port') or self._statsd_port + statsd_prefix = statsd_config.get('prefix') or self._statsd_prefix + influxdb_config = metrics_config.get('influxdb', {}) + if influxdb_config: + merged_influxdb = copy.deepcopy(self._influxdb_config) + merged_influxdb.update(influxdb_config) + influxdb_config = merged_influxdb + else: + influxdb_config = self._influxdb_config + + if cloud is None: + cloud_name = '' + else: + cloud_name = str(cloud) + return self._cloud_region_class( + name=cloud_name, + region_name=config['region_name'], + config=config, + extra_config=self.extra_config, + force_ipv4=force_ipv4, + auth_plugin=auth_plugin, + openstack_config=self, + session_constructor=self._session_constructor, + app_name=self._app_name, + app_version=self._app_version, + cache_auth=self._cache_auth, + cache_expiration_time=self._cache_expiration_time, + cache_expirations=self._cache_expirations, + cache_path=self._cache_path, + cache_class=self._cache_class, + cache_arguments=self._cache_arguments, + password_callback=self._pw_callback, + statsd_host=statsd_host, + statsd_port=statsd_port, + statsd_prefix=statsd_prefix, + influxdb_config=influxdb_config, + ) + + def get_one_cloud( + self, + cloud: str | None = None, + validate: bool = True, + argparse: argparse_mod.Namespace | None = None, + **kwargs: ty.Any, + ) -> cloud_region.CloudRegion: + warnings.warn( + "The 'get_one_cloud' method is a deprecated alias for 'get_one' " + "and will be removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + return self.get_one( + cloud=cloud, + validate=validate, + argparse=argparse, + **kwargs, + ) + + def get_one_cloud_osc( + self, + cloud: str | None = None, + validate: bool = True, + argparse: argparse_mod.Namespace | None = None, + **kwargs: ty.Any, + ) -> cloud_region.CloudRegion: + 
"""Retrieve a single CloudRegion and merge additional options + + :param string cloud: + The name of the configuration to load from clouds.yaml + :param boolean validate: + Validate the config. Setting this to False causes no auth plugin + to be created. It's really only useful for testing. + :param Namespace argparse: + An argparse Namespace object; allows direct passing in of + argparse options to be added to the cloud config. Values + of None and '' will be removed. + :param region_name: Name of the region of the cloud. + :param kwargs: Additional configuration options + + :raises: keystoneauth1.exceptions.MissingRequiredOptions + on missing required auth parameters + """ + + args = self._fix_args(kwargs, argparse=argparse) + + if cloud is None: + if 'cloud' in args: + cloud = args['cloud'] + else: + cloud = self.default_cloud + + config = self._get_base_cloud_config(cloud) + + # Get region specific settings + if 'region_name' not in args: + args['region_name'] = '' + region = self._get_region(cloud=cloud, region_name=args['region_name']) + args['region_name'] = region['name'] + region_args = copy.deepcopy(region['values']) + + # Regions is a list that we can use to create a list of cloud/region + # objects. 
It does not belong in the single-cloud dict + config.pop('regions', None) + + # Can't just do update, because None values take over + for arg_list in region_args, args: + for key, val in iter(arg_list.items()): + if val is not None: + if key == 'auth' and config[key] is not None: + config[key] = _auth_update(config[key], val) + else: + config[key] = val + + config = self.magic_fixes(config) + + # NOTE(dtroyer): OSC needs a hook into the auth args before the + # plugin is loaded in order to maintain backward- + # compatible behaviour + config = self.auth_config_hook(config) + + if validate: + loader = self._get_auth_loader(config) + config = self._validate_auth_correctly(config, loader) + auth_plugin = loader.load_from_options(**config['auth']) + else: + auth_plugin = None + + # If any of the defaults reference other values, we need to expand + for key, value in config.items(): + if hasattr(value, 'format') and key not in FORMAT_EXCLUSIONS: + config[key] = value.format(**config) + + force_ipv4 = config.pop('force_ipv4', self.force_ipv4) + prefer_ipv6 = config.pop('prefer_ipv6', True) + if not prefer_ipv6: + force_ipv4 = True + + if cloud is None: + cloud_name = '' + else: + cloud_name = str(cloud) + return self._cloud_region_class( + name=cloud_name, + region_name=config['region_name'], + config=config, + extra_config=self.extra_config, + force_ipv4=force_ipv4, + auth_plugin=auth_plugin, + openstack_config=self, + cache_auth=self._cache_auth, + cache_expiration_time=self._cache_expiration_time, + cache_expirations=self._cache_expirations, + cache_path=self._cache_path, + cache_class=self._cache_class, + cache_arguments=self._cache_arguments, + password_callback=self._pw_callback, + ) + + @staticmethod + def set_one_cloud( + config_file: str, + cloud: str, + set_config: dict[str, ty.Any] | None = None, + ) -> None: + """Set a single cloud configuration. + + :param string config_file: + The path to the config file to edit. 
If this file does not exist + it will be created. + :param string cloud: + The name of the configuration to save to clouds.yaml + :param dict set_config: Configuration options to be set + """ + + set_config = set_config or {} + cur_config = {} + try: + with open(config_file) as fh: + cur_config = yaml.safe_load(fh) + except OSError as e: + # Not no such file + if e.errno != 2: + raise + pass + + clouds_config = cur_config.get('clouds', {}) + cloud_config = _auth_update(clouds_config.get(cloud, {}), set_config) + clouds_config[cloud] = cloud_config + cur_config['clouds'] = clouds_config + + with open(config_file, 'w') as fh: + yaml.safe_dump(cur_config, fh, default_flow_style=False) + + +if __name__ == '__main__': + config = OpenStackConfig().get_all_clouds() + for cloud in config: + print_cloud = False + if len(sys.argv) == 1: + print_cloud = True + elif len(sys.argv) == 3 and ( + sys.argv[1] == cloud.name and sys.argv[2] == cloud.region + ): + print_cloud = True + elif len(sys.argv) == 2 and (sys.argv[1] == cloud.name): + print_cloud = True + + if print_cloud: + print(cloud.name, cloud.region, cloud.config) diff --git a/openstack/config/schema.json b/openstack/config/schema.json new file mode 100644 index 0000000000..ff07f0d999 --- /dev/null +++ b/openstack/config/schema.json @@ -0,0 +1,123 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "https://opendev.org/openstack/openstacksdk/raw/branch/master/openstack/config/schema.json", + "type": "object", + "properties": { + "auth_type": { + "name": "Auth Type", + "description": "Name of authentication plugin to be used", + "default": "password", + "type": "string" + }, + "disable_vendor_agent": { + "name": "Disable Vendor Agent Properties", + "description": "Image properties required to disable vendor agent", + "type": "object", + "properties": {} + }, + "floating_ip_source": { + "name": "Floating IP Source", + "description": "Which service provides Floating IPs", + "enum": [ "neutron", "nova", 
"None" ], + "default": "neutron" + }, + "image_api_use_tasks": { + "name": "Image Task API", + "description": "Does the cloud require the Image Task API", + "default": false, + "type": "boolean" + }, + "image_format": { + "name": "Image Format", + "description": "Format for uploaded Images", + "default": "qcow2", + "type": "string" + }, + "interface": { + "name": "API Interface", + "description": "Which API Interface should connections hit", + "default": "public", + "enum": [ "public", "internal", "admin" ] + }, + "secgroup_source": { + "name": "Security Group Source", + "description": "Which service provides security groups", + "default": "neutron", + "enum": [ "neutron", "nova", "None" ] + }, + "baremetal_api_version": { + "name": "Baremetal API Service Type", + "description": "Baremetal API Service Type", + "default": "1", + "type": "string" + }, + "block_storage_api_version": { + "name": "Block Storage API Version", + "description": "Block Storage API Version", + "default": "2", + "type": "string" + }, + "compute_api_version": { + "name": "Compute API Version", + "description": "Compute API Version", + "default": "2", + "type": "string" + }, + "database_api_version": { + "name": "Database API Version", + "description": "Database API Version", + "default": "1.0", + "type": "string" + }, + "dns_api_version": { + "name": "DNS API Version", + "description": "DNS API Version", + "default": "2", + "type": "string" + }, + "identity_api_version": { + "name": "Identity API Version", + "description": "Identity API Version", + "default": "2", + "type": "string" + }, + "image_api_version": { + "name": "Image API Version", + "description": "Image API Version", + "default": "1", + "type": "string" + }, + "network_api_version": { + "name": "Network API Version", + "description": "Network API Version", + "default": "2", + "type": "string" + }, + "object_store_api_version": { + "name": "Object Storage API Version", + "description": "Object Storage API Version", + "default": 
"1", + "type": "string" + }, + "volume_api_version": { + "name": "Volume API Version", + "description": "Volume API Version", + "default": "2", + "type": "string" + }, + "vendor_hook": { + "name": "Hook for vendor customization", + "description": "A possibility for a vendor to alter connection object", + "type": "string" + } + }, + "required": [ + "auth_type", + "disable_vendor_agent", + "floating_ip_source", + "image_api_use_tasks", + "image_format", + "interface", + "secgroup_source" + ] +} diff --git a/openstack/config/vendor-schema.json b/openstack/config/vendor-schema.json new file mode 100644 index 0000000000..be9ce5e6ed --- /dev/null +++ b/openstack/config/vendor-schema.json @@ -0,0 +1,233 @@ +{ + "$schema": "http://json-schema.org/draft-04/schema#", + "id": "https://opendev.org/openstack/openstacksdk/raw/branch/master/openstack/config/vendor-schema.json#", + "type": "object", + "properties": { + "name": { + "type": "string" + }, + "profile": { + "type": "object", + "properties": { + "auth": { + "type": "object", + "properties": { + "auth_url": { + "name": "Auth URL", + "description": "URL of the primary Keystone endpoint", + "type": "string" + } + } + }, + "auth_type": { + "name": "Auth Type", + "description": "Name of authentication plugin to be used", + "default": "password", + "type": "string" + }, + "disable_vendor_agent": { + "name": "Disable Vendor Agent Properties", + "description": "Image properties required to disable vendor agent", + "type": "object", + "properties": {} + }, + "floating_ip_source": { + "name": "Floating IP Source", + "description": "Which service provides Floating IPs", + "enum": [ "neutron", "nova", "None" ], + "default": "neutron" + }, + "image_api_use_tasks": { + "name": "Image Task API", + "description": "Does the cloud require the Image Task API", + "default": false, + "type": "boolean" + }, + "image_format": { + "name": "Image Format", + "description": "Format for uploaded Images", + "default": "qcow2", + "type": "string" + 
}, + "interface": { + "name": "API Interface", + "description": "Which API Interface should connections hit", + "default": "public", + "enum": [ "public", "internal", "admin" ] + }, + "message": { + "name": "Status message", + "description": "Optional message with information related to status", + "type": "string" + }, + "requires_floating_ip": { + "name": "Requires Floating IP", + "description": "Whether the cloud requires a floating IP to route traffic off of the cloud", + "default": null, + "type": ["boolean", "null"] + }, + "secgroup_source": { + "name": "Security Group Source", + "description": "Which service provides security groups", + "enum": [ "neutron", "nova", "None" ], + "default": "neutron" + }, + "status": { + "name": "Vendor status", + "description": "Status of the vendor's cloud", + "enum": [ "active", "deprecated", "shutdown"], + "default": "active" + }, + "compute_service_name": { + "name": "Compute API Service Name", + "description": "Compute API Service Name", + "type": "string" + }, + "database_service_name": { + "name": "Database API Service Name", + "description": "Database API Service Name", + "type": "string" + }, + "dns_service_name": { + "name": "DNS API Service Name", + "description": "DNS API Service Name", + "type": "string" + }, + "identity_service_name": { + "name": "Identity API Service Name", + "description": "Identity API Service Name", + "type": "string" + }, + "image_service_name": { + "name": "Image API Service Name", + "description": "Image API Service Name", + "type": "string" + }, + "volume_service_name": { + "name": "Volume API Service Name", + "description": "Volume API Service Name", + "type": "string" + }, + "network_service_name": { + "name": "Network API Service Name", + "description": "Network API Service Name", + "type": "string" + }, + "object_service_name": { + "name": "Object Storage API Service Name", + "description": "Object Storage API Service Name", + "type": "string" + }, + "baremetal_service_name": { + 
"name": "Baremetal API Service Name", + "description": "Baremetal API Service Name", + "type": "string" + }, + "compute_service_type": { + "name": "Compute API Service Type", + "description": "Compute API Service Type", + "type": "string" + }, + "database_service_type": { + "name": "Database API Service Type", + "description": "Database API Service Type", + "type": "string" + }, + "dns_service_type": { + "name": "DNS API Service Type", + "description": "DNS API Service Type", + "type": "string" + }, + "identity_service_type": { + "name": "Identity API Service Type", + "description": "Identity API Service Type", + "type": "string" + }, + "image_service_type": { + "name": "Image API Service Type", + "description": "Image API Service Type", + "type": "string" + }, + "volume_service_type": { + "name": "Volume API Service Type", + "description": "Volume API Service Type", + "type": "string" + }, + "network_service_type": { + "name": "Network API Service Type", + "description": "Network API Service Type", + "type": "string" + }, + "object_service_type": { + "name": "Object Storage API Service Type", + "description": "Object Storage API Service Type", + "type": "string" + }, + "baremetal_service_type": { + "name": "Baremetal API Service Type", + "description": "Baremetal API Service Type", + "type": "string" + }, + "block_storage_api_version": { + "name": "Block Storage API Version", + "description": "Block Storage API Version", + "type": "string" + }, + "compute_api_version": { + "name": "Compute API Version", + "description": "Compute API Version", + "type": "string" + }, + "database_api_version": { + "name": "Database API Version", + "description": "Database API Version", + "type": "string" + }, + "dns_api_version": { + "name": "DNS API Version", + "description": "DNS API Version", + "type": "string" + }, + "identity_api_version": { + "name": "Identity API Version", + "description": "Identity API Version", + "type": "string" + }, + "image_api_version": { + "name": 
"Image API Version", + "description": "Image API Version", + "type": "string" + }, + "volume_api_version": { + "name": "Volume API Version", + "description": "Volume API Version", + "type": "string" + }, + "network_api_version": { + "name": "Network API Version", + "description": "Network API Version", + "type": "string" + }, + "object_api_version": { + "name": "Object Storage API Version", + "description": "Object Storage API Version", + "type": "string" + }, + "baremetal_api_version": { + "name": "Baremetal API Version", + "description": "Baremetal API Version", + "type": "string" + }, + "vendor_hook": { + "name": "Hook for vendor customization", + "description": "A possibility for a vendor to alter connection object", + "type": "string" + } + } + } + }, + "required": [ + "name", + "profile" + ] +} diff --git a/openstack/config/vendors/__init__.py b/openstack/config/vendors/__init__.py new file mode 100644 index 0000000000..94c8d5b8c8 --- /dev/null +++ b/openstack/config/vendors/__init__.py @@ -0,0 +1,88 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import glob +import json +import os +import typing as ty +import urllib + +import requests +import yaml + +from openstack.config import _util +from openstack import exceptions + +_VENDORS_PATH = os.path.dirname(os.path.realpath(__file__)) +_VENDOR_DEFAULTS: dict[str, dict[str, ty.Any]] = {} +_WELL_KNOWN_PATH = "{scheme}://{netloc}/.well-known/openstack/api" + + +def _get_vendor_defaults() -> dict[str, dict[str, ty.Any]]: + global _VENDOR_DEFAULTS + + if not _VENDOR_DEFAULTS: + for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.yaml')): + with open(vendor) as f: + vendor_data = yaml.safe_load(f) + _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile'] + + for vendor in glob.glob(os.path.join(_VENDORS_PATH, '*.json')): + with open(vendor) as f: + vendor_data = json.load(f) + _VENDOR_DEFAULTS[vendor_data['name']] = vendor_data['profile'] + + return _VENDOR_DEFAULTS + + +def get_profile(profile_name: str) -> dict[str, ty.Any] | None: + vendor_defaults = _get_vendor_defaults() + if profile_name in vendor_defaults: + return vendor_defaults[profile_name].copy() + + profile_url = urllib.parse.urlparse(profile_name) + if not profile_url.netloc: + # This isn't a url, and we already don't have it. + return None + + well_known_url = _WELL_KNOWN_PATH.format( + scheme=profile_url.scheme, + netloc=profile_url.netloc, + ) + response = requests.get(well_known_url, timeout=10) + if not response.ok: + raise exceptions.ConfigException( + f"{profile_name} is a remote profile that could not be fetched: " + f"{response.status_code} {response.reason}" + ) + vendor_defaults[profile_name] = None + return + + vendor_data = response.json() + name = vendor_data['name'] + # Merge named and url cloud config, but make named config override the + # config from the cloud so that we can supply local overrides if needed. 
+ profile = _util.merge_clouds( + vendor_data['profile'], vendor_defaults.get(name, {}) + ) + # If there is (or was) a profile listed in a named config profile, it + # might still be here. We just merged in content from a URL though, so + # pop the key to prevent doing it again in the future. + profile.pop('profile', None) + # Save the data under both names so we don't reprocess this, no matter + # how we're called. + vendor_defaults[profile_name] = profile + vendor_defaults[name] = profile + + return profile diff --git a/openstack/config/vendors/auro.json b/openstack/config/vendors/auro.json new file mode 100644 index 0000000000..21d832b2b6 --- /dev/null +++ b/openstack/config/vendors/auro.json @@ -0,0 +1,13 @@ +{ + "name": "auro", + "profile": { + "auth": { + "auth_url": "https://api.van2.auro.io:5000/v3", + "user_domain_name": "Default", + "project_domain_name": "Default" + }, + "identity_api_version": "3", + "region_name": "RegionOne", + "requires_floating_ip": true + } +} diff --git a/openstack/config/vendors/betacloud.json b/openstack/config/vendors/betacloud.json new file mode 100644 index 0000000000..a254abf394 --- /dev/null +++ b/openstack/config/vendors/betacloud.json @@ -0,0 +1,16 @@ +{ + "name": "betacloud", + "profile": { + "auth": { + "auth_url": "https://api-1.betacloud.de:5000" + }, + "regions": [ + "betacloud-1" + ], + "identity_api_version": "3", + "image_format": "raw", + "block_storage_api_version": "3", + "status": "shutdown", + "message": "betacloud.de has ceased business" + } +} diff --git a/openstack/config/vendors/binero.json b/openstack/config/vendors/binero.json new file mode 100644 index 0000000000..5482d7196e --- /dev/null +++ b/openstack/config/vendors/binero.json @@ -0,0 +1,13 @@ +{ + "name": "binero", + "profile": { + "auth": { + "auth_url": "https://auth.binero.cloud:5000/v3" + }, + "identity_api_version": "3", + "block_storage_api_version": "3", + "regions": [ + "europe-se-1" + ] + } +} diff --git 
a/openstack/config/vendors/bluebox.json b/openstack/config/vendors/bluebox.json new file mode 100644 index 0000000000..d50e5acd57 --- /dev/null +++ b/openstack/config/vendors/bluebox.json @@ -0,0 +1,7 @@ +{ + "name": "bluebox", + "profile": { + "block_storage_api_version": "1", + "region_name": "RegionOne" + } +} diff --git a/openstack/config/vendors/catalyst.json b/openstack/config/vendors/catalyst.json new file mode 100644 index 0000000000..511d3b768a --- /dev/null +++ b/openstack/config/vendors/catalyst.json @@ -0,0 +1,15 @@ +{ + "name": "catalyst", + "profile": { + "auth": { + "auth_url": "https://api.cloud.catalyst.net.nz:5000/v2.0" + }, + "regions": [ + "nz-por-1", + "nz_wlg_2" + ], + "image_api_version": "1", + "block_storage_api_version": "1", + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/citycloud.json b/openstack/config/vendors/citycloud.json new file mode 100644 index 0000000000..a1c765195c --- /dev/null +++ b/openstack/config/vendors/citycloud.json @@ -0,0 +1,20 @@ +{ + "name": "citycloud", + "profile": { + "auth": { + "auth_url": "https://{region_name}.citycloud.com:5000/v3/" + }, + "regions": [ + "Buf1", + "Fra1", + "Sto2", + "Kna1", + "dx1", + "tky1" + ], + "requires_floating_ip": true, + "block_storage_api_version": "3", + "identity_api_version": "3", + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/conoha.json b/openstack/config/vendors/conoha.json new file mode 100644 index 0000000000..5636f09552 --- /dev/null +++ b/openstack/config/vendors/conoha.json @@ -0,0 +1,14 @@ +{ + "name": "conoha", + "profile": { + "auth": { + "auth_url": "https://identity.{region_name}.conoha.io" + }, + "regions": [ + "sin1", + "sjc1", + "tyo1" + ], + "identity_api_version": "2" + } +} diff --git a/openstack/config/vendors/dreamcompute.json b/openstack/config/vendors/dreamcompute.json new file mode 100644 index 0000000000..8244cf77c7 --- /dev/null +++ b/openstack/config/vendors/dreamcompute.json @@ -0,0 +1,11 @@ +{ + "name": 
"dreamcompute", + "profile": { + "auth": { + "auth_url": "https://iad2.dream.io:5000" + }, + "identity_api_version": "3", + "region_name": "RegionOne", + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/elastx.json b/openstack/config/vendors/elastx.json new file mode 100644 index 0000000000..b6d208b93d --- /dev/null +++ b/openstack/config/vendors/elastx.json @@ -0,0 +1,10 @@ +{ + "name": "elastx", + "profile": { + "auth": { + "auth_url": "https://ops.elastx.cloud:5000/v3" + }, + "identity_api_version": "3", + "region_name": "se-sto" + } +} diff --git a/openstack/config/vendors/entercloudsuite.json b/openstack/config/vendors/entercloudsuite.json new file mode 100644 index 0000000000..711db59d87 --- /dev/null +++ b/openstack/config/vendors/entercloudsuite.json @@ -0,0 +1,16 @@ +{ + "name": "entercloudsuite", + "profile": { + "auth": { + "auth_url": "https://api.entercloudsuite.com/" + }, + "identity_api_version": "3", + "image_api_version": "1", + "block_storage_api_version": "1", + "regions": [ + "it-mil1", + "nl-ams1", + "de-fra1" + ] + } +} diff --git a/openstack/config/vendors/fuga.json b/openstack/config/vendors/fuga.json new file mode 100644 index 0000000000..8f2da84218 --- /dev/null +++ b/openstack/config/vendors/fuga.json @@ -0,0 +1,17 @@ +{ + "name": "fuga", + "profile": { + "auth": { + "auth_url": "https://identity.api.fuga.io:5000", + "user_domain_name": "Default", + "project_domain_name": "Default" + }, + "regions": [ + "cystack" + ], + "identity_api_version": "3", + "block_storage_api_version": "3", + "status": "deprecated", + "message": "the API Endpoint is no longer responsive" + } +} diff --git a/openstack/config/vendors/ibmcloud.json b/openstack/config/vendors/ibmcloud.json new file mode 100644 index 0000000000..a0950dc2aa --- /dev/null +++ b/openstack/config/vendors/ibmcloud.json @@ -0,0 +1,15 @@ +{ + "name": "ibmcloud", + "profile": { + "auth": { + "auth_url": "https://identity.open.softlayer.com" + }, + 
"block_storage_api_version": "2", + "identity_api_version": "3", + "regions": [ + "london" + ], + "status": "shutdown", + "message": "the API Endpoint is no longer responsive" + } +} diff --git a/openstack/config/vendors/internap.json b/openstack/config/vendors/internap.json new file mode 100644 index 0000000000..55a0b3ad6d --- /dev/null +++ b/openstack/config/vendors/internap.json @@ -0,0 +1,19 @@ +{ + "name": "internap", + "profile": { + "auth": { + "auth_url": "https://identity.api.cloud.inap.com" + }, + "regions": [ + "ams01", + "da01", + "nyj01", + "sin01", + "sjc01" + ], + "identity_api_version": "3", + "floating_ip_source": "None", + "status": "shutdown", + "message": "Internap have been rebranded as HorizonIQ" + } +} diff --git a/openstack/config/vendors/limestonenetworks.yaml b/openstack/config/vendors/limestonenetworks.yaml new file mode 100644 index 0000000000..d5b359fe57 --- /dev/null +++ b/openstack/config/vendors/limestonenetworks.yaml @@ -0,0 +1,36 @@ +--- + +name: limestonenetworks +profile: + auth: + auth_url: https://auth.cloud.lstn.net:5000/v3 + regions: + - name: us-dfw-1 + values: + networks: + - name: Public Internet + routes_externally: true + default_interface: true + nat_source: true + - name: DDoS Protected + routes_externally: true + - name: Private Network (10.0.0.0/8 only) + routes_externally: false + - name: Private Network (Floating Public) + routes_externally: false + nat_destination: true + - name: us-slc + values: + networks: + - name: Public Internet + routes_externally: true + default_interface: true + nat_source: true + - name: Private Network (10.0.0.0/8 only) + routes_externally: false + - name: Private Network (Floating Public) + routes_externally: false + nat_destination: true + identity_api_version: '3' + image_format: raw + volume_api_version: '3' diff --git a/openstack/config/vendors/otc-swiss.json b/openstack/config/vendors/otc-swiss.json new file mode 100644 index 0000000000..0b5eef4169 --- /dev/null +++ 
b/openstack/config/vendors/otc-swiss.json @@ -0,0 +1,15 @@ +{ + "name": "otc-swiss", + "profile": { + "auth": { + "auth_url": "iam-pub.eu-ch2.sc.otc.t-systems.com/v3" + }, + "regions": [ + "eu-ch2" + ], + "identity_api_version": "3", + "interface": "public", + "image_format": "qcow2", + "vendor_hook": "otcextensions.sdk:load" + } +} diff --git a/openstack/config/vendors/otc.json b/openstack/config/vendors/otc.json new file mode 100644 index 0000000000..1a860ada4a --- /dev/null +++ b/openstack/config/vendors/otc.json @@ -0,0 +1,16 @@ +{ + "name": "otc", + "profile": { + "auth": { + "auth_url": "https://iam.{region_name}.otc.t-systems.com/v3" + }, + "regions": [ + "eu-de", + "eu-nl" + ], + "identity_api_version": "3", + "interface": "public", + "image_format": "qcow2", + "vendor_hook": "otcextensions.sdk:load" + } +} diff --git a/openstack/config/vendors/ovh-us.json b/openstack/config/vendors/ovh-us.json new file mode 100644 index 0000000000..4bbb956611 --- /dev/null +++ b/openstack/config/vendors/ovh-us.json @@ -0,0 +1,18 @@ +{ + "name": "ovh-us", + "profile": { + "auth": { + "auth_url": "https://auth.cloud.ovh.us/", + "user_domain_name": "Default", + "project_domain_name": "Default" + }, + "regions": [ + "US-EAST-VA-1", + "US-WEST-OR-1", + "US-EAST-VA", + "US-WEST-OR" + ], + "identity_api_version": "3", + "floating_ip_source": "None" + } +} diff --git a/openstack/config/vendors/ovh.json b/openstack/config/vendors/ovh.json new file mode 100644 index 0000000000..f65f9c67f8 --- /dev/null +++ b/openstack/config/vendors/ovh.json @@ -0,0 +1,6 @@ +{ + "name": "ovh", + "profile": { + "profile": "https://ovhcloud.com" + } +} \ No newline at end of file diff --git a/openstack/config/vendors/rackspace.json b/openstack/config/vendors/rackspace.json new file mode 100644 index 0000000000..1884980e25 --- /dev/null +++ b/openstack/config/vendors/rackspace.json @@ -0,0 +1,31 @@ +{ + "name": "rackspace", + "profile": { + "auth": { + "auth_url": 
"https://identity.api.rackspacecloud.com/v2.0/" + }, + "identity_api_version": "2.0", + "regions": [ + "DFW", + "HKG", + "IAD", + "ORD", + "SYD", + "LON" + ], + "database_service_type": "rax:database", + "compute_service_name": "cloudServersOpenStack", + "image_api_use_tasks": true, + "image_format": "vhd", + "floating_ip_source": "None", + "secgroup_source": "None", + "requires_floating_ip": false, + "block_storage_endpoint_override": "https://{region_name}.blockstorage.api.rackspacecloud.com/v2/", + "block_storage_api_version": "2", + "disable_vendor_agent": { + "vm_mode": "hvm", + "xenapi_use_agent": "False" + }, + "has_network": false + } +} diff --git a/openstack/config/vendors/switchengines.json b/openstack/config/vendors/switchengines.json new file mode 100644 index 0000000000..43503018ef --- /dev/null +++ b/openstack/config/vendors/switchengines.json @@ -0,0 +1,14 @@ +{ + "name": "switchengines", + "profile": { + "auth": { + "auth_url": "https://keystone.cloud.switch.ch:5000/v3" + }, + "regions": [ + "LS", + "ZH" + ], + "identity_api_version": "3", + "image_format": "raw" + } +} diff --git a/openstack/config/vendors/ultimum.json b/openstack/config/vendors/ultimum.json new file mode 100644 index 0000000000..83128a2896 --- /dev/null +++ b/openstack/config/vendors/ultimum.json @@ -0,0 +1,13 @@ +{ + "name": "ultimum", + "profile": { + "auth": { + "auth_url": "https://console.ultimum-cloud.com:5000/" + }, + "identity_api_version": "3", + "block_storage_api_version": "1", + "region-name": "RegionOne", + "status": "shutdown", + "message": "ultimum-cloud.com has ceased business" + } +} diff --git a/openstack/config/vendors/unitedstack.json b/openstack/config/vendors/unitedstack.json new file mode 100644 index 0000000000..b7ee678fc0 --- /dev/null +++ b/openstack/config/vendors/unitedstack.json @@ -0,0 +1,18 @@ +{ + "name": "unitedstack", + "profile": { + "auth": { + "auth_url": "https://identity.api.ustack.com/v3" + }, + "regions": [ + "bj1", + "gd1" + ], + 
"block_storage_api_version": "1", + "identity_api_version": "3", + "image_format": "raw", + "floating_ip_source": "None", + "status": "shutdown", + "message": "the API Endpoint is no longer responsive" + } +} diff --git a/openstack/config/vendors/vexxhost.json b/openstack/config/vendors/vexxhost.json new file mode 100644 index 0000000000..2f846068c8 --- /dev/null +++ b/openstack/config/vendors/vexxhost.json @@ -0,0 +1,6 @@ +{ + "name": "vexxhost", + "profile": { + "profile": "https://vexxhost.com" + } +} diff --git a/openstack/config/vendors/zetta.json b/openstack/config/vendors/zetta.json new file mode 100644 index 0000000000..44e9711ff0 --- /dev/null +++ b/openstack/config/vendors/zetta.json @@ -0,0 +1,13 @@ +{ + "name": "zetta", + "profile": { + "auth": { + "auth_url": "https://identity.api.zetta.io/v3" + }, + "regions": [ + "no-osl1" + ], + "identity_api_version": "3", + "dns_api_version": "2" + } +} diff --git a/openstack/connection.py b/openstack/connection.py index fe114372a5..688ed9793f 100644 --- a/openstack/connection.py +++ b/openstack/connection.py @@ -12,275 +12,711 @@ """ The :class:`~openstack.connection.Connection` class is the primary interface -to the Python SDK it maintains a context for a connection to a cloud provider. -The connection has an attribute to access each supported service. The service -attributes are created dynamically based on user profiles and the service -catalog. - -Examples --------- +to the Python SDK. It maintains a context for a connection to a region of +a cloud provider. The :class:`~openstack.connection.Connection` has an +attribute to access each OpenStack service. At a minimum, the :class:`~openstack.connection.Connection` class needs to be -created with an authenticator or the parameters to build one. +created with a config or the parameters to build one. + +While the overall system is very flexible, there are four main use cases +for different ways to create a :class:`~openstack.connection.Connection`. 
+ +* Using config settings and keyword arguments as described in + :ref:`openstack-config` +* Using only keyword arguments passed to the constructor ignoring config files + and environment variables. +* Using an existing authenticated `keystoneauth1.session.Session`, such as + might exist inside of an OpenStack service operational context. +* Using an existing :class:`~openstack.config.cloud_region.CloudRegion`. + +Creating the Connection +----------------------- + +Using config settings +~~~~~~~~~~~~~~~~~~~~~ + +For users who want to create a :class:`~openstack.connection.Connection` making +use of named clouds in ``clouds.yaml`` files, ``OS_`` environment variables +and python keyword arguments, the :func:`openstack.connect` factory function +is the recommended way to go: + +.. code-block:: python + + import openstack + + conn = openstack.connect(cloud='example', region_name='earth1') + +If the application in question is a command line application that should also +accept command line arguments, an `argparse.Namespace` can be passed to +:func:`openstack.connect` that will have relevant arguments added to it and +then subsequently consumed by the constructor: + +.. code-block:: python + + import argparse + import openstack + + options = argparse.ArgumentParser(description='Awesome OpenStack App') + conn = openstack.connect(options=options) + +Using only keyword arguments +~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +If the application wants to avoid loading any settings from ``clouds.yaml`` or +environment variables, use the :class:`~openstack.connection.Connection` +constructor directly. As long as the ``cloud`` argument is omitted or ``None``, +the :class:`~openstack.connection.Connection` constructor will not load +settings from files or the environment. + +.. note:: + + This is a different default behavior than the :func:`~openstack.connect` + factory function. 
In :func:`~openstack.connect` if ``cloud`` is omitted + or ``None``, a default cloud will be loaded, defaulting to the ``envvars`` + cloud if it exists. + +.. code-block:: python + + from openstack import connection + + conn = connection.Connection( + region_name='example-region', + auth={ + 'auth_url': 'https://auth.example.com', + 'username': 'amazing-user', + 'password': 'super-secret-password', + 'project_id': '33aa1afc-03fe-43b8-8201-4e0d3b4b8ab5', + 'user_domain_id': '054abd68-9ad9-418b-96d3-3437bb376703', + }, + compute_api_version='2', + identity_interface='internal', + ) + +Per-service settings as needed by `keystoneauth1.adapter.Adapter` such as +``api_version``, ``service_name``, and ``interface`` can be set, as seen +above, by prefixing them with the official ``service-type`` name of the +service. ``region_name`` is a setting for the entire +:class:`~openstack.config.cloud_region.CloudRegion` and cannot be set per +service. + +From existing authenticated Session +~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For applications that already have an authenticated Session, simply passing +it to the :class:`~openstack.connection.Connection` constructor is all that +is needed: + +.. code-block:: python + + from openstack import connection + + conn = connection.Connection( + session=session, + region_name='example-region', + compute_api_version='2', + identity_interface='internal', + ) + +From oslo.conf CONF object +~~~~~~~~~~~~~~~~~~~~~~~~~~ + +For applications that have an oslo.config ``CONF`` object that has been +populated with ``keystoneauth1.loading.register_adapter_conf_options`` in +groups named by the OpenStack service's project name, it is possible to +construct a Connection with the ``CONF`` object and an authenticated Session. + +.. note:: + + This is primarily intended for use by OpenStack services to talk amongst + themselves. + +.. 
code-block:: python + + from keystoneauth1 import loading as ks_loading + from openstack import connection + from oslo_config import cfg + + CONF = cfg.CONF + + group = cfg.OptGroup('neutron') + ks_loading.register_session_conf_options(CONF, group) + ks_loading.register_auth_conf_options(CONF, group) + ks_loading.register_adapter_conf_options(CONF, group) + + CONF() + + auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron') + sess = ks_loading.load_session_from_conf_options( + CONF, 'neutron', auth=auth + ) + + conn = connection.Connection( + session=sess, + oslo_conf=CONF, + ) + +This can then be used with an appropriate configuration file. + +.. code-block:: ini + + [neutron] + region_name = RegionOne + auth_strategy = keystone + project_domain_name = Default + project_name = service + user_domain_name = Default + password = password + username = neutron + auth_url = http://10.0.110.85/identity + auth_type = password + service_metadata_proxy = True + default_floating_pool = public + +You may also wish to configure a service user. As discussed in the `Keystone +documentation`__, service users are users with specific roles that identify the +user as a service. The use of service users can avoid issues caused by the +expiration of the original user's token during long running operations, as a +fresh token issued for the service user will always accompany the user's token, +which may have expired. + +.. 
code-block:: python + + from keystoneauth1 import loading as ks_loading + from keystoneauth1 import service_token + from oslo_config import cfg + import openstack + from openstack import connection + + CONF = cfg.CONF + + neutron_group = cfg.OptGroup('neutron') + ks_loading.register_session_conf_options(CONF, neutron_group) + ks_loading.register_auth_conf_options(CONF, neutron_group) + ks_loading.register_adapter_conf_options(CONF, neutron_group) + + service_group = cfg.OptGroup('service_user') + ks_loading.register_session_conf_options(CONF, service_group) + ks_loading.register_auth_conf_options(CONF, service_group) + + CONF() + user_auth = ks_loading.load_auth_from_conf_options(CONF, 'neutron') + service_auth = ks_loading.load_auth_from_conf_options(CONF, 'service_user') + auth = service_token.ServiceTokenAuthWrapper(user_auth, service_auth) + + sess = ks_loading.load_session_from_conf_options( + CONF, 'neutron', auth=auth + ) + + conn = connection.Connection( + session=sess, + oslo_conf=CONF, + ) -Create a connection -~~~~~~~~~~~~~~~~~~~ +This will necessitate an additional section in the configuration file used. -The following example constructor uses the identity authenticator using -username and password. The default settings for the transport are used -by this connection.:: +.. code-block:: ini + + [service_user] + auth_strategy = keystone + project_domain_name = Default + project_name = service + user_domain_name = Default + password = password + username = nova + auth_url = http://10.0.110.85/identity + auth_type = password + +.. __: https://docs.openstack.org/keystone/latest/admin/manage-services.html + +From existing CloudRegion +~~~~~~~~~~~~~~~~~~~~~~~~~ + +If you already have an :class:`~openstack.config.cloud_region.CloudRegion` +you can pass it in instead: + +.. 
code-block:: python from openstack import connection - auth_args = { - 'auth_url': 'http://172.20.1.108:5000/v3', - 'project_name': 'admin', - 'username': 'admin', - 'password': 'admin', - } - conn = connection.Connection(**auth_args) + import openstack.config + + config = openstack.config.get_cloud_region( + cloud='example', + region_name='earth', + ) + conn = connection.Connection(config=config) + +Using the Connection +-------------------- + +Services are accessed through an attribute named after the service's official +service-type. List ~~~~ -Services are accessed through an attribute named after the service. A list -of all the projects is retrieved in this manner:: +An iterator containing a list of all the projects is retrieved in this manner: + +.. code-block:: python - projects = conn.identity.list_projects() + projects = conn.identity.projects() Find or create ~~~~~~~~~~~~~~ -If you wanted to make sure you had a network named 'jenkins', you would first + +If you wanted to make sure you had a network named 'zuul', you would first try to find it and if that fails, you would create it:: - network = conn.network.find_network("jenkins") + network = conn.network.find_network("zuul") if network is None: - network = conn.network.create_network({"name": "jenkins"}) + network = conn.network.create_network(name="zuul") +Additional information about the services can be found in the +:ref:`service-proxies` documentation. 
""" -import logging -import sys - -from keystoneauth1.loading import base as ksa_loader -import os_client_config +import argparse +import concurrent.futures +import copy +import importlib.metadata as importlib_metadata +import typing as ty + +import keystoneauth1.exceptions +from keystoneauth1 import session as ks_session +import typing_extensions as ty_ext + +from openstack import _log +from openstack.cloud import _accelerator +from openstack.cloud import _baremetal +from openstack.cloud import _block_storage +from openstack.cloud import _coe +from openstack.cloud import _compute +from openstack.cloud import _dns +from openstack.cloud import _identity +from openstack.cloud import _image +from openstack.cloud import _network +from openstack.cloud import _object_store +from openstack.cloud import _orchestration +from openstack.cloud import _shared_file_system +from openstack import config as _config +import openstack.config.cloud_region from openstack import exceptions -from openstack import profile as _profile -from openstack import proxy -from openstack import proxy2 -from openstack import session as _session -from openstack import utils - -_logger = logging.getLogger(__name__) - - -def from_config(cloud_name=None, cloud_config=None, options=None): - """Create a Connection using os-client-config - - :param str cloud_name: Use the `cloud_name` configuration details when - creating the Connection instance. - :param cloud_config: An instance of - `os_client_config.config.OpenStackConfig` - as returned from the os-client-config library. - If no `config` is provided, - `os_client_config.OpenStackConfig` will be called, - and the provided `cloud_name` will be used in - determining which cloud's configuration details - will be used in creation of the - `Connection` instance. - :param options: A namespace object; allows direct passing in of options to - be added to the cloud config. 
This does not have to be an - instance of argparse.Namespace, despite the naming of the - the `os_client_config.config.OpenStackConfig.get_one_cloud` - argument to which it is passed. +from openstack import service_description + +if ty.TYPE_CHECKING: + from oslo_config import cfg + + from openstack.config import cloud_region + from openstack import proxy + +__all__ = [ + 'Connection', + 'from_config', +] + +_logger = _log.setup_logging('openstack') + + +def from_config( + cloud: str | None = None, + config: ty.Optional['cloud_region.CloudRegion'] = None, + options: argparse.Namespace | None = None, + **kwargs: ty.Any, +) -> 'Connection': + """Create a Connection using openstack.config + + :param str cloud: + Use the `cloud` configuration details when creating the Connection. + :param openstack.config.cloud_region.CloudRegion config: + An existing CloudRegion configuration. If no `config` is provided, + `openstack.config.OpenStackConfig` will be called, and the provided + `name` will be used in determining which cloud's configuration + details will be used in creation of the `Connection` instance. + :param argparse.Namespace options: + Allows direct passing in of options to be added to the cloud config. + This does not have to be an actual instance of argparse.Namespace, + despite the naming of the + `openstack.config.loader.OpenStackConfig.get_one` argument to which + it is passed. :rtype: :class:`~openstack.connection.Connection` """ - # TODO(thowe): I proposed that service name defaults to None in OCC - defaults = {} - prof = _profile.Profile() - services = [service.service_type for service in prof.get_services()] - for service in services: - defaults[service + '_service_name'] = None - # TODO(thowe): default is 2 which turns into v2 which doesn't work - # this stuff needs to be fixed where we keep version and path separated. 
- defaults['network_api_version'] = 'v2.0' - if cloud_config is None: - occ = os_client_config.OpenStackConfig(override_defaults=defaults) - cloud_config = occ.get_one_cloud(cloud=cloud_name, argparse=options) - - if cloud_config.debug: - utils.enable_logging(True, stream=sys.stdout) - - # TODO(mordred) we need to add service_type setting to openstacksdk. - # Some clouds have type overridden as well as name. - services = [service.service_type for service in prof.get_services()] - for service in cloud_config.get_services(): - if service in services: - version = cloud_config.get_api_version(service) - if version: - version = str(version) - if not version.startswith("v"): - version = "v" + version - prof.set_version(service, version) - name = cloud_config.get_service_name(service) - if name: - prof.set_name(service, name) - interface = cloud_config.get_interface(service) - if interface: - prof.set_interface(service, interface) - - region = cloud_config.get_region_name(service) - if region: - for service in services: - prof.set_region(service, region) - - # Auth - auth = cloud_config.config['auth'] - # TODO(thowe) We should be using auth_type - auth['auth_plugin'] = cloud_config.config['auth_type'] - if 'cacert' in auth: - auth['verify'] = auth.pop('cacert') - if 'cacert' in cloud_config.config: - auth['verify'] = cloud_config.config['cacert'] - insecure = cloud_config.config.get('insecure', False) - if insecure: - auth['verify'] = False - - cert = cloud_config.config.get('cert') - if cert: - key = cloud_config.config.get('key') - auth['cert'] = (cert, key) if key else cert - - return Connection(profile=prof, **auth) - - -class Connection(object): - - def __init__(self, session=None, authenticator=None, profile=None, - verify=True, cert=None, user_agent=None, - auth_plugin="password", - **auth_args): - """Create a context for a connection to a cloud provider. - - A connection needs a transport and an authenticator. 
The user may pass - in a transport and authenticator they want to use or they may pass in - the parameters to create a transport and authenticator. The connection - creates a - :class:`~openstack.session.Session` which uses the profile - and authenticator to perform HTTP requests. - + # TODO(mordred) Backwards compat while we transition + cloud = kwargs.pop('cloud_name', cloud) + config = kwargs.pop('cloud_config', config) + if config is None: + config = _config.OpenStackConfig().get_one( + cloud=cloud, argparse=options, **kwargs + ) + + return Connection(config=config) + + +class Connection( + _accelerator.AcceleratorCloudMixin, + _baremetal.BaremetalCloudMixin, + _block_storage.BlockStorageCloudMixin, + _compute.ComputeCloudMixin, + _coe.CoeCloudMixin, + _dns.DnsCloudMixin, + _identity.IdentityCloudMixin, + _image.ImageCloudMixin, + _network.NetworkCloudMixin, + _object_store.ObjectStoreCloudMixin, + _orchestration.OrchestrationCloudMixin, + _shared_file_system.SharedFileSystemCloudMixin, +): + def __init__( + self, + cloud: str | None = None, + config: ty.Optional['cloud_region.CloudRegion'] = None, + session: ks_session.Session | None = None, + app_name: str | None = None, + app_version: str | None = None, + extra_services: ( + list[service_description.ServiceDescription[ty.Any]] | None + ) = None, + strict: bool = False, + use_direct_get: bool | None = None, + task_manager: ty.Any = None, + rate_limit: float | dict[str, float] | None = None, + oslo_conf: ty.Optional['cfg.ConfigOpts'] = None, + service_types: list[str] | None = None, + global_request_id: str | None = None, + strict_proxies: bool = False, + pool_executor: concurrent.futures.Executor | None = None, + **kwargs: ty.Any, + ): + """Create a connection to a cloud. + + A connection needs information about how to connect, how to + authenticate and how to select the appropriate services to use. 
+ + The recommended way to provide this information is by referencing + a named cloud config from an existing `clouds.yaml` file. The cloud + name ``envvars`` may be used to consume a cloud configured via ``OS_`` + environment variables. + + A pre-existing :class:`~openstack.config.cloud_region.CloudRegion` + object can be passed in lieu of a cloud name, for cases where the user + already has a fully formed CloudRegion and just wants to use it. + + Similarly, if for some reason the user already has a + :class:`~keystoneauth1.session.Session` and wants to use it, it may be + passed in. + + :param str cloud: Name of the cloud from config to use. + :param config: CloudRegion object representing the config for the + region of the cloud in question. + :type config: :class:`~openstack.config.cloud_region.CloudRegion` :param session: A session object compatible with - :class:`~openstack.session.Session`. - :type session: :class:`~openstack.session.Session` - :param authenticator: An authenticator derived from the base - authenticator plugin that was previously created. Two common - authentication identity plugins are - :class:`identity_v2 ` and - :class:`identity_v3 `. - If this parameter is not passed in, the connection will create an - authenticator. - :type authenticator: :class:`~openstack.auth.base.BaseAuthPlugin` - :param profile: If the user has any special profiles such as the - service name, region, version or interface, they may be provided - in the profile object. If no profiles are provided, the - services that appear first in the service catalog will be used. - :type profile: :class:`~openstack.profile.Profile` - :param bool verify: If a transport is not provided to the connection, - this parameter will be used to create a transport. If ``verify`` - is set to true, which is the default, the SSL cert will be - verified. It can also be set to a CA_BUNDLE path. 
- :param cert: If a transport is not provided to the connection then this - parameter will be used to create a transport. `cert` allows to - provide a client certificate file path or a tuple with client - certificate and key paths. - :type cert: str or tuple - :param str user_agent: If a transport is not provided to the - connection, this parameter will be used when creating a transport. - The value given here will be prepended to the default, which is - specified in :attr:`~openstack.transport.USER_AGENT`. - The resulting ``user_agent`` value is used for the ``User-Agent`` - HTTP header. - :param str auth_plugin: The name of authentication plugin to use. - The default value is ``password``. - :param auth_args: The rest of the parameters provided are assumed to be - authentication arguments that are used by the authentication - plugin. + :class:`~keystoneauth1.session.Session`. + :type session: :class:`~keystoneauth1.session.Session` + :param str app_name: Name of the application to be added to User Agent. + :param str app_version: Version of the application to be added to + User Agent. + :param extra_services: List of + :class:`~openstack.service_description.ServiceDescription` + objects describing services that openstacksdk otherwise does not + know about. + :param bool use_direct_get: + For get methods, make specific REST calls for server-side + filtering instead of making list calls and filtering client-side. + Default false. + :param task_manager: + Ignored. Exists for backwards compat during transition. Rate limit + parameters should be passed directly to the `rate_limit` parameter. + :param rate_limit: + Client-side rate limit, expressed in calls per second. The + parameter can either be a single float, or it can be a dict with + keys as service-type and values as floats expressing the calls + per second for that service. Defaults to None, which means no + rate-limiting is performed. 
+ :param oslo_conf: An oslo.config ``CONF`` object that has been + populated with + ``keystoneauth1.loading.register_adapter_conf_options`` in + groups named by the OpenStack service's project name. + :type oslo_conf: :class:`~oslo_config.cfg.ConfigOpts` + :param service_types: + A list/set of service types this Connection should support. All + other service types will be disabled (will error if used). + **Currently only supported in conjunction with the ``oslo_conf`` + kwarg.** + :param strict_proxies: + Throw an ``openstack.exceptions.ServiceDiscoveryException`` if the + endpoint for a given service doesn't work. This is useful for + OpenStack services using sdk to talk to other OpenStack services + where it can be expected that the deployer config is correct and + errors should be reported immediately. + Default false. + :type strict_proxies: bool + :param global_request_id: A Request-id to send with all interactions. + :type global_request_id: str + :param pool_executor: + A futurist ``Executor`` object to be used for concurrent background + activities. Defaults to None in which case a ThreadPoolExecutor + will be created if needed. + :type pool_executor: :class:`~futurist.Executor` + :param kwargs: If a config is not provided, the rest of the parameters + provided are assumed to be arguments to be passed to the + CloudRegion constructor. """ - self.profile = profile if profile else _profile.Profile() - if session: - # Make sure it is the right kind of session. A keystoneauth1 - # session would work in some ways but show strange errors in - # others. E.g. a Resource.find would work with an id but fail when - # given a name because it attempts to catch - # openstack.exceptions.NotFoundException to signal that a search by - # ID failed before trying a search by name, but with a - # keystoneauth1 session the lookup by ID raises - # keystoneauth1.exceptions.NotFound instead. 
We need to ensure our - # Session class gets used so that our implementation of various - # methods always works as we expect. - if not isinstance(session, _session.Session): - raise exceptions.SDKException( - 'Session instance is from %s but must be from %s' % - (session.__module__, _session.__name__)) - self.session = session - else: - self.authenticator = self._create_authenticator(authenticator, - auth_plugin, - **auth_args) - self.session = _session.Session( - self.profile, auth=self.authenticator, verify=verify, - cert=cert, user_agent=user_agent) - - self._open() - - def _create_authenticator(self, authenticator, auth_plugin, **args): - if authenticator: - return authenticator - # TODO(thowe): Jamie was suggesting we should support other - # ways of loading the plugin - loader = ksa_loader.get_plugin_loader(auth_plugin) - load_args = {} - for opt in loader.get_options(): - if args.get(opt.dest): - load_args[opt.dest] = args[opt.dest] - return loader.load_from_options(**load_args) - - def _open(self): - """Open the connection. - - NOTE(thowe): Have this set up some lazy loader instead. + super().__init__( + cloud=cloud, + config=config, + session=session, + app_name=app_name, + app_version=app_version, + extra_services=extra_services, + strict=strict, + use_direct_get=use_direct_get, + task_manager=task_manager, + rate_limit=rate_limit, + oslo_conf=oslo_conf, + service_types=service_types, + global_request_id=global_request_id, + strict_proxies=strict_proxies, + pool_executor=pool_executor, + **kwargs, + ) + + # Allow vendors to provide hooks. 
They will normally only receive a + # connection object and a responsible to register additional services + vendor_hook = kwargs.get('vendor_hook') + if not vendor_hook and 'vendor_hook' in self.config.config: + # Get the one from profile + vendor_hook = self.config.config.get('vendor_hook') + if vendor_hook: + try: + # NOTE(gtema): no class name in the hook, plain module:function + # Split string hook into module and function + try: + package_name, function = vendor_hook.rsplit(':') + + if package_name and function: + ep = importlib_metadata.EntryPoint( + name='vendor_hook', + value=vendor_hook, + group='vendor_hook', + ) + hook = ep.load() + hook(self) + except ValueError: + self.log.warning( + 'Hook should be in the entrypoint ' + 'module:attribute format' + ) + except (ImportError, TypeError, AttributeError) as e: + self.log.warning( + 'Configured hook %s cannot be executed: %s', vendor_hook, e + ) + + # Add additional metrics into the configuration according to the + # selected connection. We don't want to deal with overall config in the + # proxy, just pass required part. + if ( + self.config._influxdb_config + and 'additional_metric_tags' in self.config.config + ): + self.config._influxdb_config['additional_metric_tags'] = ( + self.config.config['additional_metric_tags'] + ) + + def add_service( + self, service: service_description.ServiceDescription['proxy.Proxy'] + ) -> None: + """Add a service to the Connection. + + Attaches an instance of the :class:`~openstack.proxy.Proxy` + class contained in + :class:`~openstack.service_description.ServiceDescription`. + The :class:`~openstack.proxy.Proxy` will be attached to the + `Connection` by its ``service_type`` and by any ``aliases`` that + may be specified. + + :param openstack.service_description.ServiceDescription service: + Object describing the service to be attached. 
As a convenience, + if ``service`` is a string it will be treated as a ``service_type`` + and a basic + :class:`~openstack.service_description.ServiceDescription` + will be created. """ - for service in self.profile.get_services(): - self._load(service) - - def _load(self, service): - attr_name = service.get_service_module() - module = service.get_module() + "._proxy" - try: - __import__(module) - proxy_class = getattr(sys.modules[module], "Proxy") - if not (issubclass(proxy_class, proxy.BaseProxy) or - issubclass(proxy_class, proxy2.BaseProxy)): - raise TypeError("%s.Proxy must inherit from BaseProxy" % - proxy_class.__module__) - setattr(self, attr_name, proxy_class(self.session)) - except Exception as e: - _logger.warn("Unable to load %s: %s" % (module, e)) - - def authorize(self): + # If we don't have a proxy, just instantiate Proxy so that + # we get an adapter. + if isinstance(service, str): + service = service_description.ServiceDescription['proxy.Proxy']( + service + ) + + # Directly invoke descriptor of the ServiceDescription + def getter(self: 'Connection') -> 'proxy.Proxy': + return service.__get__(self, type(self)) + + # Register the ServiceDescription class (as property) + # with every known alias for a "runtime descriptor" + for attr_name in service.all_types: + setattr( + self.__class__, + attr_name.replace('-', '_'), + property(fget=getter), + ) + self.config.enable_service(service.service_type) + + def authorize(self) -> str: """Authorize this Connection - **NOTE**: This method is optional. When an application makes a call - to any OpenStack service, this method allows you to request - a token manually before attempting to do anything else. + .. note:: + + This method is optional. When an application makes a call to any + OpenStack service, this method allows you to request a token + manually before attempting to do anything else. :returns: A string token. 
+ :raises: :class:`~openstack.exceptions.HttpException` if the + authorization fails due to reasons like the credentials provided + are unable to be authorized or the `auth_type` argument is missing, + etc. + """ + try: + return ty.cast(str, self.session.get_token()) + except keystoneauth1.exceptions.ClientException as e: + raise exceptions.SDKException(str(e)) + + def connect_as(self, **kwargs: ty.Any) -> ty_ext.Self: + """Make a new Connection object with new auth context. + + Take the existing settings from the current cloud and construct a new + Connection object with some of the auth settings overridden. This + is useful for getting an object to perform tasks with as another user, + or in the context of a different project. + + .. code-block:: python + + conn = openstack.connect(cloud='example') + # Work normally + servers = conn.list_servers() + conn2 = conn.connect_as(username='different-user', password='') + # Work as different-user + servers = conn2.list_servers() + + :param kwargs: keyword arguments can contain anything that would + normally go in an auth dict. They will override the same settings + from the parent cloud as appropriate. Entries that do not want to + be overridden can be omitted. + """ - :raises:`~openstack.exceptions.HttpException` if the authorization - fails due to reasons like the credentials provided are unable - to be authorized or the `auth_plugin` argument is missing, - etc. + if self.config._openstack_config: + config = self.config._openstack_config + else: + # TODO(mordred) Replace this with from_session + config = openstack.config.OpenStackConfig( + app_name=self.config._app_name, + app_version=self.config._app_version, + load_yaml_config=False, + ) + params = copy.deepcopy(self.config.config) + # Remove profile from current cloud so that overriding works + params.pop('profile', None) + + # Utility function to help with the stripping below. 
+ def pop_keys( + params: dict[str, dict[str, str | None]], + auth: dict[str, str | None], + name_key: str, + id_key: str, + ) -> None: + if name_key in auth or id_key in auth: + params['auth'].pop(name_key, None) + params['auth'].pop(id_key, None) + + # If there are user, project or domain settings in the incoming auth + # dict, strip out both id and name so that a user can say: + # cloud.connect_as(project_name='foo') + # and have that work with clouds that have a project_id set in their + # config. + for prefix in ('user', 'project'): + if prefix == 'user': + name_key = 'username' + else: + name_key = 'project_name' + id_key = f'{prefix}_id' + pop_keys(params, kwargs, name_key, id_key) + id_key = f'{prefix}_domain_id' + name_key = f'{prefix}_domain_name' + pop_keys(params, kwargs, name_key, id_key) + + for key, value in kwargs.items(): + params['auth'][key] = value + + cloud_region = config.get_one(**params) + # Attach the discovery cache from the old session so we won't + # double discover. + cloud_region._discovery_cache = self.session._discovery_cache + # Override the cloud name so that logging/location work right + cloud_region._name = self.name + cloud_region.config['profile'] = self.name + # Use self.__class__ so that we return whatever this is, like if it's + # a subclass in the case of shade wrapping sdk. + return self.__class__(config=cloud_region) + + def connect_as_project(self, project: str) -> ty_ext.Self: + """Make a new Connection object with a new project. + + Take the existing settings from the current cloud and construct a new + Connection object with the project settings overridden. This + is useful for getting an object to perform tasks with as another user, + or in the context of a different project. + + .. 
code-block:: python + + cloud = openstack.connect(cloud='example') + # Work normally + servers = cloud.list_servers() + cloud2 = cloud.connect_as_project('different-project') + # Work in different-project + servers = cloud2.list_servers() + + :param project: Either a project name or a project dict as returned by + ``list_projects``. + """ + auth = {} + if isinstance(project, dict): + auth['project_id'] = project.get('id') + auth['project_name'] = project.get('name') + if project.get('domain_id'): + auth['project_domain_id'] = project['domain_id'] + else: + auth['project_name'] = project + return self.connect_as(**auth) + + def endpoint_for( + self, + service_type: str, + interface: str | None = None, + region_name: str | None = None, + ) -> str | None: + """Return the endpoint for a given service. + + Respects config values for Connection, including + ``*_endpoint_override``. For direct values from the catalog regardless + of overrides, see + :meth:`~openstack.config.cloud_region.CloudRegion.get_endpoint_from_catalog` + + :param service_type: Service Type of the endpoint to search for. + :param interface: Interface of the endpoint to search for. Optional, + defaults to the configured value for interface for this Connection. + :param region_name: Region Name of the endpoint to search for. + Optional, defaults to the configured value for region_name for this + Connection. + + :returns: The endpoint of the service, or None if not found. 
""" - headers = self.session.get_auth_headers() - return headers.get('X-Auth-Token') if headers else None + endpoint_override = self.config.get_endpoint(service_type) + if endpoint_override: + return endpoint_override + return self.config.get_endpoint_from_catalog( + service_type=service_type, + interface=interface, + region_name=region_name, + ) diff --git a/openstack/tests/functional/telemetry/__init__.py b/openstack/container_infrastructure_management/__init__.py similarity index 100% rename from openstack/tests/functional/telemetry/__init__.py rename to openstack/container_infrastructure_management/__init__.py diff --git a/openstack/container_infrastructure_management/container_infrastructure_management_service.py b/openstack/container_infrastructure_management/container_infrastructure_management_service.py new file mode 100644 index 0000000000..df9a0d57be --- /dev/null +++ b/openstack/container_infrastructure_management/container_infrastructure_management_service.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.container_infrastructure_management.v1 import _proxy +from openstack import service_description + + +class ContainerInfrastructureManagementService( + service_description.ServiceDescription[_proxy.Proxy], +): + """The container infrastructure management service.""" + + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/tests/functional/telemetry/alarm/__init__.py b/openstack/container_infrastructure_management/v1/__init__.py similarity index 100% rename from openstack/tests/functional/telemetry/alarm/__init__.py rename to openstack/container_infrastructure_management/v1/__init__.py diff --git a/openstack/container_infrastructure_management/v1/_proxy.py b/openstack/container_infrastructure_management/v1/_proxy.py new file mode 100644 index 0000000000..eb24995b59 --- /dev/null +++ b/openstack/container_infrastructure_management/v1/_proxy.py @@ -0,0 +1,326 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack.container_infrastructure_management.v1 import ( + cluster as _cluster, +) +from openstack.container_infrastructure_management.v1 import ( + cluster_certificate as _cluster_cert, +) +from openstack.container_infrastructure_management.v1 import ( + cluster_template as _cluster_template, +) +from openstack.container_infrastructure_management.v1 import ( + service as _service, +) +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + _resource_registry = { + "cluster": _cluster.Cluster, + "cluster_template": _cluster_template.ClusterTemplate, + "service": _service.Service, + } + + # ========== Clusters ========== + + def create_cluster(self, **attrs): + """Create a new cluster from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster`, + comprised of the properties on the Cluster class. + :returns: The results of cluster creation + :rtype: + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + """ + return self._create(_cluster.Cluster, **attrs) + + def delete_cluster(self, cluster, ignore_missing=True): + """Delete a cluster + + :param cluster: The value can be either the ID of a cluster or a + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the cluster does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent cluster. + :returns: ``None`` + """ + self._delete(_cluster.Cluster, cluster, ignore_missing=ignore_missing) + + def find_cluster(self, name_or_id, ignore_missing=True): + """Find a single cluster + + :param name_or_id: The name or ID of a cluster. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + or None + """ + return self._find( + _cluster.Cluster, + name_or_id, + ignore_missing=ignore_missing, + ) + + def get_cluster(self, cluster): + """Get a single cluster + + :param cluster: The value can be the ID of a cluster or a + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + instance. + + :returns: One + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_cluster.Cluster, cluster) + + def clusters(self, **query): + """Return a generator of clusters + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of cluster objects + :rtype: + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + """ + return self._list(_cluster.Cluster, **query) + + def update_cluster(self, cluster, **attrs): + """Update a cluster + + :param cluster: Either the id of a cluster or a + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + instance. + :param attrs: The attributes to update on the cluster represented + by ``cluster``. 
+ + :returns: The updated cluster + :rtype: + :class:`~openstack.container_infrastructure_management.v1.cluster.Cluster` + """ + return self._update(_cluster.Cluster, cluster, **attrs) + + # ============== Cluster Templates ============== + + def create_cluster_template(self, **attrs): + """Create a new cluster_template from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate`, + comprised of the properties on the ClusterTemplate class. + :returns: The results of cluster_template creation + :rtype: + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + """ + return self._create(_cluster_template.ClusterTemplate, **attrs) + + def delete_cluster_template(self, cluster_template, ignore_missing=True): + """Delete a cluster_template + + :param cluster_template: The value can be either the ID of a + cluster_template or a + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the cluster_template does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + cluster_template. + :returns: ``None`` + """ + self._delete( + _cluster_template.ClusterTemplate, + cluster_template, + ignore_missing=ignore_missing, + ) + + def find_cluster_template(self, name_or_id, ignore_missing=True): + """Find a single cluster_template + + :param name_or_id: The name or ID of a cluster_template. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
+ :returns: One + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + or None + """ + return self._find( + _cluster_template.ClusterTemplate, + name_or_id, + ignore_missing=ignore_missing, + ) + + def get_cluster_template(self, cluster_template): + """Get a single cluster_template + + :param cluster_template: The value can be the ID of a cluster_template + or a + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + instance. + + :returns: One + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_cluster_template.ClusterTemplate, cluster_template) + + def cluster_templates(self, **query): + """Return a generator of cluster_templates + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of cluster_template objects + :rtype: + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + """ + return self._list(_cluster_template.ClusterTemplate, **query) + + def update_cluster_template(self, cluster_template, **attrs): + """Update a cluster_template + + :param cluster_template: Either the id of a cluster_template or a + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + instance. + :param attrs: The attributes to update on the cluster_template + represented by ``cluster_template``. 
+ + :returns: The updated cluster_template + :rtype: + :class:`~openstack.container_infrastructure_management.v1.cluster_template.ClusterTemplate` + """ + return self._update( + _cluster_template.ClusterTemplate, cluster_template, **attrs + ) + + # ============== Cluster Certificates ============== + + def create_cluster_certificate(self, **attrs): + """Create a new cluster_certificate from CSR + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate`, + comprised of the properties on the ClusterCertificate class. + :returns: The results of cluster_certificate creation + :rtype: + :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate` + """ + return self._create(_cluster_cert.ClusterCertificate, **attrs) + + def get_cluster_certificate(self, cluster_certificate): + """Get a single cluster_certificate + + :param cluster_certificate: The value can be the ID of a + cluster_certificate or a + :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate` + instance. + + :returns: One + :class:`~openstack.container_infrastructure_management.v1.cluster_certificate.ClusterCertificate` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_cluster_cert.ClusterCertificate, cluster_certificate) + + # ============== Services ============== + + def services(self): + """Return a generator of services + + :returns: A generator of service objects + :rtype: + :class:`~openstack.container_infrastructure_management.v1.service.Service` + """ + return self._list(_service.Service) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/container_infrastructure_management/v1/cluster.py b/openstack/container_infrastructure_management/v1/cluster.py new file mode 100644 index 0000000000..19d4efdabc --- /dev/null +++ b/openstack/container_infrastructure_management/v1/cluster.py @@ -0,0 +1,168 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Cluster(resource.Resource): + resources_key = 'clusters' + base_path = '/clusters' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + + commit_method = 'PATCH' + commit_jsonpatch = True + + #: The endpoint URL of COE API exposed to end-users. + api_address = resource.Body('api_address') + #: The UUID of the cluster template. + cluster_template_id = resource.Body('cluster_template_id') + #: Version info of chosen COE in bay/cluster for helping client in picking + #: the right version of client. + coe_version = resource.Body('coe_version') + #: The timeout for cluster creation in minutes. The value expected is a + #: positive integer. If the timeout is reached during cluster creation + #: process, the operation will be aborted and the cluster status will be + #: set to CREATE_FAILED. Defaults to 60. + create_timeout = resource.Body('create_timeout', type=int) + #: The date and time when the resource was created. The date and time stamp + #: format is ISO 8601:: + #: + #: CCYY-MM-DDThh:mm:ss±hh:mm + #: + #: For example, `2015-08-27T09:49:58-05:00`. The ±hh:mm value, if included, + #: is the time zone as an offset from UTC. + created_at = resource.Body('created_at') + #: The custom discovery url for node discovery. This is used by the COE to + #: discover the servers that have been created to host the containers. The + #: actual discovery mechanism varies with the COE. In some cases, the + #: service fills in the server info in the discovery service. In other + #: cases,if the discovery_url is not specified, the service will use the + #: public discovery service at https://discovery.etcd.io. In this case, the + #: service will generate a unique url here for each bay and store the info + #: for the servers. 
+ discovery_url = resource.Body('discovery_url') + #: The name or ID of the network to provide connectivity to the internal + #: network for the bay/cluster. + fixed_network = resource.Body('fixed_network') + #: The fixed subnet to use when allocating network addresses for nodes in + #: bay/cluster. + fixed_subnet = resource.Body('fixed_subnet') + #: The flavor name or ID to use when booting the node servers. Defaults to + #: m1.small. + flavor_id = resource.Body('flavor_id') + #: Whether to enable using the floating IP of cloud provider. Some cloud + #: providers use floating IPs while some use public IPs. When set to true, + #: floating IPs will be used. If this value is not provided, the value of + #: ``floating_ip_enabled`` provided in the template will be used. + is_floating_ip_enabled = resource.Body('floating_ip_enabled', type=bool) + #: Whether to enable the master load balancer. Since multiple masters may + #: exist in a bay/cluster, a Neutron load balancer is created to provide + #: the API endpoint for the bay/cluster and to direct requests to the + #: masters. In some cases, such as when the LBaaS service is not available, + #: this option can be set to false to create a bay/cluster without the load + #: balancer. In this case, one of the masters will serve as the API + #: endpoint. The default is true, i.e. to create the load balancer for the + #: bay. + is_master_lb_enabled = resource.Body('master_lb_enabled', type=bool) + #: The name of the SSH keypair to configure in the bay/cluster servers for + #: SSH access. Users will need the key to be able to ssh to the servers in + #: the bay/cluster. The login name is specific to the bay/cluster driver. + #: For example, with fedora-atomic image the default login name is fedora. + keypair = resource.Body('keypair') + #: Arbitrary labels. The accepted keys and valid values are defined in the + #: bay/cluster drivers. 
They are used as a way to pass additional + #: parameters that are specific to a bay/cluster driver. + labels = resource.Body('labels', type=dict) + #: A list of floating IPs of all master nodes. + master_addresses = resource.Body('master_addresses', type=list) + #: The number of servers that will serve as master for the bay/cluster. Set + #: to more than 1 master to enable High Availability. If the option + #: master-lb-enabled is specified in the baymodel/cluster template, the + #: master servers will be placed in a load balancer pool. Defaults to 1. + master_count = resource.Body('master_count', type=int) + #: The flavor of the master node for this baymodel/cluster template. + master_flavor_id = resource.Body('master_flavor_id') + #: Name of the resource. + name = resource.Body('name') + #: The number of servers that will serve as node in the bay/cluster. + #: Defaults to 1. + node_count = resource.Body('node_count', type=int) + #: A list of floating IPs of all servers that serve as nodes. + node_addresses = resource.Body('node_addresses', type=list) + #: The reference UUID of orchestration stack from Heat orchestration + #: service. + stack_id = resource.Body('stack_id') + #: The current state of the bay/cluster. + status = resource.Body('status') + #: The reason of bay/cluster current status. + status_reason = resource.Body('reason') + #: The date and time when the resource was updated. The date and time stamp + #: format is ISO 8601:: + #: + #: CCYY-MM-DDThh:mm:ss±hh:mm + #: + #: For example, `2015-08-27T09:49:58-05:00`. The ±hh:mm value, if included, + #: is the time zone as an offset from UTC. If the updated_at date and time + #: stamp is not set, its value is null. + updated_at = resource.Body('updated_at') + #: The UUID of the cluster. + uuid = resource.Body('uuid', alternate_id=True) + + def resize(self, session, *, node_count, nodes_to_remove=None): + """Resize the cluster. 
+ + :param node_count: The number of servers that will serve as node in the + bay/cluster. The default is 1. + :param nodes_to_remove: The server ID list will be removed if + downsizing the cluster. + :returns: The UUID of the resized cluster. + :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. + """ + url = utils.urljoin(Cluster.base_path, self.id, 'actions', 'resize') + headers = {'Accept': ''} + body = { + 'node_count': node_count, + 'nodes_to_remove': nodes_to_remove, + } + response = session.post(url, json=body, headers=headers) + exceptions.raise_from_response(response) + return response['uuid'] + + def upgrade(self, session, *, cluster_template, max_batch_size=None): + """Upgrade the cluster. + + :param cluster_template: The UUID of the cluster template. + :param max_batch_size: The max batch size each time when doing upgrade. + The default is 1 + :returns: The UUID of the updated cluster. + :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. + """ + url = utils.urljoin(Cluster.base_path, self.id, 'actions', 'upgrade') + headers = {'Accept': ''} + body = { + 'cluster_template': cluster_template, + 'max_batch_size': max_batch_size, + } + response = session.post(url, json=body, headers=headers) + exceptions.raise_from_response(response) + return response['uuid'] diff --git a/openstack/container_infrastructure_management/v1/cluster_certificate.py b/openstack/container_infrastructure_management/v1/cluster_certificate.py new file mode 100644 index 0000000000..0bd8d6fd01 --- /dev/null +++ b/openstack/container_infrastructure_management/v1/cluster_certificate.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ClusterCertificate(resource.Resource): + base_path = '/certificates' + + # capabilities + allow_create = True + allow_list = False + allow_fetch = True + + #: The UUID of the bay. + bay_uuid = resource.Body('bay_uuid') + #: The UUID of the cluster. + cluster_uuid = resource.Body('cluster_uuid', alternate_id=True) + #: Certificate Signing Request (CSR) for authenticating client key. + csr = resource.Body('csr') + #: CA certificate for the bay/cluster. + pem = resource.Body('pem') diff --git a/openstack/container_infrastructure_management/v1/cluster_template.py b/openstack/container_infrastructure_management/v1/cluster_template.py new file mode 100644 index 0000000000..38b74844d9 --- /dev/null +++ b/openstack/container_infrastructure_management/v1/cluster_template.py @@ -0,0 +1,115 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class ClusterTemplate(resource.Resource): + resources_key = 'clustertemplates' + base_path = '/clustertemplates' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_patch = True + + commit_method = 'PATCH' + commit_jsonpatch = True + + #: The exposed port of COE API server. + apiserver_port = resource.Body('apiserver_port', type=int) + #: Display the attribute os_distro defined as appropriate metadata in image + #: for the bay/cluster driver. + cluster_distro = resource.Body('cluster_distro') + #: Specify the Container Orchestration Engine to use. Supported COEs + #: include kubernetes, swarm, mesos. + coe = resource.Body('coe') + #: The date and time when the resource was created. + created_at = resource.Body('created_at') + #: The name of a driver to manage the storage for the images and the + #: container's writable layer. + docker_storage_driver = resource.Body('docker_storage_driver') + #: The size in GB for the local storage on each server for the Docker + #: daemon to cache the images and host the containers. + docker_volume_size = resource.Body('docker_volume_size', type=int) + #: The DNS nameserver for the servers and containers in the bay/cluster to + #: use. + dns_nameserver = resource.Body('dns_nameserver') + #: The name or network ID of a Neutron network to provide connectivity to + #: the external internet for the bay/cluster. + external_network_id = resource.Body('external_network_id') + #: The name or network ID of a Neutron network to provide connectivity to + #: the internal network for the bay/cluster. + fixed_network = resource.Body('fixed_network') + #: Fixed subnet that are using to allocate network address for nodes in + #: bay/cluster. + fixed_subnet = resource.Body('fixed_subnet') + #: The nova flavor ID or name for booting the node servers. 
+ flavor_id = resource.Body('flavor_id') + #: The IP address for a proxy to use when direct http access + #: from the servers to sites on the external internet is blocked. + #: This may happen in certain countries or enterprises, and the + #: proxy allows the servers and containers to access these sites. + #: The format is a URL including a port number. The default is + #: None. + http_proxy = resource.Body('http_proxy') + #: The IP address for a proxy to use when direct https access from the + #: servers to sites on the external internet is blocked. + https_proxy = resource.Body('https_proxy') + #: The name or UUID of the base image in Glance to boot the servers for the + #: bay/cluster. + image_id = resource.Body('image_id') + #: The URL pointing to user's own private insecure docker + #: registry to deploy and run docker containers. + insecure_registry = resource.Body('insecure_registry') + #: Whether enable or not using the floating IP of cloud provider. + is_floating_ip_enabled = resource.Body('floating_ip_enabled') + #: Indicates whether the ClusterTemplate is hidden or not. + is_hidden = resource.Body('hidden', type=bool) + #: this option can be set to false to create a bay/cluster without the load + #: balancer. + is_master_lb_enabled = resource.Body('master_lb_enabled', type=bool) + #: Specifying this parameter will disable TLS so that users can access the + #: COE endpoints without a certificate. + is_tls_disabled = resource.Body('tls_disabled', type=bool) + #: Setting this flag makes the baymodel/cluster template public and + #: accessible by other users. + is_public = resource.Body('public', type=bool) + #: This option provides an alternative registry based on the Registry V2 + is_registry_enabled = resource.Body('registry_enabled', type=bool) + #: The name of the SSH keypair to configure in the bay/cluster servers for + #: ssh access. + keypair_id = resource.Body('keypair_id') + #: Arbitrary labels. 
The accepted keys and valid values are defined in the + #: bay/cluster drivers. They are used as a way to pass additional + #: parameters that are specific to a bay/cluster driver. + labels = resource.Body('labels', type=dict) + #: The flavor of the master node for this baymodel/cluster template. + master_flavor_id = resource.Body('master_flavor_id') + #: The name of a network driver for providing the networks for the + #: containers. + network_driver = resource.Body('network_driver') + #: When a proxy server is used, some sites should not go through the proxy + #: and should be accessed normally. + no_proxy = resource.Body('no_proxy') + #: The servers in the bay/cluster can be vm or baremetal. + server_type = resource.Body('server_type') + #: The date and time when the resource was updated. + updated_at = resource.Body('updated_at') + #: The UUID of the cluster template. + uuid = resource.Body('uuid', alternate_id=True) + #: The name of a volume driver for managing the persistent storage for the + #: containers. + volume_driver = resource.Body('volume_driver') diff --git a/openstack/container_infrastructure_management/v1/service.py b/openstack/container_infrastructure_management/v1/service.py new file mode 100644 index 0000000000..5937f2045e --- /dev/null +++ b/openstack/container_infrastructure_management/v1/service.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class Service(resource.Resource): + resources_key = 'mservices' + base_path = '/mservices' + + # capabilities + allow_list = True + + #: The name of the binary form of the Magnum service. + binary = resource.Body('binary') + #: The date and time when the resource was created. + created_at = resource.Body('created_at') + #: The disable reason of the service, null if the service is enabled or + #: disabled without reason provided. + disabled_reason = resource.Body('disabled_reason') + #: The host for the service. + host = resource.Body('host') + #: The total number of report. + report_count = resource.Body('report_count') + #: The current state of Magnum services. + state = resource.Body('state') + #: The date and time when the resource was updated. + updated_at = resource.Body('updated_at') diff --git a/openstack/database/database_service.py b/openstack/database/database_service.py index ced9805347..bc595facf1 100644 --- a/openstack/database/database_service.py +++ b/openstack/database/database_service.py @@ -10,15 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack import service_filter +from openstack.database.v1 import _proxy +from openstack import service_description -class DatabaseService(service_filter.ServiceFilter): +class DatabaseService(service_description.ServiceDescription[_proxy.Proxy]): """The database service.""" - valid_versions = [service_filter.ValidVersion('v1')] - - def __init__(self, version=None): - """Create a database service.""" - super(DatabaseService, self).__init__(service_type='database', - version=version) + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/database/v1/_proxy.py b/openstack/database/v1/_proxy.py index cb8351e5f3..be5a7c67f2 100644 --- a/openstack/database/v1/_proxy.py +++ b/openstack/database/v1/_proxy.py @@ -10,78 +10,120 @@ # License for the specific language governing permissions and limitations # under the License. +import typing as ty + from openstack.database.v1 import database as _database from openstack.database.v1 import flavor as _flavor from openstack.database.v1 import instance as _instance from openstack.database.v1 import user as _user from openstack import proxy +from openstack import resource + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' -class Proxy(proxy.BaseProxy): + _resource_registry = { + "database": _database.Database, + "flavor": _flavor.Flavor, + "instance": _instance.Instance, + "user": _user.User, + } - def create_database(self, **attrs): + def create_database(self, instance, **attrs): """Create a new database from attributes + :param instance: This can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.database.v1.database.Database`, - comprised of the properties on the Database class. + a :class:`~openstack.database.v1.database.Database`, + comprised of the properties on the Database class. 
:returns: The results of server creation :rtype: :class:`~openstack.database.v1.database.Database` """ - return self._create(_database.Database, **attrs) + instance = self._get_resource(_instance.Instance, instance) + return self._create( + _database.Database, instance_id=instance.id, **attrs + ) - def delete_database(self, database, ignore_missing=True): + def delete_database(self, database, instance=None, ignore_missing=True): """Delete a database :param database: The value can be either the ID of a database or a - :class:`~openstack.database.v1.database.Database` instance. + :class:`~openstack.database.v1.database.Database` instance. + :param instance: This parameter needs to be specified when + an ID is given as `database`. + It can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the database does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent database. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the database does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent database. :returns: ``None`` """ - self._delete(_database.Database, database, - ignore_missing=ignore_missing) - - def find_database(self, name_or_id, ignore_missing=True): + instance_id = self._get_uri_attribute( + database, instance, "instance_id" + ) + self._delete( + _database.Database, + database, + instance_id=instance_id, + ignore_missing=ignore_missing, + ) + + def find_database(self, name_or_id, instance, ignore_missing=True): """Find a single database :param name_or_id: The name or ID of a database. 
+ :param instance: This can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.database.v1.database.Database` or None """ - return self._find(_database.Database, name_or_id, - ignore_missing=ignore_missing) - - def databases(self, **query): + instance = self._get_resource(_instance.Instance, instance) + return self._find( + _database.Database, + name_or_id, + instance_id=instance.id, + ignore_missing=ignore_missing, + ) + + def databases(self, instance, **query): """Return a generator of databases - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param instance: This can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` + instance that the interface belongs to. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of database objects :rtype: :class:`~openstack.database.v1.database.Database` """ - return self._list(_database.Database, paginated=False, **query) + instance = self._get_resource(_instance.Instance, instance) + return self._list(_database.Database, instance_id=instance.id, **query) - def get_database(self, database): + def get_database(self, database, instance=None): """Get a single database + :param instance: This parameter needs to be specified when + an ID is given as `database`. 
+ It can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :param database: The value can be the ID of a database or a - :class:`~openstack.database.v1.database.Database` - instance. + :class:`~openstack.database.v1.database.Database` + instance. :returns: One :class:`~openstack.database.v1.database.Database` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_database.Database, database) @@ -90,44 +132,45 @@ def find_flavor(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a flavor. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.database.v1.flavor.Flavor` or None """ - return self._find(_flavor.Flavor, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _flavor.Flavor, name_or_id, ignore_missing=ignore_missing + ) def get_flavor(self, flavor): """Get a single flavor :param flavor: The value can be the ID of a flavor or a - :class:`~openstack.database.v1.flavor.Flavor` instance. + :class:`~openstack.database.v1.flavor.Flavor` instance. :returns: One :class:`~openstack.database.v1.flavor.Flavor` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" return self._get(_flavor.Flavor, flavor) def flavors(self, **query): """Return a generator of flavors - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of flavor objects :rtype: :class:`~openstack.database.v1.flavor.Flavor` """ - return self._list(_flavor.Flavor, paginated=False, **query) + return self._list(_flavor.Flavor, **query) def create_instance(self, **attrs): """Create a new instance from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.database.v1.instance.Instance`, - comprised of the properties on the Instance class. + a :class:`~openstack.database.v1.instance.Instance`, + comprised of the properties on the Instance class. :returns: The results of server creation :rtype: :class:`~openstack.database.v1.instance.Instance` @@ -138,130 +181,222 @@ def delete_instance(self, instance, ignore_missing=True): """Delete an instance :param instance: The value can be either the ID of an instance or a - :class:`~openstack.database.v1.instance.Instance` instance. + :class:`~openstack.database.v1.instance.Instance` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the instance does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent instance. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the instance does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent instance. 
:returns: ``None`` """ - self._delete(_instance.Instance, instance, - ignore_missing=ignore_missing) + self._delete( + _instance.Instance, instance, ignore_missing=ignore_missing + ) def find_instance(self, name_or_id, ignore_missing=True): """Find a single instance :param name_or_id: The name or ID of a instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.database.v1.instance.Instance` or None """ - return self._find(_instance.Instance, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _instance.Instance, name_or_id, ignore_missing=ignore_missing + ) def get_instance(self, instance): """Get a single instance :param instance: The value can be the ID of an instance or a - :class:`~openstack.database.v1.instance.Instance` - instance. + :class:`~openstack.database.v1.instance.Instance` + instance. :returns: One :class:`~openstack.database.v1.instance.Instance` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_instance.Instance, instance) def instances(self, **query): """Return a generator of instances - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
:returns: A generator of instance objects :rtype: :class:`~openstack.database.v1.instance.Instance` """ - return self._list(_instance.Instance, paginated=False, **query) + return self._list(_instance.Instance, **query) def update_instance(self, instance, **attrs): """Update a instance :param instance: Either the id of a instance or a - :class:`~openstack.database.v1.instance.Instance` - instance. - :attrs kwargs: The attributes to update on the instance represented - by ``value``. + :class:`~openstack.database.v1.instance.Instance` instance. + :param attrs: The attributes to update on the instance represented + by ``instance``. :returns: The updated instance :rtype: :class:`~openstack.database.v1.instance.Instance` """ return self._update(_instance.Instance, instance, **attrs) - def create_user(self, **attrs): + def create_user(self, instance, **attrs): """Create a new user from attributes + :param instance: This can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.database.v1.user.User`, - comprised of the properties on the User class. + a :class:`~openstack.database.v1.user.User`, + comprised of the properties on the User class. :returns: The results of server creation :rtype: :class:`~openstack.database.v1.user.User` """ - return self._create(_user.User, **attrs) + instance = self._get_resource(_instance.Instance, instance) + return self._create(_user.User, instance_id=instance.id, **attrs) - def delete_user(self, user, ignore_missing=True): + def delete_user(self, user, instance=None, ignore_missing=True): """Delete a user :param user: The value can be either the ID of a user or a - :class:`~openstack.database.v1.user.User` instance. + :class:`~openstack.database.v1.user.User` instance. + :param instance: This parameter needs to be specified when + an ID is given as `user`. 
+ It can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the user does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent user. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the user does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent user. :returns: ``None`` """ - self._delete(_user.User, user, ignore_missing=ignore_missing) - - def find_user(self, name_or_id, ignore_missing=True): + instance = self._get_resource(_instance.Instance, instance) + self._delete( + _user.User, + user, + ignore_missing=ignore_missing, + instance_id=instance.id, + ) + + def find_user(self, name_or_id, instance, ignore_missing=True): """Find a single user :param name_or_id: The name or ID of a user. + :param instance: This can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.database.v1.user.User` or None """ - return self._find(_user.User, name_or_id, - ignore_missing=ignore_missing) - - def users(self, **query): + instance = self._get_resource(_instance.Instance, instance) + return self._find( + _user.User, + name_or_id, + instance_id=instance.id, + ignore_missing=ignore_missing, + ) + + def users(self, instance, **query): """Return a generator of users - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param instance: This can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of user objects :rtype: :class:`~openstack.database.v1.user.User` """ - return self._list(_user.User, paginated=False, **query) + instance = self._get_resource(_instance.Instance, instance) + return self._list(_user.User, instance_id=instance.id, **query) - def get_user(self, user): + def get_user(self, user, instance=None): """Get a single user :param user: The value can be the ID of a user or a - :class:`~openstack.database.v1.user.User` instance. + :class:`~openstack.database.v1.user.User` instance. + :param instance: This parameter needs to be specified when + an ID is given as `database`. + It can be either the ID of an instance + or a :class:`~openstack.database.v1.instance.Instance` :returns: One :class:`~openstack.database.v1.user.User` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" + instance = self._get_resource(_instance.Instance, instance) return self._get(_user.User, user) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. 
+ + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/database/v1/database.py b/openstack/database/v1/database.py index 6181a977c7..7bf8eef8aa 100644 --- a/openstack/database/v1/database.py +++ b/openstack/database/v1/database.py @@ -10,16 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.database import database_service from openstack import resource class Database(resource.Resource): - id_attribute = 'name' resource_key = 'database' resources_key = 'databases' base_path = '/instances/%(instance_id)s/databases' - service = database_service.DatabaseService() # capabilities allow_create = True @@ -28,11 +25,11 @@ class Database(resource.Resource): # Properties #: Set of symbols and encodings. The default character set is ``utf8``. - character_set = resource.prop('character_set') + character_set = resource.Body('character_set') #: Set of rules for comparing characters in a character set. #: The default value for collate is ``utf8_general_ci``. 
- collate = resource.prop('collate') + collate = resource.Body('collate') #: The ID of the instance - instance_id = resource.prop('instance_id') + instance_id = resource.URI('instance_id') #: The name of the database - name = resource.prop('name') + name = resource.Body('name', alternate_id=True) diff --git a/openstack/database/v1/flavor.py b/openstack/database/v1/flavor.py index 54cbecee4d..64a9dd3626 100644 --- a/openstack/database/v1/flavor.py +++ b/openstack/database/v1/flavor.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.database import database_service from openstack import resource @@ -18,16 +17,15 @@ class Flavor(resource.Resource): resource_key = 'flavor' resources_key = 'flavors' base_path = '/flavors' - service = database_service.DatabaseService() # capabilities allow_list = True - allow_retrieve = True + allow_fetch = True # Properties #: Links associated with the flavor - links = resource.prop('links') + links = resource.Body('links') #: The name of the flavor - name = resource.prop('name') + name = resource.Body('name') #: The size in MB of RAM the flavor has - ram = resource.prop('ram') + ram = resource.Body('ram') diff --git a/openstack/database/v1/instance.py b/openstack/database/v1/instance.py index 18865eae8d..31f1818620 100644 --- a/openstack/database/v1/instance.py +++ b/openstack/database/v1/instance.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.database import database_service from openstack import resource from openstack import utils @@ -19,26 +18,38 @@ class Instance(resource.Resource): resource_key = 'instance' resources_key = 'instances' base_path = '/instances' - service = database_service.DatabaseService() # capabilities allow_create = True - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # Properties #: The flavor of the instance - flavor = resource.prop('flavor') + flavor = resource.Body('flavor') #: Links associated with the instance - links = resource.prop('links') + links = resource.Body('links') #: The name of the instance - name = resource.prop('name') + name = resource.Body('name') #: The status of the instance - status = resource.prop('status') + status = resource.Body('status') #: The size of the volume - volume = resource.prop('volume') + volume = resource.Body('volume') + #: A dictionary of datastore details, often including 'type' and 'version' + #: keys + datastore = resource.Body('datastore', type=dict) + #: The ID of this instance + id = resource.Body('id') + #: The region this instance resides in + region = resource.Body('region') + #: The name of the host + hostname = resource.Body('hostname') + #: The timestamp when this instance was created + created_at = resource.Body('created') + #: The timestamp when this instance was updated + updated_at = resource.Body('updated') def enable_root_user(self, session): """Enable login for the root user. @@ -47,12 +58,14 @@ def enable_root_user(self, session): and provides the user with a generated root password. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: A dictionary with keys ``name`` and ``password`` specifying the login credentials. 
""" url = utils.urljoin(self.base_path, self.id, 'root') - resp = session.post(url, endpoint_filter=self.service) + resp = session.post( + url, + ) return resp.json()['user'] def is_root_enabled(self, session): @@ -61,12 +74,14 @@ def is_root_enabled(self, session): Determine if root is enabled on this particular instance. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + :type session: :class:`~keystoneauth1.adapter.Adapter` :returns: ``True`` if root user is enabled for a specified database instance or ``False`` otherwise. """ url = utils.urljoin(self.base_path, self.id, 'root') - resp = session.get(url, endpoint_filter=self.service) + resp = session.get( + url, + ) return resp.json()['rootEnabled'] def restart(self, session): @@ -74,9 +89,9 @@ def restart(self, session): :returns: ``None`` """ - body = {'restart': {}} + body = {'restart': None} url = utils.urljoin(self.base_path, self.id, 'action') - session.post(url, endpoint_filter=self.service, json=body) + session.post(url, json=body) def resize(self, session, flavor_reference): """Resize the database instance @@ -85,7 +100,7 @@ def resize(self, session, flavor_reference): """ body = {'resize': {'flavorRef': flavor_reference}} url = utils.urljoin(self.base_path, self.id, 'action') - session.post(url, endpoint_filter=self.service, json=body) + session.post(url, json=body) def resize_volume(self, session, volume_size): """Resize the volume attached to the instance @@ -94,4 +109,4 @@ def resize_volume(self, session, volume_size): """ body = {'resize': {'volume': volume_size}} url = utils.urljoin(self.base_path, self.id, 'action') - session.post(url, endpoint_filter=self.service, json=body) + session.post(url, json=body) diff --git a/openstack/database/v1/user.py b/openstack/database/v1/user.py index 2abbfec4f1..d1c29cd689 100644 --- a/openstack/database/v1/user.py +++ b/openstack/database/v1/user.py @@ -10,37 +10,50 @@ # License for the specific 
language governing permissions and limitations # under the License. -from openstack.database import database_service from openstack import resource +from openstack import utils class User(resource.Resource): - id_attribute = 'name' resource_key = 'user' resources_key = 'users' base_path = '/instances/%(instance_id)s/users' - service = database_service.DatabaseService() # capabilities allow_create = True allow_delete = True allow_list = True - # path args - instance_id = resource.prop('instance_id') + instance_id = resource.URI('instance_id') # Properties #: Databases the user has access to - databases = resource.prop('databases') + databases = resource.Body('databases') #: The name of the user - name = resource.prop('name') + name = resource.Body('name', alternate_id=True) #: The password of the user - password = resource.prop('password') - - @classmethod - def create_by_id(cls, session, attrs, r_id=None, path_args=None): - url = cls._get_url(path_args) - # Create expects an array of users - body = {'users': [attrs]} - resp = session.post(url, endpoint_filter=cls.service, json=body) - return resp.json() + password = resource.Body('password') + + def _prepare_request( + self, + requires_id=True, + prepend_key=True, + patch=False, + base_path=None, + *args, + **kwargs, + ): + """Prepare a request for the database service's create call + + User.create calls require the resources_key. 
+ The base_prepare_request would insert the resource_key (singular) + """ + body = {self.resources_key: self._body.dirty} + + if base_path is None: + base_path = self.base_path + + uri = base_path % self._uri.attributes + uri = utils.urljoin(uri, self.id) + + return resource._Request(uri, body, None) diff --git a/openstack/tests/functional/telemetry/alarm/v2/__init__.py b/openstack/dns/__init__.py similarity index 100% rename from openstack/tests/functional/telemetry/alarm/v2/__init__.py rename to openstack/dns/__init__.py diff --git a/openstack/dns/dns_service.py b/openstack/dns/dns_service.py new file mode 100644 index 0000000000..5eeaaeed22 --- /dev/null +++ b/openstack/dns/dns_service.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.dns.v2 import _proxy +from openstack import service_description + + +class DnsService(service_description.ServiceDescription[_proxy.Proxy]): + """The DNS service.""" + + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/openstack/tests/functional/telemetry/v2/__init__.py b/openstack/dns/v2/__init__.py similarity index 100% rename from openstack/tests/functional/telemetry/v2/__init__.py rename to openstack/dns/v2/__init__.py diff --git a/openstack/dns/v2/_base.py b/openstack/dns/v2/_base.py new file mode 100644 index 0000000000..f7c6acf6c4 --- /dev/null +++ b/openstack/dns/v2/_base.py @@ -0,0 +1,198 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty +import urllib.parse + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack import resource + + +class Resource(resource.Resource): + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... 
+ + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... + + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: + """Find a resource by its name or id. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param name_or_id: This resource's identifier, if needed by + the request. The default is ``None``. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict params: Any additional parameters to be passed into + underlying methods, such as to + :meth:`~openstack.resource.Resource.existing` + in order to pass on URI parameters. + + :return: The :class:`Resource` object matching the given name or id + or None if nothing matches. + :raises: :class:`openstack.exceptions.DuplicateResource` if more + than one resource is found for this request. + :raises: :class:`openstack.exceptions.NotFoundException` if nothing + is found and ignore_missing is ``False``. 
+ """ + session = cls._get_session(session) + # Try to short-circuit by looking directly for a matching ID. + try: + match = cls.existing( + id=name_or_id, + connection=session._get_connection(), # type: ignore + **params, + ) + return match.fetch(session) + except exceptions.SDKException: + # DNS may return 400 when we try to do GET with name + pass + + if ( + 'name' in cls._query_mapping._mapping.keys() + and 'name' not in params + ): + params['name'] = name_or_id + + data = cls.list( + session, + list_base_path=list_base_path, + microversion=microversion, + all_projects=all_projects, + **params, + ) + + result = cls._get_one_match(name_or_id, data) + if result is not None: + return result + + if ignore_missing: + return None + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) + + @classmethod + def list( + cls, + session: adapter.Adapter, + paginated: bool = True, + base_path: str | None = None, + allow_unknown_params: bool = False, + *, + microversion: str | None = None, + headers: dict[str, str] | None = None, + max_items: int | None = None, + project_id: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: + if project_id or all_projects is not None: + if headers is None: + headers = {} + if project_id: + headers["x-auth-sudo-project-id"] = str(project_id) + if all_projects: + headers["x-auth-all-projects"] = str(all_projects) + + return super().list(session=session, headers=headers, **params) + + @classmethod + def _get_next_link(cls, uri, response, data, marker, limit, total_yielded): + next_link = None + params: dict[str, list[str] | str] = {} + if isinstance(data, dict): + links = data.get('links') + if links: + next_link = links.get('next') + + total = data.get('metadata', {}).get('total_count') + if total: + # We have a kill switch + total_count = int(total) + if total_count <= total_yielded: + return None, params + + # Parse params from Link (next 
page URL) into params. + # This prevents duplication of query parameters that with large + # number of pages result in HTTP 414 error eventually. + if next_link: + parts = urllib.parse.urlparse(next_link) + query_params = urllib.parse.parse_qs(parts.query) + params.update(query_params) + next_link = urllib.parse.urljoin(next_link, parts.path) + + # If we still have no link, and limit was given and is non-zero, + # and the number of records yielded equals the limit, then the user + # is playing pagination ball so we should go ahead and try once more. + if not next_link and limit: + next_link = uri + params['marker'] = marker + params['limit'] = limit + + return next_link, params diff --git a/openstack/dns/v2/_proxy.py b/openstack/dns/v2/_proxy.py new file mode 100644 index 0000000000..6b5175652c --- /dev/null +++ b/openstack/dns/v2/_proxy.py @@ -0,0 +1,1089 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack.dns.v2 import blacklist as _blacklist +from openstack.dns.v2 import floating_ip as _fip +from openstack.dns.v2 import limit as _limit +from openstack.dns.v2 import quota as _quota +from openstack.dns.v2 import recordset as _rs +from openstack.dns.v2 import service_status as _svc_status +from openstack.dns.v2 import tld as _tld +from openstack.dns.v2 import tsigkey as _tsigkey +from openstack.dns.v2 import zone as _zone +from openstack.dns.v2 import zone_export as _zone_export +from openstack.dns.v2 import zone_import as _zone_import +from openstack.dns.v2 import zone_nameserver as _zone_nameserver +from openstack.dns.v2 import zone_share as _zone_share +from openstack.dns.v2 import zone_transfer as _zone_transfer +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + _resource_registry = { + "blacklist": _blacklist.Blacklist, + "floating_ip": _fip.FloatingIP, + "limits": _limit.Limit, + "quota": _quota.Quota, + "recordset": _rs.Recordset, + "service_status": _svc_status.ServiceStatus, + "zone": _zone.Zone, + "tsigkey": _tsigkey.TSIGKey, + "zone_export": _zone_export.ZoneExport, + "zone_import": _zone_import.ZoneImport, + "zone_nameserver": _zone_nameserver.ZoneNameserver, + "zone_share": _zone_share.ZoneShare, + "zone_transfer_request": _zone_transfer.ZoneTransferRequest, + "tld": _tld.TLD, + } + + # ======== Zones ======== + def zones(self, **query): + """Retrieve a generator of zones + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `name`: Zone Name field. + * `type`: Zone Type field. + * `email`: Zone email field. + * `status`: Status of the zone. + * `ttl`: TTL field filter.abs + * `description`: Zone description field filter. + + :returns: A generator of zone + :class:`~openstack.dns.v2.zone.Zone` instances. 
+ """ + return self._list(_zone.Zone, **query) + + def create_zone(self, **attrs): + """Create a new zone from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.zone.Zone`, + comprised of the properties on the Zone class. + :returns: The results of zone creation. + :rtype: :class:`~openstack.dns.v2.zone.Zone` + """ + if attrs.get('type') == "SECONDARY": + attrs.pop('email', None) + attrs.pop('ttl', None) + return self._create(_zone.Zone, prepend_key=False, **attrs) + + def get_zone(self, zone): + """Get a zone + + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. + :returns: Zone instance. + :rtype: :class:`~openstack.dns.v2.zone.Zone` + """ + return self._get(_zone.Zone, zone) + + def delete_zone(self, zone, ignore_missing=True, delete_shares=False): + """Delete a zone + + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent zone. + :param bool delete_shares: When True, delete the zone shares along with + the zone. + + :returns: Zone been deleted + :rtype: :class:`~openstack.dns.v2.zone.Zone` + """ + return self._delete( + _zone.Zone, + zone, + ignore_missing=ignore_missing, + delete_shares=delete_shares, + ) + + def update_zone(self, zone, **attrs): + """Update zone attributes + + :param zone: The id or an instance of + :class:`~openstack.dns.v2.zone.Zone`. + :param dict attrs: attributes for update on + :class:`~openstack.dns.v2.zone.Zone`. 
+ + :rtype: :class:`~openstack.dns.v2.zone.Zone` + """ + return self._update(_zone.Zone, zone, **attrs) + + def find_zone(self, name_or_id, ignore_missing=True): + """Find a single zone + + :param name_or_id: The name or ID of a zone + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. + When set to ``True``, no exception will be set when attempting + to delete a nonexistent zone. + + :returns: :class:`~openstack.dns.v2.zone.Zone` + """ + return self._find( + _zone.Zone, name_or_id, ignore_missing=ignore_missing + ) + + def abandon_zone(self, zone, **attrs): + """Abandon Zone + + :param zone: The value can be the ID of a zone to be abandoned + or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. + + :returns: None + """ + zone = self._get_resource(_zone.Zone, zone) + + return zone.abandon(self) + + def xfr_zone(self, zone, **attrs): + """Trigger update of secondary Zone + + :param zone: The value can be the ID of a zone to be abandoned + or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. + + :returns: None + """ + zone = self._get_resource(_zone.Zone, zone) + return zone.xfr(self) + + # ======== Zone nameservers ======== + def zone_nameservers(self, zone): + """Retrieve a generator of nameservers for a zone + + :param zone: The value can be the ID of a zone or a + :class:`~openstack.dns.v2.zone.Zone` instance. + :return: A generator of + :class:`~openstack.dns.v2.zone_nameserver.ZoneNameserver` + instances. + """ + zone_id = resource.Resource._get_id(zone) + return self._list( + _zone_nameserver.ZoneNameserver, + zone_id=zone_id, + ) + + # ======== Recordsets ======== + def recordsets(self, zone=None, **query): + """Retrieve a generator of recordsets + + :param zone: The optional value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. 
If it is not + given all recordsets for all zones of the tenant would be + retrieved + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `name`: Recordset Name field. + * `type`: Type field. + * `status`: Status of the recordset. + * `ttl`: TTL field filter. + * `description`: Recordset description field filter. + + :returns: A generator of zone + (:class:`~openstack.dns.v2.recordset.Recordset`) instances + """ + base_path = None + if not zone: + base_path = '/recordsets' + else: + zone = self._get_resource(_zone.Zone, zone) + query.update({'zone_id': zone.id}) + return self._list(_rs.Recordset, base_path=base_path, **query) + + def create_recordset(self, zone, **attrs): + """Create a new recordset in the zone + + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.recordset.Recordset`, + comprised of the properties on the Recordset class. + :returns: The results of zone creation + :rtype: :class:`~openstack.dns.v2.recordset.Recordset` + """ + zone = self._get_resource(_zone.Zone, zone) + attrs.update({'zone_id': zone.id}) + return self._create(_rs.Recordset, prepend_key=False, **attrs) + + def update_recordset(self, recordset, **attrs): + """Update Recordset attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.recordset.Recordset`, + comprised of the properties on the Recordset class. + :returns: The results of zone creation + :rtype: :class:`~openstack.dns.v2.recordset.Recordset` + """ + return self._update(_rs.Recordset, recordset, **attrs) + + def get_recordset(self, recordset, zone): + """Get a recordset + + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. 
+ :param recordset: The value can be the ID of a recordset + or a :class:`~openstack.dns.v2.recordset.Recordset` instance. + :returns: Recordset instance + :rtype: :class:`~openstack.dns.v2.recordset.Recordset` + """ + zone = self._get_resource(_zone.Zone, zone) + return self._get(_rs.Recordset, recordset, zone_id=zone.id) + + def delete_recordset(self, recordset, zone=None, ignore_missing=True): + """Delete a recordset + + :param recordset: The value can be the ID of a recordset + or a :class:`~openstack.dns.v2.recordset.Recordset` + instance. + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the recordset does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent recordset. + + :returns: The Recordset instance that was deleted + :rtype: :class:`~openstack.dns.v2.recordset.Recordset` + """ + if zone: + zone = self._get_resource(_zone.Zone, zone) + recordset = self._get(_rs.Recordset, recordset, zone_id=zone.id) + return self._delete( + _rs.Recordset, recordset, ignore_missing=ignore_missing + ) + + def find_recordset(self, zone, name_or_id, ignore_missing=True, **query): + """Find a single recordset + + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. + :param name_or_id: The name or ID of a recordset + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the recordset does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent recordset. 
+ + :returns: :class:`~openstack.dns.v2.recordset.Recordset` + """ + zone = self._get_resource(_zone.Zone, zone) + return self._find( + _rs.Recordset, + name_or_id, + ignore_missing=ignore_missing, + zone_id=zone.id, + **query, + ) + + # ======== Zone Imports ======== + def zone_imports(self, **query): + """Retrieve a generator of zone imports + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `zone_id`: Zone I field. + * `message`: Message field. + * `status`: Status of the zone import record. + + :returns: A generator of zone + :class:`~openstack.dns.v2.zone_import.ZoneImport` instances. + """ + return self._list(_zone_import.ZoneImport, **query) + + def create_zone_import(self, **attrs): + """Create a new zone import from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.zone_import.ZoneImport`, + comprised of the properties on the ZoneImport class. + :returns: The results of zone creation. + :rtype: :class:`~openstack.dns.v2.zone_import.ZoneImport` + """ + return self._create( + _zone_import.ZoneImport, prepend_key=False, **attrs + ) + + def get_zone_import(self, zone_import): + """Get a zone import record + + :param zone: The value can be the ID of a zone import + or a :class:`~openstack.dns.v2.zone_import.ZoneImport` instance. + :returns: ZoneImport instance. + :rtype: :class:`~openstack.dns.v2.zone_import.ZoneImport` + """ + return self._get(_zone_import.ZoneImport, zone_import) + + def delete_zone_import(self, zone_import, ignore_missing=True): + """Delete a zone import + + :param zone_import: The value can be the ID of a zone import + or a :class:`~openstack.dns.v2.zone_import.ZoneImport` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent zone. 
+ + :returns: None + """ + return self._delete( + _zone_import.ZoneImport, zone_import, ignore_missing=ignore_missing + ) + + # ======== Zone Exports ======== + def zone_exports(self, **query): + """Retrieve a generator of zone exports + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `zone_id`: Zone I field. + * `message`: Message field. + * `status`: Status of the zone import record. + + :returns: A generator of zone + :class:`~openstack.dns.v2.zone_export.ZoneExport` instances. + """ + return self._list(_zone_export.ZoneExport, **query) + + def create_zone_export(self, zone, **attrs): + """Create a new zone export from attributes + + :param zone: The value can be the ID of a zone to be exported + or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.zone_export.ZoneExport`, + comprised of the properties on the ZoneExport class. + :returns: The results of zone creation. + :rtype: :class:`~openstack.dns.v2.zone_export.ZoneExport` + """ + zone = self._get_resource(_zone.Zone, zone) + return self._create( + _zone_export.ZoneExport, + base_path='/zones/%(zone_id)s/tasks/export', + prepend_key=False, + zone_id=zone.id, + **attrs, + ) + + def get_zone_export(self, zone_export): + """Get a zone export record + + :param zone: The value can be the ID of a zone import + or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. + :returns: ZoneExport instance. + :rtype: :class:`~openstack.dns.v2.zone_export.ZoneExport` + """ + return self._get(_zone_export.ZoneExport, zone_export) + + def get_zone_export_text(self, zone_export): + """Get a zone export record as text + + :param zone: The value can be the ID of a zone import + or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. + :returns: ZoneExport instance. 
+ :rtype: :class:`~openstack.dns.v2.zone_export.ZoneExport` + """ + return self._get( + _zone_export.ZoneExport, + zone_export, + base_path='/zones/tasks/export/%(id)s/export', + ) + + def delete_zone_export(self, zone_export, ignore_missing=True): + """Delete a zone export + + :param zone_export: The value can be the ID of a zone import + or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent zone. + + :returns: None + """ + return self._delete( + _zone_export.ZoneExport, zone_export, ignore_missing=ignore_missing + ) + + # ======== FloatingIPs ======== + def floating_ips(self, **query): + """Retrieve a generator of recordsets + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `name`: Recordset Name field. + * `type`: Type field. + * `status`: Status of the recordset. + * `ttl`: TTL field filter. + * `description`: Recordset description field filter. + + :returns: A generator of floatingips + (:class:`~openstack.dns.v2.floating_ip.FloatingIP`) instances + """ + return self._list(_fip.FloatingIP, **query) + + def get_floating_ip(self, floating_ip): + """Get a Floating IP + + :param floating_ip: The value can be the ID of a floating ip + or a :class:`~openstack.dns.v2.floating_ip.FloatingIP` instance. + The ID is in format "region_name:floatingip_id" + :returns: FloatingIP instance. + :rtype: :class:`~openstack.dns.v2.floating_ip.FloatingIP` + """ + return self._get(_fip.FloatingIP, floating_ip) + + def update_floating_ip(self, floating_ip, **attrs): + """Update floating ip attributes + + :param floating_ip: The id or an instance of + :class:`~openstack.dns.v2.fip.FloatingIP`. + :param dict attrs: attributes for update on + :class:`~openstack.dns.v2.fip.FloatingIP`. 
+ + :rtype: :class:`~openstack.dns.v2.fip.FloatingIP` + """ + return self._update(_fip.FloatingIP, floating_ip, **attrs) + + def unset_floating_ip(self, floating_ip): + """Unset a Floating IP PTR record + :param floating_ip: ID for the floatingip associated with the + project. + :returns: FloatingIP PTR record. + :rtype: :class:`~openstack.dns.v2.fip.FloatingIP` + """ + # concat `region:floating_ip_id` as id + attrs = {'ptrdname': None} + return self._update(_fip.FloatingIP, floating_ip, **attrs) + + # ======== Zone Transfer ======== + def zone_transfer_requests(self, **query): + """Retrieve a generator of zone transfer requests + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `status`: Status of the zone transfer request. + + :returns: A generator of transfer requests + (:class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`) + instances + """ + return self._list(_zone_transfer.ZoneTransferRequest, **query) + + def get_zone_transfer_request(self, request): + """Get a ZoneTransfer Request info + + :param request: The value can be the ID of a transfer request + or a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` + instance. + :returns: Zone transfer request instance. + :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` + """ + return self._get(_zone_transfer.ZoneTransferRequest, request) + + def create_zone_transfer_request(self, zone, **attrs): + """Create a new ZoneTransfer Request from attributes + + :param zone: The value can be the ID of a zone to be transferred + or a :class:`~openstack.dns.v2.zone_export.ZoneExport` instance. + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`, + comprised of the properties on the ZoneTransferRequest class. + :returns: The results of zone transfer request creation. 
+ :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` + """ + zone = self._get_resource(_zone.Zone, zone) + return self._create( + _zone_transfer.ZoneTransferRequest, + base_path='/zones/%(zone_id)s/tasks/transfer_requests', + prepend_key=False, + zone_id=zone.id, + **attrs, + ) + + def update_zone_transfer_request(self, request, **attrs): + """Update ZoneTransfer Request attributes + + :param floating_ip: The id or an instance of + :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`. + :param dict attrs: attributes for update on + :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest`. + + :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` + """ + return self._update( + _zone_transfer.ZoneTransferRequest, request, **attrs + ) + + def delete_zone_transfer_request(self, request, ignore_missing=True): + """Delete a ZoneTransfer Request + + :param request: The value can be the ID of a zone transfer request + or a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferRequest` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent zone. + + :returns: None + """ + return self._delete( + _zone_transfer.ZoneTransferRequest, + request, + ignore_missing=ignore_missing, + ) + + def zone_transfer_accepts(self, **query): + """Retrieve a generator of zone transfer accepts + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `status`: Status of the recordset. 
+ + :returns: A generator of transfer accepts + (:class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept`) + instances + """ + return self._list(_zone_transfer.ZoneTransferAccept, **query) + + def get_zone_transfer_accept(self, accept): + """Get a ZoneTransfer Accept info + + :param request: The value can be the ID of a transfer accept + or a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept` + instance. + :returns: Zone transfer request instance. + :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept` + """ + return self._get(_zone_transfer.ZoneTransferAccept, accept) + + def create_zone_transfer_accept(self, **attrs): + """Create a new ZoneTransfer Accept from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept`, + comprised of the properties on the ZoneTransferAccept class. + :returns: The results of zone transfer request creation. + :rtype: :class:`~openstack.dns.v2.zone_transfer.ZoneTransferAccept` + """ + return self._create(_zone_transfer.ZoneTransferAccept, **attrs) + + # ======== Zone Shares ======== + def zone_shares(self, zone, **query): + """Retrieve a generator of zone shares + + :param zone: The zone ID or a + :class:`~openstack.dns.v2.zone.Zone` instance + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `target_project_id`: The target project ID field. + + :returns: A generator of zone shares + :class:`~openstack.dns.v2.zone_share.ZoneShare` instances. + """ + zone_obj = self._get_resource(_zone.Zone, zone) + return self._list(_zone_share.ZoneShare, zone_id=zone_obj.id, **query) + + def get_zone_share(self, zone, zone_share): + """Get a zone share + + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. 
+ :param zone_share: The zone_share can be either the ID of the zone + share or a :class:`~openstack.dns.v2.zone_share.ZoneShare` instance + that the zone share belongs to. + + :returns: ZoneShare instance. + :rtype: :class:`~openstack.dns.v2.zone_share.ZoneShare` + """ + zone_obj = self._get_resource(_zone.Zone, zone) + return self._get( + _zone_share.ZoneShare, zone_share, zone_id=zone_obj.id + ) + + def find_zone_share(self, zone, zone_share_id, ignore_missing=True): + """Find a single zone share + + :param zone: The value can be the ID of a zone + or a :class:`~openstack.dns.v2.zone.Zone` instance. + :param zone_share_id: The zone share ID + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone share does not exist. + When set to ``True``, None will be returned when attempting to + find a nonexistent zone share. + + :returns: :class:`~openstack.dns.v2.zone_share.ZoneShare` + """ + zone_obj = self._get_resource(_zone.Zone, zone) + return self._find( + _zone_share.ZoneShare, + zone_share_id, + ignore_missing=ignore_missing, + zone_id=zone_obj.id, + ) + + def create_zone_share(self, zone, **attrs): + """Create a new zone share from attributes + + :param zone: The zone ID or a + :class:`~openstack.dns.v2.zone.Zone` instance + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.zone_share.ZoneShare`, + comprised of the properties on the ZoneShare class. 
+ + :returns: The results of zone share creation + :rtype: :class:`~openstack.dns.v2.zone_share.ZoneShare` + """ + zone_obj = self._get_resource(_zone.Zone, zone) + return self._create( + _zone_share.ZoneShare, zone_id=zone_obj.id, **attrs + ) + + def delete_zone_share(self, zone, zone_share, ignore_missing=True): + """Delete a zone share + + :param zone: The zone ID or a + :class:`~openstack.dns.v2.zone.Zone` instance + :param zone_share: The zone_share can be either the ID of the zone + share or a :class:`~openstack.dns.v2.zone_share.ZoneShare` instance + that the zone share belongs to. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the zone share does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent zone + share. + + :returns: ``None`` + """ + zone_obj = self._get_resource(_zone.Zone, zone) + self._delete( + _zone_share.ZoneShare, + zone_share, + ignore_missing=ignore_missing, + zone_id=zone_obj.id, + ) + + # ======== Limits ======== + def limits(self, **query): + """Retrieve a generator of limits + + :returns: A generator of limits + (:class:`~openstack.dns.v2.limit.Limit`) instances + """ + return self._list(_limit.Limit, **query) + + # ======== Quotas ======== + def quotas(self, **query): + """Return a generator of quotas + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + :returns: A generator of quota objects + :rtype: :class:`~openstack.dns.v2.quota.Quota` + """ + return self._list(_quota.Quota, **query) + + def get_quota(self, quota): + """Get a quota + + :param quota: The value can be the ID of a quota or a + :class:`~openstack.dns.v2.quota.Quota` instance. + The ID of a quota is the same as the project ID for the quota. 
+ + :returns: One :class:`~openstack.dns.v2.quota.Quota` + :raises: :class:`~openstack.exceptions.ResourceNotFound` + """ + return self._get(_quota.Quota, quota) + + def update_quota(self, quota, **attrs): + """Update a quota + + :param quota: Either the ID of a quota or a + :class:`~openstack.dns.v2.quota.Quota` instance. The ID of a quota + is the same as the project ID for the quota. + :param dict attrs: The attributes to update on the quota represented + by ``quota``. + + :returns: The updated quota + :rtype: :class:`~openstack.dns.v2.quota.Quota` + """ + return self._update(_quota.Quota, quota, **attrs) + + def delete_quota(self, quota, ignore_missing=True): + """Delete a quota (i.e. reset to the default quota) + + :param quota: The value can be the ID of a quota or a + :class:`~openstack.dns.v2.quota.Quota` instance. + The ID of a quota is the same as the project ID for the quota. + :param bool ignore_missing: When set to ``False``, + :class:`~openstack.exceptions.ResourceNotFound` will be raised when + the quota does not exist. + When set to ``True``, no exception will be set when attempting to + delete a nonexistent quota. + + :returns: ``None`` + """ + return self._delete(_quota.Quota, quota, ignore_missing=ignore_missing) + + # ======== Service Statuses ======== + def service_statuses(self): + """Retrieve a generator of service statuses + + :returns: A generator of service statuses + :class:`~openstack.dns.v2.service_status.ServiceStatus` instances. + """ + return self._list(_svc_status.ServiceStatus) + + def get_service_status(self, service): + """Get a status of a service in the Designate system + + :param service: The value can be the ID of a service + or a :class:`~openstack.dns.v2.service_status.ServiceStatus` + instance. + + :returns: ServiceStatus instance. 
+ :rtype: :class:`~openstack.dns.v2.service_status.ServiceStatus` + """ + return self._get(_svc_status.ServiceStatus, service) + + # ======== TLDs ======== + def tlds(self, **query): + """Retrieve a generator of tlds + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + * `name`: TLD Name field. + + :returns: A generator of tld + :class:`~openstack.dns.v2.tld.TLD` instances. + """ + return self._list(_tld.TLD, **query) + + def create_tld(self, **attrs): + """Create a new tld from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.tld.TLD`, + comprised of the properties on the TLD class. + :returns: The results of TLD creation. + :rtype: :class:`~openstack.dns.v2.tld.TLD` + """ + return self._create(_tld.TLD, prepend_key=False, **attrs) + + def get_tld(self, tld): + """Get a tld + + :param tld: The value can be the ID of a tld + or a :class:`~openstack.dns.v2.tld.TLD` instance. + :returns: tld instance. + :rtype: :class:`~openstack.dns.v2.tld.TLD` + """ + return self._get(_tld.TLD, tld) + + def delete_tld(self, tld, ignore_missing=True): + """Delete a tld + + :param tld: The value can be the ID of a tld + or a :class:`~openstack.dns.v2.tld.TLD` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the tld does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent tld. + + :returns: TLD been deleted + :rtype: :class:`~openstack.dns.v2.tld.TLD` + """ + return self._delete( + _tld.TLD, + tld, + ignore_missing=ignore_missing, + ) + + def update_tld(self, tld, **attrs): + """Update tld attributes + + :param tld: The id or an instance of + :class:`~openstack.dns.v2.tld.TLD`. + :param dict attrs: attributes for update on + :class:`~openstack.dns.v2.tld.TLD`. 
+ + :rtype: :class:`~openstack.dns.v2.tld.TLD` + """ + return self._update(_tld.TLD, tld, **attrs) + + def find_tld(self, name_or_id, ignore_missing=True): + """Find a single tld + + :param name_or_id: The name or ID of a tld + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the tld does not exist. + When set to ``True``, no exception will be set when attempting + to delete a nonexistent tld. + + :returns: :class:`~openstack.dns.v2.tld.TLD` + """ + return self._find(_tld.TLD, name_or_id, ignore_missing=ignore_missing) + + # ====== TSIG keys ====== + def tsigkeys(self, **query): + """Retrieve a generator of tsigkeys + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + :returns: A generator of tsigkey + :class:`~openstack.dns.v2.tsigkey.TSIGKey` instances. + """ + return self._list(_tsigkey.TSIGKey, **query) + + def create_tsigkey(self, **attrs): + """Create a new tsigkey from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.tsigkey.TSIGKey`, + comprised of the properties on the TSIGKey class. + :returns: The results of tsigkey creation. + :rtype: :class:`~openstack.dns.v2.tsigkey.TSIGKey` + """ + return self._create(_tsigkey.TSIGKey, prepend_key=False, **attrs) + + def get_tsigkey(self, tsigkey): + """Get a tsigkey + + :param tsigkey: The value can be the ID of a tsigkey + or a :class:`~openstack.dns.v2.tsigkey.TSIGKey` instance. + :returns: One :class:`~openstack.dns.v2.tsigkey.TSIGKey` instance. + """ + return self._get(_tsigkey.TSIGKey, tsigkey) + + def delete_tsigkey( + self, tsigkey, ignore_missing=True, delete_shares=False + ): + """Delete a TSIG key + + :param tsigkey: The value can be the ID of a TSIG key + or a :class:`~openstack.dns.v2.tsigkey.TSIGKey` instance.
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.ResourceNotFound` will be raised when + the TSIG key does not exist. + When set to ``True``, no exception will be set when attempting to + delete a nonexistent TSIG key. + + :returns: TSIG Key that has been deleted + :rtype: :class:`~openstack.dns.v2.tsigkey.TSIGKey` + """ + + return self._delete( + _tsigkey.TSIGKey, + tsigkey, + ignore_missing=ignore_missing, + delete_shares=delete_shares, + ) + + def find_tsigkey(self, name_or_id, ignore_missing=True): + """Find a single tsigkey + + :param name_or_id: The name or ID of a tsigkey + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.ResourceNotFound` will be raised + when the tsigkey does not exist. + When set to ``True``, no exception will be set when attempting + to delete a nonexistent tsigkey. + + :returns: :class:`~openstack.dns.v2.tsigkey.TSIGKey` + """ + return self._find( + _tsigkey.TSIGKey, name_or_id, ignore_missing=ignore_missing + ) + + # ======== Blacklists ======== + def blacklists(self, **query): + """Retrieve a generator of blacklists + + :returns: A generator of blacklist + (:class:`~openstack.dns.v2.blacklist.Blacklist`) instances + """ + return self._list(_blacklist.Blacklist, **query) + + def get_blacklist(self, blacklist): + """Get a blacklist + + :param blacklist: The value can be the ID of a blacklist + or a :class:`~openstack.dns.v2.blacklist.Blacklist` instance. + + :returns: Blacklist instance. + :rtype: :class:`~openstack.dns.v2.blacklist.Blacklist` + """ + return self._get(_blacklist.Blacklist, blacklist) + + def create_blacklist(self, **attrs): + """Create a new blacklist + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.dns.v2.blacklist.Blacklist`, + comprised of the properties on the Blacklist class. + + :returns: The results of blacklist creation.
+ :rtype: :class:`~openstack.dns.v2.blacklist.Blacklist` + """ + return self._create(_blacklist.Blacklist, prepend_key=False, **attrs) + + def update_blacklist(self, blacklist, **attrs): + """Update blacklist attributes + + :param blacklist: The id or an instance of + :class: `~openstack.dns.v2.blacklist.Blacklist`. + :param attrs: attributes for update on + :class: `~openstack.dns.v2.blacklist.Blacklist`. + + :rtype: :class: `~openstack.dns.v2.blacklist.Blacklist`. + """ + return self._update(_blacklist.Blacklist, blacklist, **attrs) + + def delete_blacklist(self, blacklist, ignore_missing=True): + """Delete a blacklist + + :param blacklist: The id or an instance of + :class: `~openstack.dns.v2.blacklist.Blacklist`. + + :returns: Blacklist been deleted + :rtype: :class:`~openstack.dns.v2.blacklist.Blacklist` + """ + return self._delete( + _blacklist.Blacklist, blacklist, ignore_missing=ignore_missing + ) + + # ========== Utilities ========== + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. 
This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+ """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + def _get_cleanup_dependencies(self): + # DNS may depend on floating ip + return {'dns': {'before': ['network']}} + + def _service_cleanup( + self, + dry_run=True, + client_status_queue=False, + identified_resources=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + if not self.should_skip_resource_cleanup("zone", skip_resources): + # Delete all zones + for obj in self.zones(): + self._service_cleanup_del_res( + self.delete_zone, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + if not self.should_skip_resource_cleanup( + "floating_ip", skip_resources + ): + # Unset all floatingIPs + # NOTE: FloatingIPs are not cleaned when filters are set + for obj in self.floating_ips(): + self._service_cleanup_del_res( + self.unset_floating_ip, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) diff --git a/openstack/dns/v2/blacklist.py b/openstack/dns/v2/blacklist.py new file mode 100644 index 0000000000..6c6d048d79 --- /dev/null +++ b/openstack/dns/v2/blacklist.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + from openstack.dns.v2 import _base + from openstack import resource + + + class Blacklist(_base.Resource): + """DNS Blacklist Resource""" + + resources_key = 'blacklists' + base_path = '/blacklists' + + # capabilities + allow_list = True + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + commit_method = "PATCH" + + _query_mapping = resource.QueryParameters( + 'pattern', + ) + + #: Properties + #: ID for the resource + id = resource.Body('id') + #: Pattern for this blacklist + pattern = resource.Body('pattern') + #: Description for this blacklist + description = resource.Body("description") + #: Timestamp when the blacklist was created + created_at = resource.Body("created_at") + #: Timestamp when the blacklist was last updated + updated_at = resource.Body("updated_at") + #: Links to the resource, and the other related resources. + links = resource.Body("links") diff --git a/openstack/dns/v2/floating_ip.py b/openstack/dns/v2/floating_ip.py new file mode 100644 index 0000000000..63e5790b46 --- /dev/null +++ b/openstack/dns/v2/floating_ip.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +from openstack.dns.v2 import _base +from openstack import resource + + +class FloatingIP(_base.Resource): + """DNS Floating IP Resource""" + + resources_key = 'floatingips' + base_path = '/reverse/floatingips' + + # capabilities + allow_fetch = True + allow_commit = True + allow_list = True + commit_method = "PATCH" + + #: Properties + #: current action in progress on the resource + action = resource.Body('action') + #: The floatingip address for this PTR record + address = resource.Body('address') + #: Description for this PTR record + description = resource.Body('description') + #: Domain name for this PTR record + ptrdname = resource.Body('ptrdname') + #: status of the resource + status = resource.Body('status') + #: Time to live for this PTR record + ttl = resource.Body('ttl', type=int) diff --git a/openstack/dns/v2/limit.py b/openstack/dns/v2/limit.py new file mode 100644 index 0000000000..c1ac459b61 --- /dev/null +++ b/openstack/dns/v2/limit.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.dns.v2 import _base +from openstack import resource + + +class Limit(_base.Resource): + """DNS Limit Resource""" + + resource_key = 'limit' + base_path = '/limits' + + # capabilities + allow_list = True + + #: Properties + #: The max amount of items allowed per page + max_page_limit = resource.Body('max_page_limit', type=int) + #: The max length of a recordset name + max_recordset_name_length = resource.Body( + 'max_recordset_name_length', type=int + ) + #: The max amount of records contained in a recordset + max_recordset_records = resource.Body('max_recordset_records', type=int) + #: The max length of a zone name + max_zone_name_length = resource.Body('max_zone_name_length', type=int) + #: The max amount of records in a zone + max_zone_records = resource.Body('max_zone_records', type=int) + #: The max amount of recordsets per zone + max_zone_recordsets = resource.Body('max_zone_recordsets', type=int) + #: The max amount of zones for this project + max_zones = resource.Body('max_zones', type=int) + #: The lowest ttl allowed on this system + min_ttl = resource.Body('min_ttl', type=int) diff --git a/openstack/dns/v2/quota.py b/openstack/dns/v2/quota.py new file mode 100644 index 0000000000..3e8e70e802 --- /dev/null +++ b/openstack/dns/v2/quota.py @@ -0,0 +1,106 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack.dns.v2 import _base +from openstack import resource + + +class Quota(_base.Resource): + """DNS Quota Resource""" + + base_path = "/quotas" + + # capabilities + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + commit_method = "PATCH" + + # Properties + #: The ID of the project. + project = resource.URI("project", alternate_id=True) + #: The maximum amount of recordsets allowed in a zone export. *Type: int* + api_export_size = resource.Body("api_export_size", type=int) + #: The maximum amount of records allowed per recordset. *Type: int* + recordset_records = resource.Body("recordset_records", type=int) + #: The maximum amount of records allowed per zone. *Type: int* + zone_records = resource.Body("zone_records", type=int) + #: The maximum amount of recordsets allowed per zone. *Type: int* + zone_recordsets = resource.Body("zone_recordsets", type=int) + #: The maximum amount of zones allowed per project. 
*Type: int* + zones = resource.Body("zones", type=int) + + def _prepare_request( + self, + requires_id=True, + prepend_key=False, + patch=False, + base_path=None, + params=None, + *, + resource_request_key=None, + **kwargs, + ): + _request = super()._prepare_request( + requires_id, prepend_key, base_path=base_path + ) + if self.resource_key in _request.body: + _body = _request.body[self.resource_key] + else: + _body = _request.body + if "id" in _body: + del _body["id"] + _request.headers = {'x-auth-sudo-project-id': self.id} + return _request + + def fetch( + self, + session: adapter.Adapter, + requires_id: bool = True, + base_path: str | None = None, + error_message: str | None = None, + skip_cache: bool = False, + *, + resource_response_key: str | None = None, + microversion: str | None = None, + **params: ty.Any, + ) -> ty_ext.Self: + request = self._prepare_request( + requires_id=requires_id, + base_path=base_path, + ) + session = self._get_session(session) + if microversion is None: + microversion = self._get_microversion(session) + self.microversion = microversion + + response = session.get( + request.url, + microversion=microversion, + params=params, + skip_cache=skip_cache, + headers=request.headers, + ) + + self._translate_response( + response, + error_message=error_message, + resource_response_key=resource_response_key, + ) + + return self diff --git a/openstack/dns/v2/recordset.py b/openstack/dns/v2/recordset.py new file mode 100644 index 0000000000..09f37b4f4f --- /dev/null +++ b/openstack/dns/v2/recordset.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import resource + + +class Recordset(_base.Resource): + """DNS Recordset Resource""" + + resources_key = 'recordsets' + base_path = '/zones/%(zone_id)s/recordsets' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'name', + 'type', + 'ttl', + 'data', + 'status', + 'description', + 'limit', + 'marker', + ) + + #: Properties + #: current action in progress on the resource + action = resource.Body('action') + #: Timestamp when the zone was created + created_at = resource.Body('create_at') + #: Recordset description + description = resource.Body('description') + #: Links contains a `self` pertaining to this zone or a `next` pertaining + #: to next page + links = resource.Body('links', type=dict) + #: DNS Name of the recordset + name = resource.Body('name') + #: ID of the project which the recordset belongs to + project_id = resource.Body('project_id') + #: DNS record value list + records = resource.Body('records', type=list) + #: Recordset status + #: Valid values include: `PENDING_CREATE`, `ACTIVE`,`PENDING_DELETE`, + #: `ERROR` + status = resource.Body('status') + #: Time to live, default 300, available value 300-2147483647 (seconds) + ttl = resource.Body('ttl', type=int) + #: DNS type of the recordset + #: Valid values include `A`, `AAAA`, `MX`, `CNAME`, `TXT`, `NS`, + #: `SSHFP`, `SPF`, `SRV`, `PTR` + type = resource.Body('type') + #: Timestamp when the zone was last updated + 
updated_at = resource.Body('updated_at') + #: The id of the Zone which this recordset belongs to + zone_id = resource.URI('zone_id') + #: The name of the Zone which this recordset belongs to + zone_name = resource.Body('zone_name') diff --git a/openstack/dns/v2/service_status.py b/openstack/dns/v2/service_status.py new file mode 100644 index 0000000000..86933f5714 --- /dev/null +++ b/openstack/dns/v2/service_status.py @@ -0,0 +1,51 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import resource + + +class ServiceStatus(_base.Resource): + """Designate Service Statuses""" + + resources_key = 'service_statuses' + base_path = '/service_statuses' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + + #: Capabilities for the service + capabilities = resource.Body('capabilities', type=dict) + #: Timestamp when the resource was created + created_at = resource.Body('created_at') + #: Timestamp when the last heartbeat was received + heartbeated_at = resource.Body('heartbeated_at') + #: Hostname of the host with the service instance + #: *Type: str* + hostname = resource.Body('hostname') + #: Links contains a `self` pertaining to this service status or a `next` + #: pertaining to next page + links = resource.Body('links', type=dict) + #: The name of the Designate service instance + #: *Type: str* + service_name = resource.Body('service_name') + #: 
Statistics for the service + stats = resource.Body('stats', type=dict) + #: The status of the resource + #: *Type: enum* + status = resource.Body('status') + #: Timestamp when the resource was last updated + updated_at = resource.Body('updated_at') diff --git a/openstack/dns/v2/tld.py b/openstack/dns/v2/tld.py new file mode 100644 index 0000000000..0a96481b88 --- /dev/null +++ b/openstack/dns/v2/tld.py @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import resource + + +class TLD(_base.Resource): + """DNS TLD Resource""" + + resources_key = "tlds" + base_path = "/tlds" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + commit_method = "PATCH" + + _query_mapping = resource.QueryParameters( + "name", + "description", + "limit", + "marker", + ) + + #: TLD name + name = resource.Body("name") + #: TLD description + description = resource.Body("description") + #: Timestamp when the tld was created + created_at = resource.Body("created_at") + #: Timestamp when the tld was last updated + updated_at = resource.Body("updated_at") + #: Links contains a `self` pertaining to this tld or a `next` pertaining + #: to next page + links = resource.Body("links", type=dict) diff --git a/openstack/dns/v2/tsigkey.py b/openstack/dns/v2/tsigkey.py new file mode 100644 index 0000000000..2f65d7cfd6 --- /dev/null +++ b/openstack/dns/v2/tsigkey.py @@ 
-0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import resource +# from openstack import exceptions +# from openstack import utils + + +class TSIGKey(_base.Resource): + """DNS TSIGKEY Resource""" + + resources_key = 'tsigkeys' + base_path = '/tsigkeys' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + commit_method = "PATCH" + + _query_mapping = resource.QueryParameters( + 'name', + 'algorithm', + 'scope', + 'limit', + 'marker', + ) + + #: Properties + + #: ID for the resource + id = resource.Body('id') + #: resource id for this tsigkey which can be either zone or pool id + resource_id = resource.Body('resource_id') + #: TSIGKey name + name = resource.Body('name') + #: scope for this tsigkey which can be either ZONE or POOL scope + scope = resource.Body('scope') + #: The actual key to be used + secret = resource.Body('secret') + #: The encryption algorithm for this tsigkey + algorithm = resource.Body('algorithm') + #: Timestamp when the tsigkey was created + created_at = resource.Body('created_at') + #: Timestamp when the tsigkey was last updated + updated_at = resource.Body('updated_at') + #: Links contains a 'self' pertaining to this tsigkey or a 'next' + #: pertaining to next page + links = resource.Body('links', type=dict) diff --git a/openstack/dns/v2/zone.py b/openstack/dns/v2/zone.py new file mode 100644 index 
0000000000..a868da9152 --- /dev/null +++ b/openstack/dns/v2/zone.py @@ -0,0 +1,108 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Zone(_base.Resource): + """DNS ZONE Resource""" + + resources_key = 'zones' + base_path = '/zones' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + commit_method = "PATCH" + + _query_mapping = resource.QueryParameters( + 'name', + 'type', + 'email', + 'status', + 'description', + 'ttl', + 'limit', + 'marker', + ) + + #: Properties + #: current action in progress on the resource + action = resource.Body('action') + #: Attributes + #: Key:Value pairs of information about this zone, and the pool the user + #: would like to place the zone in. This information can be used by the + #: scheduler to place zones on the correct pool. 
+ attributes = resource.Body('attributes', type=dict) + #: Timestamp when the zone was created + created_at = resource.Body('created_at') + #: Zone description + #: *Type: str* + description = resource.Body('description') + #: The administrator email of this zone + #: *Type: str* + email = resource.Body('email') + #: Links contains a `self` pertaining to this zone or a `next` pertaining + #: to next page + links = resource.Body('links', type=dict) + #: The master list for slaver server to fetch DNS + masters = resource.Body('masters', type=list) + #: Zone name + name = resource.Body('name') + #: The pool which manages the zone, assigned by system + pool_id = resource.Body('pool_id') + #: The project id which the zone belongs to + project_id = resource.Body('project_id') + #: Serial number in the SOA record set in the zone, + #: which identifies the change on the primary DNS server + #: *Type: int* + serial = resource.Body('serial', type=int) + #: Zone status + #: Valid values include `PENDING_CREATE`, `ACTIVE`, + #: `PENDING_DELETE`, `ERROR` + status = resource.Body('status') + #: SOA TTL time, unit is seconds, default 300, TTL range 300-2147483647 + #: *Type: int* + ttl = resource.Body('ttl', type=int) + #: Zone type, + #: Valid values include `PRIMARY`, `SECONDARY` + #: *Type: str* + type = resource.Body('type') + #: Timestamp when the zone was last updated + updated_at = resource.Body('updated_at') + #: Whether the zone is shared with other projects + #: *Type: bool* + is_shared = resource.Body('shared') + + # Headers for DELETE requests + #: If true, delete any existing zone shares along with the zone + delete_shares = resource.Header('x-designate-delete-shares', type=bool) + + def _action(self, session, action, body): + """Preform actions given the message body.""" + url = utils.urljoin(self.base_path, self.id, 'tasks', action) + response = session.post(url, json=body) + exceptions.raise_from_response(response) + return response + + def abandon(self, session): 
+ self._action(session, 'abandon', None) + + def xfr(self, session): + self._action(session, 'xfr', None) diff --git a/openstack/dns/v2/zone_export.py b/openstack/dns/v2/zone_export.py new file mode 100644 index 0000000000..7a42154138 --- /dev/null +++ b/openstack/dns/v2/zone_export.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import exceptions +from openstack import resource + + +class ZoneExport(_base.Resource): + """DNS Zone Exports Resource""" + + resource_key = '' + resources_key = 'exports' + base_path = '/zones/tasks/export' + + # capabilities + allow_create = True + allow_fetch = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters('zone_id', 'message', 'status') + + #: Properties + #: Timestamp when the zone was created + created_at = resource.Body('created_at') + #: Links contains a `self` pertaining to this zone or a `next` pertaining + #: to next page + links = resource.Body('links', type=dict) + #: Message + message = resource.Body('message') + #: Returns the total_count of resources matching this filter + metadata = resource.Body('metadata', type=list) + #: The project id which the zone belongs to + project_id = resource.Body('project_id') + #: Current status of the zone export + status = resource.Body('status') + #: Timestamp when the zone was last updated + updated_at = resource.Body('updated_at') + #: Version of the resource + version = 
resource.Body('version', type=int) + #: ID for the zone that was created by this export + zone_id = resource.Body('zone_id') + + def create(self, session, prepend_key=True, base_path=None, **kwargs): + """Create a remote resource based on this instance. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation + request. Default to True. + :param str base_path: Base part of the URI for creating resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_create` is not set to ``True``. + """ + if not self.allow_create: + raise exceptions.MethodNotSupported(self, "create") + + session = self._get_session(session) + microversion = self._get_microversion(session) + # Create ZoneExport requires empty body + # skip _prepare_request completely, since we need just empty body + request = resource._Request(self.base_path, None, None) + response = session.post( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + ) + + self.microversion = microversion + self._translate_response(response) + return self diff --git a/openstack/dns/v2/zone_import.py b/openstack/dns/v2/zone_import.py new file mode 100644 index 0000000000..8d2c3e145c --- /dev/null +++ b/openstack/dns/v2/zone_import.py @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import exceptions +from openstack import resource + + +class ZoneImport(_base.Resource): + """DNS Zone Import Resource""" + + resource_key = '' + resources_key = 'imports' + base_path = '/zones/tasks/import' + + # capabilities + allow_create = True + allow_fetch = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters('zone_id', 'message', 'status') + + #: Properties + #: Timestamp when the zone was created + created_at = resource.Body('created_at') + #: Links contains a `self` pertaining to this zone or a `next` pertaining + #: to next page + links = resource.Body('links', type=dict) + #: Message + message = resource.Body('message') + #: Returns the total_count of resources matching this filter + metadata = resource.Body('metadata', type=list) + #: The project id which the zone belongs to + project_id = resource.Body('project_id') + #: Current status of the zone import + status = resource.Body('status') + #: Timestamp when the zone was last updated + updated_at = resource.Body('updated_at') + #: Version of the resource + version = resource.Body('version', type=int) + #: ID for the zone that was created by this import + zone_id = resource.Body('zone_id') + + def create(self, session, prepend_key=True, base_path=None, **kwargs): + """Create a remote resource based on this instance. + + :param session: The session to use for making this request. 
+ :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation + request. Default to True. + :param str base_path: Base part of the URI for creating resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_create` is not set to ``True``. + """ + if not self.allow_create: + raise exceptions.MethodNotSupported(self, "create") + + session = self._get_session(session) + microversion = self._get_microversion(session) + # Create ZoneImport requires empty body and 'text/dns' as content-type + # skip _prepare_request completely, since we need just empty body + request = resource._Request( + self.base_path, None, {'content-type': 'text/dns'} + ) + response = session.post( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + ) + + self.microversion = microversion + self._translate_response(response) + return self diff --git a/openstack/dns/v2/zone_nameserver.py b/openstack/dns/v2/zone_nameserver.py new file mode 100644 index 0000000000..f91871f0a5 --- /dev/null +++ b/openstack/dns/v2/zone_nameserver.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.dns.v2 import _base +from openstack import resource + + +class ZoneNameserver(_base.Resource): + """DNS Zone Nameserver resource""" + + resources_key = 'nameservers' + base_path = '/zones/%(zone_id)s/nameservers' + + # capabilities + allow_list = True + + _query_mapping = resource.QueryParameters( + include_pagination_defaults=False, + ) + + #: ID for the zone + zone_id = resource.URI('zone_id') + #: The hostname of the nameserver + #: *Type: str* + hostname = resource.Body('hostname') + #: The priority of the nameserver + #: *Type: int* + priority = resource.Body('priority') diff --git a/openstack/dns/v2/zone_share.py b/openstack/dns/v2/zone_share.py new file mode 100644 index 0000000000..af889d3559 --- /dev/null +++ b/openstack/dns/v2/zone_share.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import resource + + +class ZoneShare(_base.Resource): + """DNS ZONE Share Resource""" + + resources_key = 'shared_zones' + base_path = '/zones/%(zone_id)s/shares' + + # capabilities + allow_create = True + allow_delete = True + allow_fetch = True + allow_list = True + + _query_mapping = resource.QueryParameters('target_project_id') + + # Properties + #: The ID of the zone being shared. + zone_id = resource.URI('zone_id') + #: Timestamp when the share was created. + created_at = resource.Body('created_at') + #: Timestamp when the member was last updated. 
+ updated_at = resource.Body('updated_at') + # FIXME(stephenfin): This conflicts since there is a zone ID in the URI + #: The zone ID of the zone being shared. + # zone_id = resource.Body('zone_id') + #: The project ID that owns the share. + project_id = resource.Body('project_id') + #: The target project ID that the zone is shared with. + target_project_id = resource.Body('target_project_id') diff --git a/openstack/dns/v2/zone_transfer.py b/openstack/dns/v2/zone_transfer.py new file mode 100644 index 0000000000..0c840184c6 --- /dev/null +++ b/openstack/dns/v2/zone_transfer.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import _base +from openstack import resource + + +class ZoneTransferBase(_base.Resource): + """DNS Zone Transfer Request/Accept Base Resource""" + + _query_mapping = resource.QueryParameters('status') + + #: Properties + #: Timestamp when the resource was created + created_at = resource.Body('created_at') + #: Key that is used as part of the zone transfer accept process. + #: This is only shown to the creator, and must be communicated out of band. 
+ key = resource.Body('key') + #: The project id which the zone belongs to + project_id = resource.Body('project_id') + #: Current status of the zone import + status = resource.Body('status') + #: Timestamp when the resource was last updated + updated_at = resource.Body('updated_at') + #: Version of the resource + version = resource.Body('version', type=int) + #: ID for the zone that is being exported + zone_id = resource.Body('zone_id') + + +class ZoneTransferRequest(ZoneTransferBase): + """DNS Zone Transfer Request Resource""" + + base_path = '/zones/tasks/transfer_requests' + resources_key = 'transfer_requests' + + # capabilities + allow_create = True + allow_fetch = True + allow_delete = True + allow_list = True + allow_commit = True + + #: Description + description = resource.Body('description') + #: A project ID that the request will be limited to. + #: No other project will be allowed to accept this request. + target_project_id = resource.Body('target_project_id') + #: Name for the zone that is being exported + zone_name = resource.Body('zone_name') + + +class ZoneTransferAccept(ZoneTransferBase): + """DNS Zone Transfer Accept Resource""" + + base_path = '/zones/tasks/transfer_accepts' + resources_key = 'transfer_accepts' + + # capabilities + allow_create = True + allow_fetch = True + allow_list = True + + #: Name for the zone that is being exported + zone_transfer_request_id = resource.Body('zone_transfer_request_id') diff --git a/openstack/dns/version.py b/openstack/dns/version.py new file mode 100644 index 0000000000..e9bd971a30 --- /dev/null +++ b/openstack/dns/version.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack import resource + + +class Version(resource.Resource): + resource_key = 'version' + resources_key = 'versions' + base_path = '/' + + # capabilities + allow_list = True + + # Properties + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/exceptions.py b/openstack/exceptions.py index ff3176fd87..2fb600ebfa 100644 --- a/openstack/exceptions.py +++ b/openstack/exceptions.py @@ -16,95 +16,281 @@ Exception definitions. """ -import six +import json +import re +import typing as ty +import warnings + +import requests +from requests import exceptions as _rex + +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + from openstack import resource class SDKException(Exception): """The base exception class for all exceptions this library raises.""" - def __init__(self, message=None, cause=None): + + def __init__(self, message: str | None = None, extra_data: ty.Any = None): self.message = self.__class__.__name__ if message is None else message - self.cause = cause - super(SDKException, self).__init__(self.message) + self.extra_data = extra_data + super().__init__(self.message) class EndpointNotFound(SDKException): """A mismatch occurred between what the client and server expect.""" - def __init__(self, message=None): - super(EndpointNotFound, self).__init__(message) + + def __init__(self, message: str | None = None): + super().__init__(message) class InvalidResponse(SDKException): """The response from the server is not valid for this request.""" - def __init__(self, response): - 
super(InvalidResponse, self).__init__() - self.response = response + def __init__(self, message: str | None = None): + super().__init__(message) class InvalidRequest(SDKException): """The request to the server is not valid.""" - def __init__(self, message=None): - super(InvalidRequest, self).__init__(message) + def __init__(self, message: str | None = None): + super().__init__(message) + +class HttpException(SDKException, _rex.HTTPError): + """The base exception for all HTTP error responses.""" -class HttpException(SDKException): + source: str + status_code: int | None - def __init__(self, message=None, details=None, response=None, - request_id=None, url=None, method=None, - http_status=None, cause=None): - super(HttpException, self).__init__(message=message, cause=cause) + def __init__( + self, + message: str | None = 'Error', + response: requests.Response | None = None, + http_status: int | None = None, + details: str | None = None, + request_id: str | None = None, + ): + if http_status is not None: + warnings.warn( + "The 'http_status' parameter is unnecessary and will be " + "removed in a future release", + os_warnings.RemovedInSDK50Warning, + ) + + if request_id is not None: + warnings.warn( + "The 'request_id' parameter is unnecessary and will be " + "removed in a future release", + os_warnings.RemovedInSDK50Warning, + ) + + if not message: + if response is not None: + message = f"{self.__class__.__name__}: {response.status_code}" + else: + message = f"{self.__class__.__name__}: Unknown error" + status = ( + response.status_code + if response is not None + else 'Unknown error' + ) + message = f'{self.__class__.__name__}: {status}' + + # Call directly rather than via super to control parameters + SDKException.__init__(self, message=message) + _rex.HTTPError.__init__(self, message, response=response) + + if response is not None: + self.request_id = response.headers.get('x-openstack-request-id') + self.status_code = response.status_code + else: + self.request_id 
= request_id + self.status_code = http_status self.details = details - self.response = response - self.request_id = request_id - self.url = url - self.method = method - self.http_status = http_status - - def __unicode__(self): - msg = self.__class__.__name__ + ": " + self.message + self.url = (self.request and self.request.url) or None + self.method = (self.request and self.request.method) or None + self.source = "Server" + if self.status_code is not None and (400 <= self.status_code < 500): + self.source = "Client" + + def __str__(self) -> str: + # 'Error' is the default value for self.message. If self.message isn't + # 'Error', then someone has set a more informative error message + # and we should use it. If it is 'Error', then we should construct a + # better message from the information we do have. + if not self.url or self.message == 'Error': + return self.message + if self.url: + remote_error = f"{self.source} Error for url: {self.url}" + if self.details: + remote_error += ', ' if self.details: - msg += ", " + six.text_type(self.details) - return msg + remote_error += str(self.details) + + return f"{super().__str__()}: {remote_error}" - def __str__(self): - return self.__unicode__() + +class BadRequestException(HttpException): + """HTTP 400 Bad Request.""" class NotFoundException(HttpException): """HTTP 404 Not Found.""" - pass + + +class ForbiddenException(HttpException): + """HTTP 403 Forbidden Request.""" + + +class ConflictException(HttpException): + """HTTP 409 Conflict.""" + + +class PreconditionFailedException(HttpException): + """HTTP 412 Precondition Failed.""" class MethodNotSupported(SDKException): """The resource does not support this operation type.""" - def __init__(self, resource, method): + + def __init__( + self, + resource: ty.Union['resource.Resource', type['resource.Resource']], + method: str, + ): # This needs to work with both classes and instances. 
try: name = resource.__name__ except AttributeError: name = resource.__class__.__name__ - message = ('The %s method is not supported for %s.%s' % - (method, resource.__module__, name)) - super(MethodNotSupported, self).__init__(message=message) + message = ( + f'The {method} method is not supported for ' + f'{resource.__module__}.{name}' + ) + super().__init__(message=message) class DuplicateResource(SDKException): """More than one resource exists with that name.""" - pass - - -class ResourceNotFound(NotFoundException): - """No resource exists with that name or id.""" - pass class ResourceTimeout(SDKException): """Timeout waiting for resource.""" - pass class ResourceFailure(SDKException): """General resource failure.""" - pass + + +class InvalidResourceQuery(SDKException): + """Invalid query params for resource.""" + + +def _extract_message(obj: ty.Any) -> str | None: + if isinstance(obj, dict): + # Most of services: compute, network + if obj.get('message'): + return str(obj['message']) + # Ironic starting with Stein + elif obj.get('faultstring'): + return str(obj['faultstring']) + elif isinstance(obj, str): + # Ironic before Stein has double JSON encoding, nobody remembers why. + try: + obj = json.loads(obj) + except Exception: # noqa: S110 + # This is best effort. Ignore any errors. 
+ pass + else: + return _extract_message(obj) + return None + + +def raise_from_response( + response: requests.Response, + error_message: str | None = None, +) -> None: + """Raise an instance of an HTTPException based on keystoneauth response.""" + if response.status_code < 400: + return + + cls: type[SDKException] + if response.status_code == 400: + cls = BadRequestException + elif response.status_code == 403: + cls = ForbiddenException + elif response.status_code == 404: + cls = NotFoundException + elif response.status_code == 409: + cls = ConflictException + elif response.status_code == 412: + cls = PreconditionFailedException + else: + cls = HttpException + + details = None + content_type = response.headers.get('content-type', '') + if response.content and 'application/json' in content_type: + # Iterate over the nested objects to retrieve "message" attribute. + # TODO(shade) Add exception handling for times when the content type + # is lying. + + try: + content = response.json() + messages = [_extract_message(obj) for obj in content.values()] + if not any(messages): + # Exception dict may be the root dict in projects that use WSME + messages = [_extract_message(content)] + # Join all of the messages together nicely and filter out any + # objects that don't have a "message" attr. + details = '\n'.join(msg for msg in messages if msg) + except Exception: + details = response.text + elif response.content and 'text/html' in content_type: + messages = [] + for line in response.text.splitlines(): + message = re.sub(r'<.+?>', '', line.strip()) + if message not in messages: + messages.append(message) + + # Return joined string separated by colons. 
+ details = ': '.join(msg for msg in messages if msg) + + if not details: + details = response.reason if response.reason else response.text + + raise cls( + message=error_message, + response=response, + details=details, + ) + + +class ConfigException(SDKException): + """Something went wrong with parsing your OpenStack Config.""" + + +class NotSupported(SDKException): + """Request cannot be performed by any supported API version.""" + + +class ValidationException(SDKException): + """Validation failed for resource.""" + + +class ServiceDisabledException(ConfigException): + """This service is disabled for reasons.""" + + +class ServiceDiscoveryException(SDKException): + """The service cannot be discovered.""" + + +# Backwards compatibility +OpenStackCloudException = SDKException +ResourceNotFound = NotFoundException diff --git a/openstack/fields.py b/openstack/fields.py new file mode 100644 index 0000000000..78c5a1b16f --- /dev/null +++ b/openstack/fields.py @@ -0,0 +1,318 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import abc +import typing as ty +import warnings + +from requests import structures + +from openstack import format +from openstack import warnings as os_warnings + +_SEEN_FORMAT = '{name}_seen' + +_T1 = ty.TypeVar('_T1') +_T2 = ty.TypeVar('_T2') +_T3 = ty.TypeVar('_T3', str, bool, int, float) + + +# case 1: data_type is unset -> return value as-is +@ty.overload +def _convert_type( + value: _T1, + data_type: None, + list_type: None = None, +) -> _T1: ... + + +# case 2: data_type is primitive type -> return value as said primitive type +@ty.overload +def _convert_type( + value: _T1, + data_type: type[_T3], + list_type: None = None, +) -> _T3: ... + + +# case 3: data_type is list, no list_type -> return value as list of whatever +# we got +@ty.overload +def _convert_type( + value: _T1, + data_type: type[list[ty.Any]], + list_type: None = None, +) -> list[_T1]: ... + + +# case 4: data_type is list, list_type is primitive type -> return value as +# list of said primitive type +@ty.overload +def _convert_type( + value: ty.Any, + data_type: type[list[ty.Any]], + list_type: type[_T3], +) -> list[_T3]: ... + + +# case 5: data_type is dict or Resource -> return value as dict/Resource +@ty.overload +def _convert_type( + value: ty.Any, + data_type: type[dict[ty.Any, ty.Any]], + list_type: None = None, +) -> dict[ty.Any, ty.Any]: ... + + +# case 6: data_type is a Formatter -> return value after conversion +@ty.overload +def _convert_type( + value: ty.Any, + data_type: type[format.Formatter[type[_T2]]], + list_type: None = None, +) -> _T2: ... + + +def _convert_type( + value: _T1, + data_type: type[ + _T3 | list[ty.Any] | dict[ty.Any, ty.Any] | format.Formatter[_T2], + ] + | None, + list_type: type[_T3] | None = None, +) -> _T1 | _T3 | list[_T3] | list[_T1] | dict[ty.Any, ty.Any] | _T2: + # This should allow handling list of dicts that have their own + # Component type directly. See openstack/compute/v2/limits.py + # and the RateLimit type for an example. 
+ if data_type is None: + return value + elif issubclass(data_type, list): + if isinstance(value, list | set | tuple): + if not list_type: + return data_type(value) + return [_convert_type(x, list_type) for x in value] + elif list_type: + return [_convert_type(value, list_type)] + else: + return [value] + elif isinstance(value, data_type): + return value + elif issubclass(data_type, dict): + if isinstance(value, dict): + return data_type(**value) + # TODO(stephenfin): This should be a warning/error + return dict() + elif issubclass(data_type, format.Formatter): + return data_type.deserialize(value) + elif issubclass(data_type, bool): + return data_type(value) + elif issubclass(data_type, int | float): + if isinstance(value, int | float): + return data_type(value) + if isinstance(value, str): + if issubclass(data_type, int) and value.isdigit(): + return data_type(value) + elif issubclass(data_type, float) and ( + x.isdigit() for x in value.split() + ): + return data_type(value) + return data_type() + + # at this point we expect to have a str and you can convert basically + # anything to a string, but there could be untyped code out there passing + # random monstrosities so we need the try-catch to be safe + try: + return data_type(value) + except ValueError: + return data_type() + + +class _BaseComponent(abc.ABC): + # The name this component is being tracked as in the Resource + key: ty.ClassVar[str] + # The class to be used for mappings + _map_cls: ty.ClassVar[type[ty.MutableMapping[str, ty.Any]]] = dict + + name: str + data_type: ty.Any | None + default: ty.Any + alias: str | None + aka: str | None + alternate_id: bool + list_type: ty.Any | None + coerce_to_default: bool + deprecated: bool + deprecation_reason: str | None + + def __init__( + self, + name: str, + type: ty.Any | None = None, + default: ty.Any = None, + alias: str | None = None, + aka: str | None = None, + alternate_id: bool = False, + list_type: ty.Any | None = None, + coerce_to_default: bool = 
False, + deprecated: bool = False, + deprecation_reason: str | None = None, + ): + """A typed descriptor for a component that makes up a Resource + + :param name: The name this component exists as on the server + :param type: + The type this component is expected to be by the server. + By default this is None, meaning any value you specify + will work. If you specify type=dict and then set a + component to a string, __set__ will fail, for example. + :param default: Typically None, but any other default can be set. + :param alias: If set, alternative attribute on object to return. + :param aka: If set, additional name attribute would be available under. + :param alternate_id: When `True`, this property is known internally as + a value that can be sent with requests that require an ID but when + `id` is not a name the Resource has. This is a relatively uncommon + case, and this setting should only be used once per Resource. + :param list_type: If type is `list`, list_type designates what the type + of the elements of the list should be. + :param coerce_to_default: If the Component is None or not present, + force the given default to be used. If a default is not given but a + type is given, construct an empty version of the type in question. + :param deprecated: Indicates if the option is deprecated. If it is, we + display a warning message to the user. + :param deprecation_reason: Custom deprecation message. 
+ """ + self.name = name + self.data_type = type + if type is not None and coerce_to_default and not default: + self.default = type() + else: + self.default = default + self.alias = alias + self.aka = aka + self.alternate_id = alternate_id + self.list_type = list_type + self.coerce_to_default = coerce_to_default + + self.deprecated = deprecated + self.deprecation_reason = deprecation_reason + + def __get__( + self, + instance: object, + owner: type[object] | None = None, + ) -> ty.Any: + if instance is None: + return self + + attributes = getattr(instance, self.key) + + try: + value = attributes[self.name] + except KeyError: + value = self.default + if self.alias: + # Resource attributes can be aliased to each other. If neither + # of them exist, then simply doing a + # getattr(instance, self.alias) here sends things into + # infinite recursion (this _get method is what gets called + # when getattr(instance) is called. + # To combat that, we set a flag on the instance saying that + # we have seen the current name, and we check before trying + # to resolve the alias if there is already a flag set for that + # alias name. We then remove the seen flag for ourselves after + # we exit the alias getattr to clean up after ourselves for + # the next time. + alias_flag = _SEEN_FORMAT.format(name=self.alias) + if not getattr(instance, alias_flag, False): + seen_flag = _SEEN_FORMAT.format(name=self.name) + # Prevent infinite recursion + setattr(instance, seen_flag, True) + value = getattr(instance, self.alias) + delattr(instance, seen_flag) + self.warn_if_deprecated_property(value) + return value + + # self.data_type() should not be called on None objects. + if value is None: + return None + + # This warning are pretty intrusive. Every time attribute is accessed + # a warning is being thrown. In neutron clients we have way too many + # places that still refer to tenant_id even though they may also + # properly support project_id. For now we silence tenant_id warnings. 
+ if self.name != "tenant_id": + self.warn_if_deprecated_property(value) + + return _convert_type(value, self.data_type, self.list_type) + + @property + def type(self) -> ty.Any | None: + # deprecated alias proxy + return self.data_type + + def warn_if_deprecated_property(self, value: ty.Any) -> None: + deprecated = object.__getattribute__(self, 'deprecated') + deprecation_reason = object.__getattribute__( + self, + 'deprecation_reason', + ) + + if value and deprecated: + warnings.warn( + "The field {!r} has been deprecated. {}".format( + self.name, deprecation_reason or "Avoid usage." + ), + os_warnings.RemovedFieldWarning, + ) + + def __set__(self, instance: object, value: ty.Any) -> None: + if self.coerce_to_default and value is None: + value_ = self.default + elif value != self.default: + value_ = _convert_type(value, self.data_type, self.list_type) + else: + value_ = value + + attributes = getattr(instance, self.key) + attributes[self.name] = value_ + + def __delete__(self, instance: object) -> None: + try: + attributes = getattr(instance, self.key) + del attributes[self.name] + except KeyError: + pass + + +class Body(_BaseComponent): + """Body attributes""" + + key = "_body" + + +class Header(_BaseComponent): + """Header attributes""" + + key = "_header" + _map_cls = structures.CaseInsensitiveDict + + +class URI(_BaseComponent): + """URI attributes""" + + key = "_uri" + + +class Computed(_BaseComponent): + """Computed attributes""" + + key = "_computed" diff --git a/openstack/tests/unit/bare_metal/__init__.py b/openstack/fixture/__init__.py similarity index 100% rename from openstack/tests/unit/bare_metal/__init__.py rename to openstack/fixture/__init__.py diff --git a/openstack/fixture/connection.py b/openstack/fixture/connection.py new file mode 100644 index 0000000000..d9a68614c8 --- /dev/null +++ b/openstack/fixture/connection.py @@ -0,0 +1,105 @@ +# Copyright 2019 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +import fixtures +from keystoneauth1.fixture import v2 +from keystoneauth1.fixture import v3 +import os_service_types + +_service_type_manager = os_service_types.ServiceTypes() + +_SUBURL_TEMPLATES = { + 'public': 'https://example.com/{service_type}', + 'internal': 'https://internal.example.com/{service_type}', + 'admin': 'https://example.com/{service_type}', +} +_ENDPOINT_TEMPLATES = { + 'public': 'https://{service_type}.example.com', + 'internal': 'https://internal.{service_type}.example.com', + 'admin': 'https://{service_type}.example.com', +} + + +class ConnectionFixture(fixtures.Fixture): + _suffixes = { + 'baremetal': '/', + 'block-storage': '/{project_id}', + 'compute': '/v2.1/', + 'container-infrastructure-management': '/v1', + 'object-store': '/v1/{project_id}', + 'orchestration': '/v1/{project_id}', + 'volumev2': '/v2/{project_id}', + 'volumev3': '/v3/{project_id}', + } + + def __init__(self, suburl=False, project_id=None, *args, **kwargs): + super().__init__(*args, **kwargs) + self._endpoint_templates = _ENDPOINT_TEMPLATES + if suburl: + self.use_suburl() + self.project_id = project_id or uuid.uuid4().hex.replace('-', '') + self.build_tokens() + + def use_suburl(self): + self._endpoint_templates = _SUBURL_TEMPLATES + + def _get_endpoint_templates(self, service_type, alias=None, v2=False): + templates = {} + for k, v in self._endpoint_templates.items(): + suffix = self._suffixes.get( + alias, 
self._suffixes.get(service_type, '') + ) + # For a keystone v2 catalog, we want to list the + # versioned endpoint in the catalog, because that's + # more likely how those were deployed. + if v2: + suffix = '/v2.0' + templates[k] = (v + suffix).format( + service_type=service_type, + project_id=self.project_id, + ) + return templates + + def _setUp(self): + pass + + def clear_tokens(self): + self.v2_token = v2.Token(tenant_id=self.project_id) + self.v3_token = v3.Token(project_id=self.project_id) + + def build_tokens(self): + self.clear_tokens() + for service in _service_type_manager.services: + service_type = service['service_type'] + if service_type == 'ec2-api': + continue + service_name = service['project'] + ets = self._get_endpoint_templates(service_type) + v3_svc = self.v3_token.add_service(service_type, name=service_name) + v2_svc = self.v2_token.add_service(service_type, name=service_name) + v3_svc.add_standard_endpoints(region='RegionOne', **ets) + if service_type == 'identity': + ets = self._get_endpoint_templates(service_type, v2=True) + v2_svc.add_endpoint(region='RegionOne', **ets) + for alias in service.get('aliases', []): + ets = self._get_endpoint_templates(service_type, alias=alias) + v3_svc = self.v3_token.add_service(alias, name=service_name) + v2_svc = self.v2_token.add_service(alias, name=service_name) + v3_svc.add_standard_endpoints(region='RegionOne', **ets) + v2_svc.add_endpoint(region='RegionOne', **ets) + + def _cleanup(self): + pass diff --git a/openstack/format.py b/openstack/format.py index 94fd310fe9..0e7c765401 100644 --- a/openstack/format.py +++ b/openstack/format.py @@ -10,24 +10,21 @@ # License for the specific language governing permissions and limitations # under the License. 
+import typing as ty -class Formatter(object): +_T = ty.TypeVar('_T') - @classmethod - def serialize(cls, value): - """Return a string representing the formatted value""" - raise NotImplementedError +class Formatter(ty.Generic[_T]): @classmethod - def deserialize(cls, value): + def deserialize(cls, value: ty.Any) -> _T: """Return a formatted object representing the value""" raise NotImplementedError -class BoolStr(Formatter): - +class BoolStr(Formatter[bool]): @classmethod - def deserialize(cls, value): + def deserialize(cls, value: ty.Any) -> bool: """Convert a boolean string to a boolean""" expr = str(value).lower() if "true" == expr: @@ -35,17 +32,4 @@ def deserialize(cls, value): elif "false" == expr: return False else: - raise ValueError("Unable to deserialize boolean string: %s" - % value) - - @classmethod - def serialize(cls, value): - """Convert a boolean to a boolean string""" - if isinstance(value, bool): - if value: - return "true" - else: - return "false" - else: - raise ValueError("Unable to serialize boolean string: %s" - % value) + raise ValueError(f"Unable to deserialize boolean string: {value}") diff --git a/openstack/identity/identity_service.py b/openstack/identity/identity_service.py index 4bcfafa5ba..e50d535cb4 100644 --- a/openstack/identity/identity_service.py +++ b/openstack/identity/identity_service.py @@ -10,25 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack import service_filter +from openstack.identity.v2 import _proxy as _proxy_v2 +from openstack.identity.v3 import _proxy as _proxy_v3 +from openstack import service_description -class IdentityService(service_filter.ServiceFilter): +class IdentityService( + service_description.ServiceDescription[_proxy_v2.Proxy | _proxy_v3.Proxy] +): """The identity service.""" - valid_versions = [ - service_filter.ValidVersion('v3'), - service_filter.ValidVersion('v2'), - ] - - def __init__(self, **kwargs): - """Create an identity service.""" - kwargs['service_type'] = 'identity' - super(IdentityService, self).__init__(**kwargs) - - -class AdminService(IdentityService): - - def __init__(self, **kwargs): - kwargs['interface'] = service_filter.ServiceFilter.ADMIN - super(AdminService, self).__init__(**kwargs) + supported_versions = { + '2': _proxy_v2.Proxy, + '3': _proxy_v3.Proxy, + } diff --git a/openstack/identity/v2/_proxy.py b/openstack/identity/v2/_proxy.py index 348bcb97ed..41a7a16183 100644 --- a/openstack/identity/v2/_proxy.py +++ b/openstack/identity/v2/_proxy.py @@ -10,20 +10,46 @@ # License for the specific language governing permissions and limitations # under the License. +import typing as ty + +from openstack.identity.v2 import extension as _extension from openstack.identity.v2 import role as _role from openstack.identity.v2 import tenant as _tenant from openstack.identity.v2 import user as _user from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + def extensions(self): + """Retrieve a generator of extensions + + :returns: A generator of extension instances. 
+ :rtype: :class:`~openstack.identity.v2.extension.Extension` + """ + return self._list(_extension.Extension) + def get_extension(self, extension): + """Get a single extension -class Proxy(proxy.BaseProxy): + :param extension: The value can be the ID of an extension or a + :class:`~openstack.identity.v2.extension.Extension` + instance. + + :returns: One :class:`~openstack.identity.v2.extension.Extension` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no extension can be found. + """ + return self._get(_extension.Extension, extension) def create_role(self, **attrs): """Create a new role from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v2.role.Role`, - comprised of the properties on the Role class. + a :class:`~openstack.identity.v2.role.Role`, + comprised of the properties on the Role class. :returns: The results of role creation :rtype: :class:`~openstack.identity.v2.role.Role` @@ -34,12 +60,12 @@ def delete_role(self, role, ignore_missing=True): """Delete a role :param role: The value can be either the ID of a role or a - :class:`~openstack.identity.v2.role.Role` instance. + :class:`~openstack.identity.v2.role.Role` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the role does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent role. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the role does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent role. :returns: ``None`` """ @@ -50,45 +76,46 @@ def find_role(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a role. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. 
- When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v2.role.Role` or None """ - return self._find(_role.Role, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _role.Role, name_or_id, ignore_missing=ignore_missing + ) def get_role(self, role): """Get a single role :param role: The value can be the ID of a role or a - :class:`~openstack.identity.v2.role.Role` instance. + :class:`~openstack.identity.v2.role.Role` instance. :returns: One :class:`~openstack.identity.v2.role.Role` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_role.Role, role) def roles(self, **query): """Retrieve a generator of roles - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of role instances. :rtype: :class:`~openstack.identity.v2.role.Role` """ - return self._list(_role.Role, paginated=True, **query) + return self._list(_role.Role, **query) def update_role(self, role, **attrs): """Update a role :param role: Either the ID of a role or a - :class:`~openstack.identity.v2.role.Role` instance. - :attrs kwargs: The attributes to update on the role represented - by ``value``. + :class:`~openstack.identity.v2.role.Role` instance. + :param attrs: The attributes to update on the role represented + by ``role``. 
:returns: The updated role :rtype: :class:`~openstack.identity.v2.role.Role` @@ -99,8 +126,8 @@ def create_tenant(self, **attrs): """Create a new tenant from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v2.tenant.Tenant`, - comprised of the properties on the Tenant class. + a :class:`~openstack.identity.v2.tenant.Tenant`, + comprised of the properties on the Tenant class. :returns: The results of tenant creation :rtype: :class:`~openstack.identity.v2.tenant.Tenant` @@ -111,12 +138,12 @@ def delete_tenant(self, tenant, ignore_missing=True): """Delete a tenant :param tenant: The value can be either the ID of a tenant or a - :class:`~openstack.identity.v2.tenant.Tenant` instance. + :class:`~openstack.identity.v2.tenant.Tenant` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the tenant does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent tenant. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the tenant does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent tenant. :returns: ``None`` """ @@ -127,45 +154,46 @@ def find_tenant(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a tenant. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.identity.v2.tenant.Tenant` or None """ - return self._find(_tenant.Tenant, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _tenant.Tenant, name_or_id, ignore_missing=ignore_missing + ) def get_tenant(self, tenant): """Get a single tenant :param tenant: The value can be the ID of a tenant or a - :class:`~openstack.identity.v2.tenant.Tenant` instance. + :class:`~openstack.identity.v2.tenant.Tenant` instance. :returns: One :class:`~openstack.identity.v2.tenant.Tenant` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_tenant.Tenant, tenant) def tenants(self, **query): """Retrieve a generator of tenants - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of tenant instances. :rtype: :class:`~openstack.identity.v2.tenant.Tenant` """ - return self._list(_tenant.Tenant, paginated=True, **query) + return self._list(_tenant.Tenant, **query) def update_tenant(self, tenant, **attrs): """Update a tenant :param tenant: Either the ID of a tenant or a - :class:`~openstack.identity.v2.tenant.Tenant` instance. - :attrs kwargs: The attributes to update on the tenant represented - by ``value``. + :class:`~openstack.identity.v2.tenant.Tenant` instance. + :param attrs: The attributes to update on the tenant represented + by ``tenant``. :returns: The updated tenant :rtype: :class:`~openstack.identity.v2.tenant.Tenant` @@ -176,8 +204,8 @@ def create_user(self, **attrs): """Create a new user from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v2.user.User`, - comprised of the properties on the User class. 
+ a :class:`~openstack.identity.v2.user.User`, + comprised of the properties on the User class. :returns: The results of user creation :rtype: :class:`~openstack.identity.v2.user.User` @@ -188,12 +216,12 @@ def delete_user(self, user, ignore_missing=True): """Delete a user :param user: The value can be either the ID of a user or a - :class:`~openstack.identity.v2.user.User` instance. + :class:`~openstack.identity.v2.user.User` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the user does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent user. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the user does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent user. :returns: ``None`` """ @@ -204,47 +232,111 @@ def find_user(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a user. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v2.user.User` or None """ - return self._find(_user.User, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _user.User, name_or_id, ignore_missing=ignore_missing + ) def get_user(self, user): """Get a single user :param user: The value can be the ID of a user or a - :class:`~openstack.identity.v2.user.User` instance. + :class:`~openstack.identity.v2.user.User` instance. 
:returns: One :class:`~openstack.identity.v2.user.User` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_user.User, user) def users(self, **query): """Retrieve a generator of users - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of user instances. :rtype: :class:`~openstack.identity.v2.user.User` """ - return self._list(_user.User, paginated=True, **query) + return self._list(_user.User, **query) def update_user(self, user, **attrs): """Update a user :param user: Either the ID of a user or a - :class:`~openstack.identity.v2.user.User` instance. - :attrs kwargs: The attributes to update on the user represented - by ``value``. + :class:`~openstack.identity.v2.user.User` instance. + :param attrs: The attributes to update on the user represented + by ``user``. :returns: The updated user :rtype: :class:`~openstack.identity.v2.user.User` """ return self._update(_user.User, user, **attrs) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. 
+ :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+ """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/identity/v2/extension.py b/openstack/identity/v2/extension.py index 19c8ab448b..629e31d64a 100644 --- a/openstack/identity/v2/extension.py +++ b/openstack/identity/v2/extension.py @@ -10,7 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + from openstack import resource @@ -18,35 +22,48 @@ class Extension(resource.Resource): resource_key = 'extension' resources_key = 'extensions' base_path = '/extensions' - service = identity_service.IdentityService() # capabilities allow_list = True + allow_fetch = True # Properties #: A unique identifier, which will be used for accessing the extension #: through a dedicated url ``/extensions/*alias*``. The extension #: alias uniquely identifies an extension and is prefixed by a vendor #: identifier. *Type: string* - alias = resource.prop('alias') + alias = resource.Body('alias', alternate_id=True) #: A description of the extension. *Type: string* - description = resource.prop('description') + description = resource.Body('description') #: Links to the documentation in various format. *Type: string* - links = resource.prop('links') + links = resource.Body('links', type=list, list_type=dict) #: The name of the extension. *Type: string* - name = resource.prop('name') + name = resource.Body('name') #: The second unique identifier of the extension after the alias. #: It is usually a URL which will be used. Example: #: "http://docs.openstack.org/identity/api/ext/s3tokens/v1.0" #: *Type: string* - namespace = resource.prop('namespace') + namespace = resource.Body('namespace') #: The last time the extension has been modified (update date). 
- updated_at = resource.prop('updated') + updated_at = resource.Body('updated') @classmethod - def list(cls, session, **params): - resp = session.get(cls.base_path, endpoint_filter=cls.service, - params=params) + def list( + cls, + session: adapter.Adapter, + paginated: bool = True, + base_path: str | None = None, + allow_unknown_params: bool = False, + *, + microversion: str | None = None, + headers: dict[str, str] | None = None, + max_items: int | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: + if base_path is None: + base_path = cls.base_path + + resp = session.get(base_path, params=params) resp = resp.json() for data in resp[cls.resources_key]['values']: yield cls.existing(**data) diff --git a/openstack/identity/v2/role.py b/openstack/identity/v2/role.py index 536b192802..df27e55963 100644 --- a/openstack/identity/v2/role.py +++ b/openstack/identity/v2/role.py @@ -11,7 +11,6 @@ # under the License. from openstack import format -from openstack.identity import identity_service from openstack import resource @@ -19,20 +18,19 @@ class Role(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/OS-KSADM/roles' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # Properties #: The description of the role. *Type: string* - description = resource.prop('description') + description = resource.Body('description') #: Setting this attribute to ``False`` prevents this role from being #: available in the role list. *Type: bool* - is_enabled = resource.prop('enabled', type=format.BoolStr) + is_enabled = resource.Body('enabled', type=format.BoolStr) #: Unique role name. 
*Type: string* - name = resource.prop('name') + name = resource.Body('name') diff --git a/openstack/identity/v2/tenant.py b/openstack/identity/v2/tenant.py index 93630c992d..1ea272ffa5 100644 --- a/openstack/identity/v2/tenant.py +++ b/openstack/identity/v2/tenant.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service from openstack import resource @@ -18,22 +17,21 @@ class Tenant(resource.Resource): resource_key = 'tenant' resources_key = 'tenants' base_path = '/tenants' - service = identity_service.AdminService() # capabilities allow_create = True - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # Properties #: The description of the tenant. *Type: string* - description = resource.prop('description') + description = resource.Body('description') #: Setting this attribute to ``False`` prevents users from authorizing #: against this tenant. Additionally, all pre-existing tokens authorized #: for the tenant are immediately invalidated. Re-enabling a tenant #: does not re-enable pre-existing tokens. *Type: bool* - is_enabled = resource.prop('enabled', type=bool) + is_enabled = resource.Body('enabled', type=bool) #: Unique tenant name. *Type: string* - name = resource.prop('name') + name = resource.Body('name') diff --git a/openstack/identity/v2/user.py b/openstack/identity/v2/user.py index 930494c900..7b4d893a5e 100644 --- a/openstack/identity/v2/user.py +++ b/openstack/identity/v2/user.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.identity import identity_service from openstack import resource @@ -18,22 +17,21 @@ class User(resource.Resource): resource_key = 'user' resources_key = 'users' base_path = '/users' - service = identity_service.AdminService() # capabilities allow_create = True - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # Properties #: The email of this user. *Type: string* - email = resource.prop('email') + email = resource.Body('email') #: Setting this value to ``False`` prevents the user from authenticating or #: receiving authorization. Additionally, all pre-existing tokens held by #: the user are immediately invalidated. Re-enabling a user does not #: re-enable pre-existing tokens. *Type: bool* - is_enabled = resource.prop('enabled', type=bool) + is_enabled = resource.Body('enabled', type=bool) #: The name of this user. *Type: string* - name = resource.prop('name') + name = resource.Body('name') diff --git a/openstack/identity/v3/_proxy.py b/openstack/identity/v3/_proxy.py index 5b7bd3cb39..b38eb837fa 100644 --- a/openstack/identity/v3/_proxy.py +++ b/openstack/identity/v3/_proxy.py @@ -10,21 +10,93 @@ # License for the specific language governing permissions and limitations # under the License. 
+import typing as ty +import warnings + +import openstack.exceptions as exception +from openstack.identity.v3 import ( + application_credential as _application_credential, +) +from openstack.identity.v3 import access_rule as _access_rule from openstack.identity.v3 import credential as _credential from openstack.identity.v3 import domain as _domain +from openstack.identity.v3 import domain_config as _domain_config from openstack.identity.v3 import endpoint as _endpoint +from openstack.identity.v3 import federation_protocol as _federation_protocol from openstack.identity.v3 import group as _group +from openstack.identity.v3 import identity_provider as _identity_provider +from openstack.identity.v3 import limit as _limit +from openstack.identity.v3 import mapping as _mapping from openstack.identity.v3 import policy as _policy from openstack.identity.v3 import project as _project from openstack.identity.v3 import region as _region +from openstack.identity.v3 import registered_limit as _registered_limit from openstack.identity.v3 import role as _role +from openstack.identity.v3 import role_assignment as _role_assignment +from openstack.identity.v3 import ( + role_domain_group_assignment as _role_domain_group_assignment, +) +from openstack.identity.v3 import ( + role_domain_user_assignment as _role_domain_user_assignment, +) +from openstack.identity.v3 import ( + role_project_group_assignment as _role_project_group_assignment, +) +from openstack.identity.v3 import ( + role_project_user_assignment as _role_project_user_assignment, +) +from openstack.identity.v3 import ( + role_system_group_assignment as _role_system_group_assignment, +) +from openstack.identity.v3 import ( + role_system_user_assignment as _role_system_user_assignment, +) from openstack.identity.v3 import service as _service +from openstack.identity.v3 import service_provider as _service_provider +from openstack.identity.v3 import system as _system +from openstack.identity.v3 import token as _token from 
openstack.identity.v3 import trust as _trust from openstack.identity.v3 import user as _user -from openstack import proxy2 as proxy - - -class Proxy(proxy.BaseProxy): +from openstack import proxy +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['3']] = '3' + + _resource_registry = { + "application_credential": _application_credential.ApplicationCredential, # noqa: E501 + "access_rule": _access_rule.AccessRule, + "credential": _credential.Credential, + "domain": _domain.Domain, + "endpoint": _endpoint.Endpoint, + "federation_protocol": _federation_protocol.FederationProtocol, + "group": _group.Group, + "identity_provider": _identity_provider.IdentityProvider, + "limit": _limit.Limit, + "mapping": _mapping.Mapping, + "policy": _policy.Policy, + "project": _project.Project, + "region": _region.Region, + "registered_limit": _registered_limit.RegisteredLimit, + "role": _role.Role, + "role_assignment": _role_assignment.RoleAssignment, + "role_domain_group_assignment": _role_domain_group_assignment.RoleDomainGroupAssignment, # noqa: E501 + "role_domain_user_assignment": _role_domain_user_assignment.RoleDomainUserAssignment, # noqa: E501 + "role_project_group_assignment": _role_project_group_assignment.RoleProjectGroupAssignment, # noqa: E501 + "role_project_user_assignment": _role_project_user_assignment.RoleProjectUserAssignment, # noqa: E501 + "role_system_group_assignment": _role_system_group_assignment.RoleSystemGroupAssignment, # noqa: E501 + "role_system_user_assignment": _role_system_user_assignment.RoleSystemUserAssignment, # noqa: E501 + "service": _service.Service, + "system": _system.System, + "trust": _trust.Trust, + "token": _token.Token, + "user": _user.User, + } + + # ========== Credentials ========== def create_credential(self, **attrs): """Create a new credential from attributes @@ -42,32 +114,34 @@ def delete_credential(self, 
credential, ignore_missing=True): """Delete a credential :param credential: The value can be either the ID of a credential or a - :class:`~openstack.identity.v3.credential.Credential` instance. + :class:`~openstack.identity.v3.credential.Credential` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the credential does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent credential. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the credential does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent credential. :returns: ``None`` """ - self._delete(_credential.Credential, credential, - ignore_missing=ignore_missing) + self._delete( + _credential.Credential, credential, ignore_missing=ignore_missing + ) def find_credential(self, name_or_id, ignore_missing=True): """Find a single credential :param name_or_id: The name or ID of a credential. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.credential.Credential` - or None + or None """ - return self._find(_credential.Credential, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _credential.Credential, name_or_id, ignore_missing=ignore_missing + ) def get_credential(self, credential): """Get a single credential @@ -76,42 +150,44 @@ def get_credential(self, credential): :class:`~openstack.identity.v3.credential.Credential` instance. 
:returns: One :class:`~openstack.identity.v3.credential.Credential` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_credential.Credential, credential) def credentials(self, **query): """Retrieve a generator of credentials - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of credentials instances. :rtype: :class:`~openstack.identity.v3.credential.Credential` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_credential.Credential, paginated=False, **query) + return self._list(_credential.Credential, **query) def update_credential(self, credential, **attrs): """Update a credential :param credential: Either the ID of a credential or a :class:`~openstack.identity.v3.credential.Credential` instance. - :attrs kwargs: The attributes to update on the credential represented - by ``value``. + :param attrs: The attributes to update on the credential represented + by ``credential``. :returns: The updated credential :rtype: :class:`~openstack.identity.v3.credential.Credential` """ return self._update(_credential.Credential, credential, **attrs) + # ========== Domains ========== + def create_domain(self, **attrs): """Create a new domain from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.domain.Domain`, - comprised of the properties on the Domain class. + a :class:`~openstack.identity.v3.domain.Domain`, + comprised of the properties on the Domain class. 
:returns: The results of domain creation :rtype: :class:`~openstack.identity.v3.domain.Domain` @@ -122,12 +198,12 @@ def delete_domain(self, domain, ignore_missing=True): """Delete a domain :param domain: The value can be either the ID of a domain or a - :class:`~openstack.identity.v3.domain.Domain` instance. + :class:`~openstack.identity.v3.domain.Domain` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the domain does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent domain. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the domain does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent domain. :returns: ``None`` """ @@ -138,58 +214,140 @@ def find_domain(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a domain. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.domain.Domain` or None """ - return self._find(_domain.Domain, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _domain.Domain, name_or_id, ignore_missing=ignore_missing + ) def get_domain(self, domain): """Get a single domain :param domain: The value can be the ID of a domain or a - :class:`~openstack.identity.v3.domain.Domain` instance. + :class:`~openstack.identity.v3.domain.Domain` instance. 
:returns: One :class:`~openstack.identity.v3.domain.Domain` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_domain.Domain, domain) def domains(self, **query): """Retrieve a generator of domains - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of domain instances. :rtype: :class:`~openstack.identity.v3.domain.Domain` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_domain.Domain, paginated=False, **query) + return self._list(_domain.Domain, **query) def update_domain(self, domain, **attrs): """Update a domain :param domain: Either the ID of a domain or a - :class:`~openstack.identity.v3.domain.Domain` instance. - :attrs kwargs: The attributes to update on the domain represented - by ``value``. + :class:`~openstack.identity.v3.domain.Domain` instance. + :param attrs: The attributes to update on the domain represented + by ``domain``. :returns: The updated domain :rtype: :class:`~openstack.identity.v3.domain.Domain` """ return self._update(_domain.Domain, domain, **attrs) + # ========== Domain configs ========== + + def create_domain_config(self, domain, **attrs): + """Create a new config for a domain from attributes. + + :param domain: The value can be the ID of a domain or + a :class:`~openstack.identity.v3.domain.Domain` instance. + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.identity.v3.domain_config.DomainConfig` + comprised of the properties on the DomainConfig class. 
+ + :returns: The results of domain config creation + :rtype: :class:`~openstack.identity.v3.domain_config.DomainConfig` + """ + domain_id = resource.Resource._get_id(domain) + return self._create( + _domain_config.DomainConfig, + domain_id=domain_id, + **attrs, + ) + + def delete_domain_config(self, domain, ignore_missing=True): + """Delete a config for a domain + + :param domain: The value can be the ID of a domain or a + a :class:`~openstack.identity.v3.domain.Domain` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the identity provider does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent config for a domain. + + :returns: ``None`` + """ + domain_id = resource.Resource._get_id(domain) + self._delete( + _domain_config.DomainConfig, + None, + domain_id=domain_id, + ignore_missing=ignore_missing, + ) + + def get_domain_config(self, domain): + """Get a single config for a domain + + :param domain_id: The value can be the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + + :returns: One + :class:`~openstack.identity.v3.domain_config.DomainConfig` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + domain_id = resource.Resource._get_id(domain) + return self._get( + _domain_config.DomainConfig, + domain_id=domain_id, + requires_id=False, + ) + + def update_domain_config(self, domain, **attrs): + """Update a config for a domain + + :param domain_id: The value can be the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + :param attrs: The attributes to update on the config for a domain + represented by ``domain_id``. 
+ + :returns: The updated config for a domain + :rtype: :class:`~openstack.identity.v3.domain_config.DomainConfig` + """ + domain_id = resource.Resource._get_id(domain) + return self._update( + _domain_config.DomainConfig, + None, + domain_id=domain_id, + **attrs, + ) + + # ========== Endpoints ========== + def create_endpoint(self, **attrs): """Create a new endpoint from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.endpoint.Endpoint`, - comprised of the properties on the Endpoint class. + a :class:`~openstack.identity.v3.endpoint.Endpoint`, + comprised of the properties on the Endpoint class. :returns: The results of endpoint creation :rtype: :class:`~openstack.identity.v3.endpoint.Endpoint` @@ -200,77 +358,126 @@ def delete_endpoint(self, endpoint, ignore_missing=True): """Delete an endpoint :param endpoint: The value can be either the ID of an endpoint or a - :class:`~openstack.identity.v3.endpoint.Endpoint` instance. + :class:`~openstack.identity.v3.endpoint.Endpoint` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the endpoint does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent endpoint. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the endpoint does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent endpoint. :returns: ``None`` """ - self._delete(_endpoint.Endpoint, endpoint, - ignore_missing=ignore_missing) + self._delete( + _endpoint.Endpoint, endpoint, ignore_missing=ignore_missing + ) def find_endpoint(self, name_or_id, ignore_missing=True): """Find a single endpoint :param name_or_id: The name or ID of a endpoint. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. 
- When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.endpoint.Endpoint` or None """ - return self._find(_endpoint.Endpoint, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _endpoint.Endpoint, name_or_id, ignore_missing=ignore_missing + ) - def get_endpoint(self, endpoint): + # TODO(stephenfin): This conflicts with Adapter.get_endpoint + def get_endpoint(self, endpoint): # type: ignore[override] """Get a single endpoint :param endpoint: The value can be the ID of an endpoint or a - :class:`~openstack.identity.v3.endpoint.Endpoint` - instance. + :class:`~openstack.identity.v3.endpoint.Endpoint` + instance. :returns: One :class:`~openstack.identity.v3.endpoint.Endpoint` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_endpoint.Endpoint, endpoint) def endpoints(self, **query): """Retrieve a generator of endpoints - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of endpoint instances. :rtype: :class:`~openstack.identity.v3.endpoint.Endpoint` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_endpoint.Endpoint, paginated=False, **query) + return self._list(_endpoint.Endpoint, **query) def update_endpoint(self, endpoint, **attrs): """Update a endpoint - :param endpoint: Either the ID of a endpoint or a - :class:`~openstack.identity.v3.endpoint.Endpoint` - instance. 
- :attrs kwargs: The attributes to update on the endpoint represented - by ``value``. + :param endpoint: Either the ID of an endpoint or a + :class:`~openstack.identity.v3.endpoint.Endpoint` instance. + :param attrs: The attributes to update on the endpoint represented + by ``endpoint``. :returns: The updated endpoint :rtype: :class:`~openstack.identity.v3.endpoint.Endpoint` """ return self._update(_endpoint.Endpoint, endpoint, **attrs) + # ========== Project endpoints ========== + + def project_endpoints(self, project, **query): + """Retrieve a generator of endpoints which are associated with the + project. + + :param project: Either the project ID or an instance of + :class:`~openstack.identity.v3.project.Project` + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of endpoint instances. + :rtype: :class:`~openstack.identity.v3.endpoint.ProjectEndpoint` + """ + project_id = self._get_resource(_project.Project, project).id + return self._list( + _endpoint.ProjectEndpoint, project_id=project_id, **query + ) + + def associate_endpoint_with_project(self, project, endpoint): + """Creates a direct association between project and endpoint + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` instance. + :param endpoint: Either the ID of an endpoint or a + :class:`~openstack.identity.v3.endpoint.Endpoint` instance. + :returns: None + """ + project = self._get_resource(_project.Project, project) + endpoint = self._get_resource(_endpoint.Endpoint, endpoint) + project.associate_endpoint(self, endpoint.id) + + def disassociate_endpoint_from_project(self, project, endpoint): + """Removes a direct association between project and endpoint + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` instance. 
+ :param endpoint: Either the ID of an endpoint or a + :class:`~openstack.identity.v3.endpoint.Endpoint` instance. + :returns: None + """ + project = self._get_resource(_project.Project, project) + endpoint = self._get_resource(_endpoint.Endpoint, endpoint) + project.disassociate_endpoint(self, endpoint.id) + + # ========== Groups ========== + def create_group(self, **attrs): """Create a new group from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.group.Group`, - comprised of the properties on the Group class. + a :class:`~openstack.identity.v3.group.Group`, + comprised of the properties on the Group class. :returns: The results of group creation :rtype: :class:`~openstack.identity.v3.group.Group` @@ -281,75 +488,134 @@ def delete_group(self, group, ignore_missing=True): """Delete a group :param group: The value can be either the ID of a group or a - :class:`~openstack.identity.v3.group.Group` instance. + :class:`~openstack.identity.v3.group.Group` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the group does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent group. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the group does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent group. :returns: ``None`` """ self._delete(_group.Group, group, ignore_missing=ignore_missing) - def find_group(self, name_or_id, ignore_missing=True): + def find_group(self, name_or_id, ignore_missing=True, **query): """Find a single group :param name_or_id: The name or ID of a group. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. 
- When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.group.Group` or None """ - return self._find(_group.Group, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _group.Group, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_group(self, group): """Get a single group :param group: The value can be the ID of a group or a - :class:`~openstack.identity.v3.group.Group` - instance. + :class:`~openstack.identity.v3.group.Group` + instance. :returns: One :class:`~openstack.identity.v3.group.Group` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_group.Group, group) def groups(self, **query): """Retrieve a generator of groups - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of group instances. :rtype: :class:`~openstack.identity.v3.group.Group` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_group.Group, paginated=False, **query) + return self._list(_group.Group, **query) def update_group(self, group, **attrs): """Update a group :param group: Either the ID of a group or a - :class:`~openstack.identity.v3.group.Group` instance. - :attrs kwargs: The attributes to update on the group represented - by ``value``. + :class:`~openstack.identity.v3.group.Group` instance. + :param attrs: The attributes to update on the group represented + by ``group``. 
:returns: The updated group :rtype: :class:`~openstack.identity.v3.group.Group` """ return self._update(_group.Group, group, **attrs) + def add_user_to_group(self, user, group): + """Add user to group + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :return: ``None`` + """ + user = self._get_resource(_user.User, user) + group = self._get_resource(_group.Group, group) + group.add_user(self, user) + + def remove_user_from_group(self, user, group): + """Remove user to group + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :return: ``None`` + """ + user = self._get_resource(_user.User, user) + group = self._get_resource(_group.Group, group) + group.remove_user(self, user) + + def check_user_in_group(self, user, group): + """Check whether user belongsto group + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :return: A boolean representing current relation + """ + user = self._get_resource(_user.User, user) + group = self._get_resource(_group.Group, group) + return group.check_user(self, user) + + def group_users(self, group, **attrs): + """List users in a group + + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param attrs: Only password_expires_at can be filter for result. 
+ + :return: List of :class:`~openstack.identity.v3.user.User` + """ + group = self._get_resource(_group.Group, group) + base_path = utils.urljoin(group.base_path, group.id, 'users') + users = self._list(_user.User, base_path=base_path, **attrs) + return users + + # ========== Policies ========== + def create_policy(self, **attrs): """Create a new policy from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.policy.Policy`, - comprised of the properties on the Policy class. + a :class:`~openstack.identity.v3.policy.Policy`, + comprised of the properties on the Policy class. :returns: The results of policy creation :rtype: :class:`~openstack.identity.v3.policy.Policy` @@ -360,12 +626,12 @@ def delete_policy(self, policy, ignore_missing=True): """Delete a policy :param policy: The value can be either the ID of a policy or a - :class:`~openstack.identity.v3.policy.Policy` instance. + :class:`~openstack.identity.v3.policy.Policy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the policy does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent policy. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the policy does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent policy. :returns: ``None`` """ @@ -376,58 +642,61 @@ def find_policy(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a policy. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. 
+ When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.policy.Policy` or None """ - return self._find(_policy.Policy, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _policy.Policy, name_or_id, ignore_missing=ignore_missing + ) def get_policy(self, policy): """Get a single policy :param policy: The value can be the ID of a policy or a - :class:`~openstack.identity.v3.policy.Policy` instance. + :class:`~openstack.identity.v3.policy.Policy` instance. :returns: One :class:`~openstack.identity.v3.policy.Policy` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_policy.Policy, policy) def policies(self, **query): """Retrieve a generator of policies - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of policy instances. :rtype: :class:`~openstack.identity.v3.policy.Policy` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_policy.Policy, paginated=False, **query) + return self._list(_policy.Policy, **query) def update_policy(self, policy, **attrs): """Update a policy :param policy: Either the ID of a policy or a - :class:`~openstack.identity.v3.policy.Policy` instance. - :attrs kwargs: The attributes to update on the policy represented - by ``value``. + :class:`~openstack.identity.v3.policy.Policy` instance. + :param attrs: The attributes to update on the policy represented + by ``policy``. 
:returns: The updated policy :rtype: :class:`~openstack.identity.v3.policy.Policy` """ return self._update(_policy.Policy, policy, **attrs) + # ========== Project ========== + def create_project(self, **attrs): """Create a new project from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.project.Project`, - comprised of the properties on the Project class. + a :class:`~openstack.identity.v3.project.Project`, + comprised of the properties on the Project class. :returns: The results of project creation :rtype: :class:`~openstack.identity.v3.project.Project` @@ -440,28 +709,32 @@ def delete_project(self, project, ignore_missing=True): :param project: The value can be either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the project does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent project. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the project does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent project. :returns: ``None`` """ self._delete(_project.Project, project, ignore_missing=ignore_missing) - def find_project(self, name_or_id, ignore_missing=True): + def find_project(self, name_or_id, ignore_missing=True, **query): """Find a single project :param name_or_id: The name or ID of a project. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. 
+ When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.project.Project` or None """ - return self._find(_project.Project, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _project.Project, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_project(self, project): """Get a single project @@ -470,42 +743,76 @@ def get_project(self, project): :class:`~openstack.identity.v3.project.Project` instance. :returns: One :class:`~openstack.identity.v3.project.Project` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_project.Project, project) def projects(self, **query): """Retrieve a generator of projects - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of project instances. :rtype: :class:`~openstack.identity.v3.project.Project` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_project.Project, paginated=False, **query) + return self._list(_project.Project, **query) + + def user_projects(self, user, **query): + """Retrieve a generator of projects to which the user has authorization + to access. + + :param user: Either the user id or an instance of + :class:`~openstack.identity.v3.user.User` + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of project instances. 
+ :rtype: :class:`~openstack.identity.v3.project.UserProject` + """ + user = self._get_resource(_user.User, user) + return self._list(_project.UserProject, user_id=user.id, **query) + + def endpoint_projects(self, endpoint, **query): + """Retrieve a generator of projects which are associated with the + endpoint. + + :param endpoint: Either the endpoint ID or an instance of + :class:`~openstack.identity.v3.endpoint.Endpoint` + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of project instances. + :rtype: :class:`~openstack.identity.v3.project.EndpointProject` + """ + endpoint_id = self._get_resource(_endpoint.Endpoint, endpoint).id + return self._list( + _project.EndpointProject, endpoint_id=endpoint_id, **query + ) def update_project(self, project, **attrs): """Update a project :param project: Either the ID of a project or a :class:`~openstack.identity.v3.project.Project` instance. - :attrs kwargs: The attributes to update on the project represented - by ``value``. + :param attrs: The attributes to update on the project represented + by ``project``. :returns: The updated project :rtype: :class:`~openstack.identity.v3.project.Project` """ return self._update(_project.Project, project, **attrs) + # ========== Services ========== + def create_service(self, **attrs): """Create a new service from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.service.Service`, - comprised of the properties on the Service class. + a :class:`~openstack.identity.v3.service.Service`, + comprised of the properties on the Service class. :returns: The results of service creation :rtype: :class:`~openstack.identity.v3.service.Service` @@ -518,10 +825,10 @@ def delete_service(self, service, ignore_missing=True): :param service: The value can be either the ID of a service or a :class:`~openstack.identity.v3.service.Service` instance. 
:param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the service does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent service. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the service does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent service. :returns: ``None`` """ @@ -532,14 +839,15 @@ def find_service(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a service. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.service.Service` or None """ - return self._find(_service.Service, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _service.Service, name_or_id, ignore_missing=ignore_missing + ) def get_service(self, service): """Get a single service @@ -548,42 +856,44 @@ def get_service(self, service): :class:`~openstack.identity.v3.service.Service` instance. :returns: One :class:`~openstack.identity.v3.service.Service` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_service.Service, service) def services(self, **query): """Retrieve a generator of services - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. 
+ :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of service instances. :rtype: :class:`~openstack.identity.v3.service.Service` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_service.Service, paginated=False, **query) + return self._list(_service.Service, **query) def update_service(self, service, **attrs): """Update a service :param service: Either the ID of a service or a :class:`~openstack.identity.v3.service.Service` instance. - :attrs kwargs: The attributes to update on the service represented - by ``value``. + :param attrs: The attributes to update on the service represented + by ``service``. :returns: The updated service :rtype: :class:`~openstack.identity.v3.service.Service` """ return self._update(_service.Service, service, **attrs) + # ========== Users ========== + def create_user(self, **attrs): """Create a new user from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.user.User`, - comprised of the properties on the User class. + a :class:`~openstack.identity.v3.user.User`, + comprised of the properties on the User class. :returns: The results of user creation :rtype: :class:`~openstack.identity.v3.user.User` @@ -594,74 +904,129 @@ def delete_user(self, user, ignore_missing=True): """Delete a user :param user: The value can be either the ID of a user or a - :class:`~openstack.identity.v3.user.User` instance. + :class:`~openstack.identity.v3.user.User` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the user does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent user. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the user does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent user. :returns: ``None`` """ self._delete(_user.User, user, ignore_missing=ignore_missing) - def find_user(self, name_or_id, ignore_missing=True): + def find_user(self, name_or_id, ignore_missing=True, **query): """Find a single user :param name_or_id: The name or ID of a user. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.user.User` or None """ - return self._find(_user.User, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _user.User, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_user(self, user): """Get a single user :param user: The value can be the ID of a user or a - :class:`~openstack.identity.v3.user.User` instance. + :class:`~openstack.identity.v3.user.User` instance. :returns: One :class:`~openstack.identity.v3.user.User` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" return self._get(_user.User, user) + def user_groups(self, user): + """List groups a user is in + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance + + :return: List of :class:`~openstack.identity.v3.group.group` + """ + user_id = self._get_resource(_user.User, user).id + groups = self._list(_group.UserGroup, user_id=user_id) + return groups + def users(self, **query): """Retrieve a generator of users - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of user instances. :rtype: :class:`~openstack.identity.v3.user.User` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_user.User, paginated=False, **query) + return self._list(_user.User, **query) def update_user(self, user, **attrs): """Update a user :param user: Either the ID of a user or a - :class:`~openstack.identity.v3.user.User` instance. - :attrs kwargs: The attributes to update on the user represented - by ``value``. + :class:`~openstack.identity.v3.user.User` instance. + :param attrs: The attributes to update on the user represented + by ``attrs``. :returns: The updated user :rtype: :class:`~openstack.identity.v3.user.User` """ return self._update(_user.User, user, **attrs) + # ========== Tokens ========== + + def validate_token( + self, token: str, nocatalog: bool = False, allow_expired: bool = False + ) -> _token.Token: + """Validate a token + + :param token: The token to validate. + :param nocatalog: Whether the returned token should not include a + catalog. + :param allow_expired: Whether to allow expired tokens. + + :returns: A :class:`~openstack.identity.v3.token.Token`. 
+ """ + return _token.Token.validate( + self, token, nocatalog=nocatalog, allow_expired=allow_expired + ) + + def check_token(self, token: str, allow_expired: bool = False) -> bool: + """Check if a token is valid. + + :param token: The token to check. + :param allow_expired: Whether to allow expired tokens. + + :returns: True if valid, else False. + """ + return _token.Token.check(self, token, allow_expired=allow_expired) + + def revoke_token(self, token: str) -> None: + """Revoke a token. + + :param token: The token to revoke. + + :returns: None + """ + _token.Token.revoke(self, token) + + # ========== Trusts ========== + def create_trust(self, **attrs): """Create a new trust from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.trust.Trust`, - comprised of the properties on the Trust class. + a :class:`~openstack.identity.v3.trust.Trust`, + comprised of the properties on the Trust class. :returns: The results of trust creation :rtype: :class:`~openstack.identity.v3.trust.Trust` @@ -672,12 +1037,12 @@ def delete_trust(self, trust, ignore_missing=True): """Delete a trust :param trust: The value can be either the ID of a trust or a - :class:`~openstack.identity.v3.trust.Trust` instance. + :class:`~openstack.identity.v3.trust.Trust` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the credential does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent credential. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the credential does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent credential. :returns: ``None`` """ @@ -688,45 +1053,48 @@ def find_trust(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a trust. 
:param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.identity.v3.trust.Trust` or None """ - return self._find(_trust.Trust, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _trust.Trust, name_or_id, ignore_missing=ignore_missing + ) def get_trust(self, trust): """Get a single trust :param trust: The value can be the ID of a trust or a - :class:`~openstack.identity.v3.trust.Trust` instance. + :class:`~openstack.identity.v3.trust.Trust` instance. :returns: One :class:`~openstack.identity.v3.trust.Trust` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_trust.Trust, trust) def trusts(self, **query): """Retrieve a generator of trusts - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of trust instances. :rtype: :class:`~openstack.identity.v3.trust.Trust` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_trust.Trust, paginated=False, **query) + return self._list(_trust.Trust, **query) + + # ========== Regions ========== def create_region(self, **attrs): """Create a new region from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.region.Region`, - comprised of the properties on the Region class. 
+ a :class:`~openstack.identity.v3.region.Region`, + comprised of the properties on the Region class. :returns: The results of region creation. :rtype: :class:`~openstack.identity.v3.region.Region` @@ -737,12 +1105,12 @@ def delete_region(self, region, ignore_missing=True): """Delete a region :param region: The value can be either the ID of a region or a - :class:`~openstack.identity.v3.region.Region` instance. + :class:`~openstack.identity.v3.region.Region` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the region does not exist. - When set to ``True``, no exception will be thrown when - attempting to delete a nonexistent region. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the region does not exist. + When set to ``True``, no exception will be thrown when + attempting to delete a nonexistent region. :returns: ``None`` """ @@ -753,58 +1121,66 @@ def find_region(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a region. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the region does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent region. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the region does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent region. 
:returns: One :class:`~openstack.identity.v3.region.Region` or None """ - return self._find(_region.Region, name_or_id, - ignore_missing=ignore_missing) + warnings.warn( + "find_region is deprecated and will be removed in a future " + "release; please use get_region instead.", + os_warnings.RemovedInSDK60Warning, + ) + return self._find( + _region.Region, name_or_id, ignore_missing=ignore_missing + ) def get_region(self, region): """Get a single region :param region: The value can be the ID of a region or a - :class:`~openstack.identity.v3.region.Region` instance. + :class:`~openstack.identity.v3.region.Region` instance. :returns: One :class:`~openstack.identity.v3.region.Region` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no matching region can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no matching region can be found. """ return self._get(_region.Region, region) def regions(self, **query): """Retrieve a generator of regions - :param kwargs \*\*query: Optional query parameters to be sent to limit - the regions being returned. + :param kwargs query: Optional query parameters to be sent to limit + the regions being returned. :returns: A generator of region instances. :rtype: :class:`~openstack.identity.v3.region.Region` """ # TODO(briancurtin): This is paginated but requires base list changes. - return self._list(_region.Region, paginated=False, **query) + return self._list(_region.Region, **query) def update_region(self, region, **attrs): """Update a region :param region: Either the ID of a region or a - :class:`~openstack.identity.v3.region.Region` instance. - :attrs kwargs: The attributes to update on the region represented - by ``value``. + :class:`~openstack.identity.v3.region.Region` instance. + :param attrs: The attributes to update on the region represented + by ``region``. :returns: The updated region. 
:rtype: :class:`~openstack.identity.v3.region.Region` """ return self._update(_region.Region, region, **attrs) + # ========== Roles ========== + def create_role(self, **attrs): """Create a new role from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.identity.v3.role.Role`, - comprised of the properties on the Role class. + a :class:`~openstack.identity.v3.role.Role`, + comprised of the properties on the Role class. :returns: The results of role creation. :rtype: :class:`~openstack.identity.v3.role.Role` @@ -815,63 +1191,1326 @@ def delete_role(self, role, ignore_missing=True): """Delete a role :param role: The value can be either the ID of a role or a - :class:`~openstack.identity.v3.role.Role` instance. + :class:`~openstack.identity.v3.role.Role` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the role does not exist. - When set to ``True``, no exception will be thrown when - attempting to delete a nonexistent role. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the role does not exist. + When set to ``True``, no exception will be thrown when + attempting to delete a nonexistent role. :returns: ``None`` """ self._delete(_role.Role, role, ignore_missing=ignore_missing) - def find_role(self, name_or_id, ignore_missing=True): + def find_role(self, name_or_id, ignore_missing=True, **query): """Find a single role :param name_or_id: The name or ID of a role. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the role does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent role. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the role does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent role. 
:returns: One :class:`~openstack.identity.v3.role.Role` or None """ - return self._find(_role.Role, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _role.Role, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_role(self, role): """Get a single role :param role: The value can be the ID of a role or a - :class:`~openstack.identity.v3.role.Role` instance. + :class:`~openstack.identity.v3.role.Role` instance. :returns: One :class:`~openstack.identity.v3.role.Role` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no matching role can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no matching role can be found. """ return self._get(_role.Role, role) def roles(self, **query): """Retrieve a generator of roles - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. The options - are: domain_id, name. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. The options + are: domain_id, name. :return: A generator of role instances. :rtype: :class:`~openstack.identity.v3.role.Role` """ - return self._list(_role.Role, paginated=False, **query) + return self._list(_role.Role, **query) def update_role(self, role, **attrs): """Update a role :param role: Either the ID of a role or a - :class:`~openstack.identity.v3.role.Role` instance. + :class:`~openstack.identity.v3.role.Role` instance. :param dict kwargs: The attributes to update on the role represented - by ``value``. Only name can be updated + by ``value``. Only name can be updated :returns: The updated role. 
:rtype: :class:`~openstack.identity.v3.role.Role` """ return self._update(_role.Role, role, **attrs) + + # ========== Role assignments ========== + + def role_assignments_filter( + self, domain=None, project=None, system=None, group=None, user=None + ): + """Retrieve a generator of roles assigned to user/group + + :param domain: Either the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` + instance. + :param system: Either the system name or a + :class:`~openstack.identity.v3.system.System` + instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :return: A generator of role instances. + :rtype: :class:`~openstack.identity.v3.role.Role` + """ + if domain and project and system: + raise exception.InvalidRequest( + 'Only one of domain, project, or system can be specified' + ) + + if domain is None and project is None and system is None: + raise exception.InvalidRequest( + 'Either domain, project, or system should be specified' + ) + + if group and user: + raise exception.InvalidRequest( + 'Only one of group or user can be specified' + ) + + if group is None and user is None: + raise exception.InvalidRequest( + 'Either group or user should be specified' + ) + + if domain: + domain_id = resource.Resource._get_id(domain) + if group: + group_id = resource.Resource._get_id(group) + return self._list( + _role_domain_group_assignment.RoleDomainGroupAssignment, + domain_id=domain_id, + group_id=group_id, + ) + else: + user_id = resource.Resource._get_id(user) + return self._list( + _role_domain_user_assignment.RoleDomainUserAssignment, + domain_id=domain_id, + user_id=user_id, + ) + elif project: + project_id = resource.Resource._get_id(project) + if group: + group_id = 
resource.Resource._get_id(group) + return self._list( + _role_project_group_assignment.RoleProjectGroupAssignment, + project_id=project_id, + group_id=group_id, + ) + else: + user_id = resource.Resource._get_id(user) + return self._list( + _role_project_user_assignment.RoleProjectUserAssignment, + project_id=project_id, + user_id=user_id, + ) + else: + system_id = resource.Resource._get_id(system) + if group: + group_id = resource.Resource._get_id(group) + return self._list( + _role_system_group_assignment.RoleSystemGroupAssignment, + system_id=system_id, + group_id=group_id, + ) + else: + user_id = resource.Resource._get_id(user) + return self._list( + _role_system_user_assignment.RoleSystemUserAssignment, + system_id=system_id, + user_id=user_id, + ) + + def role_assignments(self, **query): + """Retrieve a generator of role assignments + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. The options + are: group_id, role_id, scope_domain_id, + scope_project_id, inherited_to, user_id, include_names, + include_subtree. + :return: + :class:`~openstack.identity.v3.role_assignment.RoleAssignment` + """ + return self._list(_role_assignment.RoleAssignment, **query) + + def assign_domain_role_to_user( + self, domain, user, role, *, inherited=False + ): + """Assign role to user on a domain + + :param domain: Either the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. 
+ :return: ``None`` + """ + domain = self._get_resource(_domain.Domain, domain) + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + domain.assign_role_to_user(self, user, role, inherited) + + def unassign_domain_role_from_user( + self, domain, user, role, *, inherited=False + ): + """Unassign role from user on a domain + + :param domain: Either the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. + :return: ``None`` + """ + domain = self._get_resource(_domain.Domain, domain) + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + domain.unassign_role_from_user(self, user, role, inherited) + + def validate_user_has_domain_role( + self, domain, user, role, *, inherited=False + ): + """Validates that a user has a role on a domain + + :param domain: Either the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :returns: True if user has role in domain + """ + domain = self._get_resource(_domain.Domain, domain) + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + return domain.validate_user_has_role(self, user, role, inherited) + + def assign_domain_role_to_group( + self, domain, group, role, *, inherited=False + ): + """Assign role to group on a domain + + :param domain: Either the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. 
+ :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. + :return: ``None`` + """ + domain = self._get_resource(_domain.Domain, domain) + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + domain.assign_role_to_group(self, group, role, inherited) + + def unassign_domain_role_from_group( + self, domain, group, role, *, inherited=False + ): + """Unassign role from group on a domain + + :param domain: Either the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. + :return: ``None`` + """ + domain = self._get_resource(_domain.Domain, domain) + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + domain.unassign_role_from_group(self, group, role, inherited) + + def validate_group_has_domain_role( + self, domain, group, role, *, inherited=False + ): + """Validates that a group has a role on a domain + + :param domain: Either the ID of a domain or a + :class:`~openstack.identity.v3.domain.Domain` instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. 
+ :returns: True if group has role on domain + """ + domain = self._get_resource(_domain.Domain, domain) + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + return domain.validate_group_has_role(self, group, role, inherited) + + def assign_project_role_to_user( + self, project, user, role, *, inherited=False + ): + """Assign role to user on a project + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` + instance. + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. + :return: ``None`` + """ + project = self._get_resource(_project.Project, project) + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + project.assign_role_to_user(self, user, role, inherited) + + def unassign_project_role_from_user( + self, project, user, role, *, inherited=False + ): + """Unassign role from user on a project + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` + instance. + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. 
+ :return: ``None`` + """ + project = self._get_resource(_project.Project, project) + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + project.unassign_role_from_user(self, user, role, inherited) + + def validate_user_has_project_role( + self, project, user, role, *, inherited=False + ): + """Validates that a user has a role on a project + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` + instance. + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :returns: True if user has role in project + """ + project = self._get_resource(_project.Project, project) + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + return project.validate_user_has_role(self, user, role, inherited) + + def assign_project_role_to_group( + self, project, group, role, *, inherited=False + ): + """Assign role to group on a project + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` + instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. + :return: ``None`` + """ + project = self._get_resource(_project.Project, project) + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + project.assign_role_to_group(self, group, role, inherited) + + def unassign_project_role_from_group( + self, project, group, role, *, inherited=False + ): + """Unassign role from group on a project + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` + instance. 
+ :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param bool inherited: Whether the role assignment is inherited. + :return: ``None`` + """ + project = self._get_resource(_project.Project, project) + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + project.unassign_role_from_group(self, group, role, inherited) + + def validate_group_has_project_role( + self, project, group, role, *, inherited=False + ): + """Validates that a group has a role on a project + + :param project: Either the ID of a project or a + :class:`~openstack.identity.v3.project.Project` + instance. + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :returns: True if group has role in project + """ + project = self._get_resource(_project.Project, project) + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + return project.validate_group_has_role(self, group, role, inherited) + + def assign_system_role_to_user(self, user, role, system): + """Assign a role to user on a system + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. 
+ :param system: The system name + :return: ``None`` + """ + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + system = self._get_resource(_system.System, system) + system.assign_role_to_user(self, user, role) + + def unassign_system_role_from_user(self, user, role, system): + """Unassign a role from user on a system + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param system: The system name + :return: ``None`` + """ + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + system = self._get_resource(_system.System, system) + system.unassign_role_from_user(self, user, role) + + def validate_user_has_system_role(self, user, role, system): + """Validates that a user has a role on a system + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param system: The system name + :returns: True if user has role in system + """ + user = self._get_resource(_user.User, user) + role = self._get_resource(_role.Role, role) + system = self._get_resource(_system.System, system) + return system.validate_user_has_role(self, user, role) + + def assign_system_role_to_group(self, group, role, system): + """Assign a role to group on a system + + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. 
+ :param system: The system name + :return: ``None`` + """ + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + system = self._get_resource(_system.System, system) + system.assign_role_to_group(self, group, role) + + def unassign_system_role_from_group(self, group, role, system): + """Unassign a role from group on a system + + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param system: The system name + :return: ``None`` + """ + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + system = self._get_resource(_system.System, system) + system.unassign_role_from_group(self, group, role) + + def validate_group_has_system_role(self, group, role, system): + """Validates that a group has a role on a system + + :param group: Either the ID of a group or a + :class:`~openstack.identity.v3.group.Group` instance. + :param role: Either the ID of a role or a + :class:`~openstack.identity.v3.role.Role` instance. + :param system: The system name + :returns: True if group has role on system + """ + group = self._get_resource(_group.Group, group) + role = self._get_resource(_role.Role, role) + system = self._get_resource(_system.System, system) + return system.validate_group_has_role(self, group, role) + + # ========== Registered limits ========== + + def registered_limits(self, **query): + """Retrieve a generator of registered_limits + + :param kwargs query: Optional query parameters to be sent to limit + the registered_limits being returned. + + :returns: A generator of registered_limits instances. 
+ :rtype: + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` + """ + return self._list(_registered_limit.RegisteredLimit, **query) + + def get_registered_limit(self, registered_limit): + """Get a single registered_limit + + :param registered_limit: The value can be the ID of a registered_limit + or a + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` + instance. + + :returns: One + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_registered_limit.RegisteredLimit, registered_limit) + + def create_registered_limit(self, **attrs): + """Create a new registered_limit from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit`, + comprised of the properties on the RegisteredLimit class. + + :returns: The results of registered_limit creation. + :rtype: + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` + """ + return self._create(_registered_limit.RegisteredLimit, **attrs) + + def update_registered_limit(self, registered_limit, **attrs): + """Update a registered_limit + + :param registered_limit: Either the ID of a registered_limit. or a + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` + instance. + :param dict kwargs: The attributes to update on the registered_limit + represented by ``value``. + + :returns: The updated registered_limit. + :rtype: + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` + """ + return self._update( + _registered_limit.RegisteredLimit, registered_limit, **attrs + ) + + def delete_registered_limit(self, registered_limit, ignore_missing=True): + """Delete a registered_limit + + :param registered_limit: The value can be either the ID of a + registered_limit or a + :class:`~openstack.identity.v3.registered_limit.RegisteredLimit` + instance. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the registered_limit does not exist. When set to ``True``, no + exception will be thrown when attempting to delete a nonexistent + registered_limit. + + :returns: ``None`` + """ + self._delete( + _registered_limit.RegisteredLimit, + registered_limit, + ignore_missing=ignore_missing, + ) + + # ========== Limits ========== + + def limits(self, **query): + """Retrieve a generator of limits + + :param kwargs query: Optional query parameters to be sent to limit + the limits being returned. + + :returns: A generator of limits instances. + :rtype: :class:`~openstack.identity.v3.limit.Limit` + """ + return self._list(_limit.Limit, **query) + + def get_limit(self, limit): + """Get a single limit + + :param limit: The value can be the ID of a limit + or a :class:`~openstack.identity.v3.limit.Limit` instance. + + :returns: One :class:`~openstack.identity.v3.limit.Limit` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + return self._get(_limit.Limit, limit) + + def create_limit(self, **attrs): + """Create a new limit from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.identity.v3.limit.Limit`, comprised of the + properties on the Limit class. + + :returns: The results of limit creation. + :rtype: :class:`~openstack.identity.v3.limit.Limit` + """ + return self._create(_limit.Limit, **attrs) + + def update_limit(self, limit, **attrs): + """Update a limit + + :param limit: Either the ID of a limit. or a + :class:`~openstack.identity.v3.limit.Limit` instance. + :param dict kwargs: The attributes to update on the limit represented + by ``value``. + + :returns: The updated limit. 
+ :rtype: :class:`~openstack.identity.v3.limit.Limit` + """ + return self._update(_limit.Limit, limit, **attrs) + + def delete_limit(self, limit, ignore_missing=True): + """Delete a limit + + :param limit: The value can be either the ID of a limit or a + :class:`~openstack.identity.v3.limit.Limit` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the limit does not exist. When set to ``True``, no exception + will be thrown when attempting to delete a nonexistent limit. + + :returns: ``None`` + """ + self._delete(_limit.Limit, limit, ignore_missing=ignore_missing) + + # ========== Application credentials ========== + + def application_credentials(self, user, **query): + """Retrieve a generator of application credentials + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + + :param kwargs query: Optional query parameters to be sent to + limit the resources being returned. + + :returns: A generator of application credentials instances. + :rtype: + :class:`~openstack.identity.v3.application_credential.ApplicationCredential` + """ + user = self._get_resource(_user.User, user) + return self._list( + _application_credential.ApplicationCredential, + user_id=user.id, + **query, + ) + + def get_application_credential(self, user, application_credential): + """Get a single application credential + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + + :param application_credential: The value can be the ID of a + application credential or a + :class:`~openstack.identity.v3.application_credential.ApplicationCredential` + instance. + + :returns: One + :class:`~openstack.identity.v3.application_credential.ApplicationCredential` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. 
+ """ + user = self._get_resource(_user.User, user) + return self._get( + _application_credential.ApplicationCredential, + application_credential, + user_id=user.id, + ) + + def create_application_credential(self, user, name, **attrs): + """Create a new application credential from attributes + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param name: The name of the application credential which is + unique to the user. + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.identity.v3.application_credential.ApplicationCredential`, + comprised of the properties on the ApplicationCredential class. + + + :returns: The results of application credential creation. + :rtype: + :class:`~openstack.identity.v3.application_credential.ApplicationCredential` + """ + + user = self._get_resource(_user.User, user) + return self._create( + _application_credential.ApplicationCredential, + name=name, + user_id=user.id, + **attrs, + ) + + def find_application_credential( + self, + user, + name_or_id, + ignore_missing=True, + **query, + ): + """Find a single application credential + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param name_or_id: The name or ID of an application credential. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
+ + :returns: One + :class:`~openstack.identity.v3.application_credential.ApplicationCredential` + or None + """ + user = self._get_resource(_user.User, user) + return self._find( + _application_credential.ApplicationCredential, + user_id=user.id, + name_or_id=name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def delete_application_credential( + self, user, application_credential, ignore_missing=True + ): + """Delete an application credential + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param application credential: The value can be either the ID of an + application credential or a + :class:`~openstack.identity.v3.application_credential.ApplicationCredential` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the application credential does not exist. When set to + ``True``, no exception will be thrown when attempting to delete + a nonexistent application credential. + + :returns: ``None`` + """ + user = self._get_resource(_user.User, user) + self._delete( + _application_credential.ApplicationCredential, + application_credential, + user_id=user.id, + ignore_missing=ignore_missing, + ) + + # ========== Federation protocols ========== + + def create_federation_protocol(self, idp_id, **attrs): + """Create a new federation protocol from attributes + + :param idp_id: The ID of the identity provider or a + :class:`~openstack.identity.v3.identity_provider.IdentityProvider` + representing the identity provider the protocol is to be + attached to. + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol`, + comprised of the properties on the + FederationProtocol class. 
+ + :returns: The results of federation protocol creation + :rtype: + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + """ + + idp_cls = _identity_provider.IdentityProvider + if isinstance(idp_id, idp_cls): + idp_id = idp_id.id + return self._create( + _federation_protocol.FederationProtocol, idp_id=idp_id, **attrs + ) + + def delete_federation_protocol( + self, idp_id, protocol, ignore_missing=True + ): + """Delete a federation protocol + + :param idp_id: The ID of the identity provider or a + :class:`~openstack.identity.v3.identity_provider.IdentityProvider` + representing the identity provider the protocol is attached to. + Can be None if protocol is a + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + instance. + :param protocol: The ID of a federation protocol or a + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the federation protocol does not exist. When set to + ``True``, no exception will be set when attempting to delete a + nonexistent federation protocol. + + :returns: ``None`` + """ + cls = _federation_protocol.FederationProtocol + if idp_id is None and isinstance(protocol, cls): + idp_id = protocol.idp_id + idp_cls = _identity_provider.IdentityProvider + if isinstance(idp_id, idp_cls): + idp_id = idp_id.id + self._delete( + cls, protocol, ignore_missing=ignore_missing, idp_id=idp_id + ) + + def find_federation_protocol(self, idp_id, protocol, ignore_missing=True): + """Find a single federation protocol + + :param idp_id: The ID of the identity provider or a + :class:`~openstack.identity.v3.identity_provider.IdentityProvider` + representing the identity provider the protocol is attached to. + :param protocol: The name or ID of a federation protocol. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :returns: One federation protocol or None + :rtype: + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + """ + idp_cls = _identity_provider.IdentityProvider + if isinstance(idp_id, idp_cls): + idp_id = idp_id.id + return self._find( + _federation_protocol.FederationProtocol, + protocol, + ignore_missing=ignore_missing, + idp_id=idp_id, + ) + + def get_federation_protocol(self, idp_id, protocol): + """Get a single federation protocol + + :param idp_id: The ID of the identity provider or a + :class:`~openstack.identity.v3.identity_provider.IdentityProvider` + representing the identity provider the protocol is attached to. + Can be None if protocol is a + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + :param protocol: The value can be the ID of a federation protocol or a + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + instance. + + :returns: One federation protocol + :rtype: + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + cls = _federation_protocol.FederationProtocol + if idp_id is None and isinstance(protocol, cls): + idp_id = protocol.idp_id + idp_cls = _identity_provider.IdentityProvider + if isinstance(idp_id, idp_cls): + idp_id = idp_id.id + return self._get(cls, protocol, idp_id=idp_id) + + def federation_protocols(self, idp_id, **query): + """Retrieve a generator of federation protocols + + :param idp_id: The ID of the identity provider or a + :class:`~openstack.identity.v3.identity_provider.IdentityProvider` + representing the identity provider the protocol is attached to. 
+ :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of federation protocol instances. + :rtype: + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + """ + idp_cls = _identity_provider.IdentityProvider + if isinstance(idp_id, idp_cls): + idp_id = idp_id.id + return self._list( + _federation_protocol.FederationProtocol, idp_id=idp_id, **query + ) + + def update_federation_protocol(self, idp_id, protocol, **attrs): + """Update a federation protocol + + :param idp_id: The ID of the identity provider or a + :class:`~openstack.identity.v3.identity_provider.IdentityProvider` + representing the identity provider the protocol is attached to. + Can be None if protocol is a + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + :param protocol: Either the ID of a federation protocol or a + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + instance. + :param attrs: The attributes to update on the federation protocol + represented by ``protocol``. + + :returns: The updated federation protocol + :rtype: + :class:`~openstack.identity.v3.federation_protocol.FederationProtocol` + """ + cls = _federation_protocol.FederationProtocol + if (idp_id is None) and (isinstance(protocol, cls)): + idp_id = protocol.idp_id + idp_cls = _identity_provider.IdentityProvider + if isinstance(idp_id, idp_cls): + idp_id = idp_id.id + return self._update(cls, protocol, idp_id=idp_id, **attrs) + + # ========== Mappings ========== + + def create_mapping(self, **attrs): + """Create a new mapping from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.identity.v3.mapping.Mapping`, + comprised of the properties on the Mapping class. 
+ + :returns: The results of mapping creation + :rtype: :class:`~openstack.identity.v3.mapping.Mapping` + """ + return self._create(_mapping.Mapping, **attrs) + + def delete_mapping(self, mapping, ignore_missing=True): + """Delete a mapping + + :param mapping: The ID of a mapping or a + :class:`~openstack.identity.v3.mapping.Mapping` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the mapping does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent mapping. + + :returns: ``None`` + """ + self._delete(_mapping.Mapping, mapping, ignore_missing=ignore_missing) + + def find_mapping(self, name_or_id, ignore_missing=True): + """Find a single mapping + + :param name_or_id: The name or ID of a mapping. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.identity.v3.mapping.Mapping` or None + """ + warnings.warn( + "find_mapping is deprecated and will be removed in a future " + "release; please use get_mapping instead.", + os_warnings.RemovedInSDK60Warning, + ) + return self._find( + _mapping.Mapping, name_or_id, ignore_missing=ignore_missing + ) + + def get_mapping(self, mapping): + """Get a single mapping + + :param mapping: The value can be the ID of a mapping or a + :class:`~openstack.identity.v3.mapping.Mapping` + instance. + + :returns: One :class:`~openstack.identity.v3.mapping.Mapping` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_mapping.Mapping, mapping) + + def mappings(self, **query): + """Retrieve a generator of mappings + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
+
+        :returns: A generator of mapping instances.
+        :rtype: :class:`~openstack.identity.v3.mapping.Mapping`
+        """
+        return self._list(_mapping.Mapping, **query)
+
+    def update_mapping(self, mapping, **attrs):
+        """Update a mapping
+
+        :param mapping: Either the ID of a mapping or a
+            :class:`~openstack.identity.v3.mapping.Mapping` instance.
+        :param attrs: The attributes to update on the mapping represented
+            by ``mapping``.
+
+        :returns: The updated mapping
+        :rtype: :class:`~openstack.identity.v3.mapping.Mapping`
+        """
+        return self._update(_mapping.Mapping, mapping, **attrs)
+
+    # ========== Identity providers ==========
+
+    def create_identity_provider(self, **attrs):
+        """Create a new identity provider from attributes
+
+        :param dict attrs: Keyword arguments which will be used to create a
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+            comprised of the properties on the IdentityProvider class.
+
+        :returns: The results of identity provider creation
+        :rtype:
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+        """
+        return self._create(_identity_provider.IdentityProvider, **attrs)
+
+    def delete_identity_provider(self, identity_provider, ignore_missing=True):
+        """Delete an identity provider
+
+        :param identity_provider: The ID of an identity provider or a
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+            instance.
+        :param bool ignore_missing: When set to ``False``
+            :class:`~openstack.exceptions.NotFoundException` will be
+            raised when the identity provider does not exist.
+            When set to ``True``, no exception will be set when
+            attempting to delete a nonexistent identity provider.
+
+        :returns: ``None``
+        """
+        self._delete(
+            _identity_provider.IdentityProvider,
+            identity_provider,
+            ignore_missing=ignore_missing,
+        )
+
+    def find_identity_provider(self, name_or_id, ignore_missing=True):
+        """Find a single identity provider
+
+        :param name_or_id: The name or ID of an identity provider
+        :param bool ignore_missing: When set to ``False``
+            :class:`~openstack.exceptions.NotFoundException` will be
+            raised when the resource does not exist.
+            When set to ``True``, None will be returned when
+            attempting to find a nonexistent resource.
+        :returns: The details of an identity provider or None.
+        :rtype:
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+        """
+        warnings.warn(
+            "find_identity_provider is deprecated and will be removed in a "
+            "future release; please use get_identity_provider instead.",
+            os_warnings.RemovedInSDK60Warning,
+        )
+        return self._find(
+            _identity_provider.IdentityProvider,
+            name_or_id,
+            ignore_missing=ignore_missing,
+        )
+
+    def get_identity_provider(self, identity_provider):
+        """Get a single identity provider
+
+        :param identity_provider: The value can be the ID of an identity
+            provider or a
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+            instance.
+
+        :returns: The details of an identity provider.
+        :rtype:
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+        :raises: :class:`~openstack.exceptions.NotFoundException`
+            when no resource can be found.
+        """
+        return self._get(
+            _identity_provider.IdentityProvider, identity_provider
+        )
+
+    def identity_providers(self, **query):
+        """Retrieve a generator of identity providers
+
+        :param kwargs query: Optional query parameters to be sent to limit
+            the resources being returned.
+
+        :returns: A generator of identity provider instances.
+        :rtype:
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+        """
+        return self._list(_identity_provider.IdentityProvider, **query)
+
+    def update_identity_provider(self, identity_provider, **attrs):
+        """Update an identity provider
+
+        :param identity_provider: Either the ID of an identity provider or a
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+            instance.
+        :param attrs: The attributes to update on the identity_provider
+            represented by ``identity_provider``.
+
+        :returns: The updated identity provider.
+        :rtype:
+            :class:`~openstack.identity.v3.identity_provider.IdentityProvider`
+        """
+        return self._update(
+            _identity_provider.IdentityProvider, identity_provider, **attrs
+        )
+
+    # ========== Access rules ==========
+
+    def access_rules(self, user, **query):
+        """Retrieve a generator of access rules
+
+        :param user: Either the ID of a user or a
+            :class:`~openstack.identity.v3.user.User` instance.
+        :param kwargs query: Optional query parameters to be sent to
+            limit the resources being returned.
+
+        :returns: A generator of access rules instances.
+        :rtype: :class:`~openstack.identity.v3.access_rule.AccessRule`
+        """
+        user = self._get_resource(_user.User, user)
+        return self._list(_access_rule.AccessRule, user_id=user.id, **query)
+
+    def get_access_rule(self, user, access_rule):
+        """Get a single access rule
+
+        :param user: Either the ID of a user or a
+            :class:`~openstack.identity.v3.user.User` instance.
+        :param access_rule: The value can be the ID of an access rule or a
+            :class:`~.access_rule.AccessRule` instance.
+
+        :returns: One :class:`~.access_rule.AccessRule`
+        :raises: :class:`~openstack.exceptions.NotFoundException` when no
+            resource can be found.
+ """ + user = self._get_resource(_user.User, user) + return self._get(_access_rule.AccessRule, access_rule, user_id=user.id) + + def delete_access_rule(self, user, access_rule, ignore_missing=True): + """Delete an access rule + + :param user: Either the ID of a user or a + :class:`~openstack.identity.v3.user.User` instance. + :param access rule: The value can be either the ID of an + access rule or a :class:`~.access_rule.AccessRule` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the access rule does not exist. When set to ``True``, no + exception will be thrown when attempting to delete a nonexistent + access rule. + + :returns: ``None`` + """ + user = self._get_resource(_user.User, user) + self._delete( + _access_rule.AccessRule, + access_rule, + user_id=user.id, + ignore_missing=ignore_missing, + ) + + # ========== Service providers ========== + + def create_service_provider(self, **attrs): + """Create a new service provider from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.identity.v3.service_provider.ServiceProvider`, + comprised of the properties on the ServiceProvider class. + + :returns: The results of service provider creation + :rtype: + :class:`~openstack.identity.v3.service_provider.ServiceProvider` + """ + return self._create(_service_provider.ServiceProvider, **attrs) + + def delete_service_provider(self, service_provider, ignore_missing=True): + """Delete a service provider + + :param service_provider: The ID of a service provider or a + :class:`~openstack.identity.v3.service_provider.ServiceProvider` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the service provider does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent service provider. 
+
+        :returns: ``None``
+        """
+        self._delete(
+            _service_provider.ServiceProvider,
+            service_provider,
+            ignore_missing=ignore_missing,
+        )
+
+    def find_service_provider(self, name_or_id, ignore_missing=True):
+        """Find a single service provider
+
+        :param name_or_id: The name or ID of a service provider
+        :param bool ignore_missing: When set to ``False``
+            :class:`~openstack.exceptions.NotFoundException` will be raised
+            when the resource does not exist. When set to ``True``, None will
+            be returned when attempting to find a nonexistent resource.
+
+        :returns: The details of a service provider or None.
+        :rtype:
+            :class:`~openstack.identity.v3.service_provider.ServiceProvider`
+        """
+        return self._find(
+            _service_provider.ServiceProvider,
+            name_or_id,
+            ignore_missing=ignore_missing,
+        )
+
+    def get_service_provider(self, service_provider):
+        """Get a single service provider
+
+        :param service_provider: The value can be the ID of a service provider
+            or a
+            :class:`~openstack.identity.v3.service_provider.ServiceProvider`
+            instance.
+
+        :returns: The details of a service provider.
+        :rtype:
+            :class:`~openstack.identity.v3.service_provider.ServiceProvider`
+        :raises: :class:`~openstack.exceptions.NotFoundException`
+            when no resource can be found.
+        """
+        return self._get(_service_provider.ServiceProvider, service_provider)
+
+    def service_providers(self, **query):
+        """Retrieve a generator of service providers
+
+        :param kwargs query: Optional query parameters to be sent to limit
+            the resources being returned.
+
+        :returns: A generator of service provider instances.
+        :rtype:
+            :class:`~openstack.identity.v3.service_provider.ServiceProvider`
+        """
+        return self._list(_service_provider.ServiceProvider, **query)
+
+    def update_service_provider(self, service_provider, **attrs):
+        """Update a service provider
+
+        :param service_provider: Either the ID of a service provider or a
+            :class:`~openstack.identity.v3.service_provider.ServiceProvider`
+            instance.
+ :param attrs: The attributes to update on the service provider + represented by ``service_provider``. + + :returns: The updated service provider. + :rtype: + :class:`~openstack.identity.v3.service_provider.ServiceProvider` + """ + return self._update( + _service_provider.ServiceProvider, service_provider, **attrs + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/identity/v3/access_rule.py b/openstack/identity/v3/access_rule.py new file mode 100644 index 0000000000..ddd5d16582 --- /dev/null +++ b/openstack/identity/v3/access_rule.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class AccessRule(resource.Resource): + resource_key = 'access_rule' + resources_key = 'access_rules' + base_path = '/users/%(user_id)s/access_rules' + + # capabilities + allow_fetch = True + allow_delete = True + allow_list = True + + # Properties + #: The links for the access rule resource. + links = resource.Body('links') + #: Method that application credential is permitted to use. + # *Type: string* + method = resource.Body('method') + #: Path that the application credential is permitted to access. + # *Type: string* + path = resource.Body('path') + #: Service type identifier that application credential had access. + # *Type: string* + service = resource.Body('service') + #: User ID using access rule. *Type: string* + user_id = resource.URI('user_id') diff --git a/openstack/identity/v3/application_credential.py b/openstack/identity/v3/application_credential.py new file mode 100644 index 0000000000..8548aeedc8 --- /dev/null +++ b/openstack/identity/v3/application_credential.py @@ -0,0 +1,51 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class ApplicationCredential(resource.Resource): + resource_key = 'application_credential' + resources_key = 'application_credentials' + base_path = '/users/%(user_id)s/application_credentials' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: User ID using application credential. *Type: string* + user_id = resource.URI('user_id') + #: User object using application credential. *Type: string* + user = resource.Body('user') + #: The links for the application credential resource. + links = resource.Body('links') + #: name of the user. *Type: string* + name = resource.Body('name') + #: secret that application credential will be created with, if any. + # *Type: string* + secret = resource.Body('secret') + #: description of application credential's purpose. *Type: string* + description = resource.Body('description') + #: expire time of application credential. *Type: string* + expires_at = resource.Body('expires_at') + #: roles of the user. *Type: list* + roles = resource.Body('roles') + #: restricts the application credential. *Type: boolean* + unrestricted = resource.Body('unrestricted', type=bool) + #: ID of project. *Type: string* + project_id = resource.Body('project_id') + #: access rules for application credential. *Type: list* + access_rules = resource.Body('access_rules') diff --git a/openstack/identity/v3/credential.py b/openstack/identity/v3/credential.py index e492099791..48a38b827a 100644 --- a/openstack/identity/v3/credential.py +++ b/openstack/identity/v3/credential.py @@ -10,23 +10,26 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class Credential(resource.Resource): resource_key = 'credential' resources_key = 'credentials' base_path = '/credentials' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'type', + 'user_id', + ) # Properties #: Arbitrary blob of the credential data, to be parsed according to the diff --git a/openstack/identity/v3/domain.py b/openstack/identity/v3/domain.py index b7f330ad92..2f443b165c 100644 --- a/openstack/identity/v3/domain.py +++ b/openstack/identity/v3/domain.py @@ -10,23 +10,29 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack.common import tag +from openstack import resource +from openstack import utils -class Domain(resource.Resource): +class Domain(resource.Resource, tag.TagMixin): resource_key = 'domain' resources_key = 'domains' base_path = '/domains' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'name', + is_enabled='enabled', + **tag.TagMixin._tag_query_parameters, + ) # Properties #: The description of this domain. *Type: string* @@ -39,7 +45,102 @@ class Domain(resource.Resource): #: Re-enabling a domain does not re-enable pre-existing tokens. #: *Type: bool* is_enabled = resource.Body('enabled', type=bool) - #: The globally unique name of this domain. 
*Type: string* - name = resource.Body('name') + #: The resource options for the project. Available resource options are + #: immutable. + options = resource.Body('options', type=dict) #: The links related to the domain resource. links = resource.Body('links') + + def assign_role_to_user(self, session, user, role, inherited): + """Assign role to user on domain""" + url = utils.urljoin( + self.base_path, + self.id, + 'users', + user.id, + 'roles', + role.id, + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.put( + url, + ) + if resp.status_code == 204: + return True + return False + + def validate_user_has_role(self, session, user, role, inherited): + """Validates that a user has a role on a domain""" + url = utils.urljoin( + self.base_path, self.id, 'users', user.id, 'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.head( + url, + ) + if resp.status_code == 204: + return True + return False + + def unassign_role_from_user(self, session, user, role, inherited): + """Unassigns a role from a user on a domain""" + url = utils.urljoin( + self.base_path, self.id, 'users', user.id, 'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.delete( + url, + ) + if resp.status_code == 204: + return True + return False + + def assign_role_to_group(self, session, group, role, inherited): + """Assign role to group on domain""" + url = utils.urljoin( + self.base_path, + self.id, + 'groups', + group.id, + 'roles', + role.id, + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.put( + url, + ) + if resp.status_code == 204: + return True + return False + + def validate_group_has_role(self, session, group, role, inherited): + """Validates that a group has a role on a domain""" + url = utils.urljoin( + self.base_path, self.id, 'groups', group.id, 
'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.head( + url, + ) + if resp.status_code == 204: + return True + return False + + def unassign_role_from_group(self, session, group, role, inherited): + """Unassigns a role from a group on a domain""" + url = utils.urljoin( + self.base_path, self.id, 'groups', group.id, 'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.delete( + url, + ) + if resp.status_code == 204: + return True + return False diff --git a/openstack/identity/v3/domain_config.py b/openstack/identity/v3/domain_config.py new file mode 100644 index 0000000000..ba95185d37 --- /dev/null +++ b/openstack/identity/v3/domain_config.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class DomainConfigLDAP(resource.Resource): + #: The base distinguished name (DN) of LDAP. + user_tree_dn = resource.Body('user_tree_dn') + #: The LDAP URL. + url = resource.Body('url') + + +class DomainConfigDriver(resource.Resource): + #: The Identity backend driver. 
+ driver = resource.Body('driver') + + +class DomainConfig(resource.Resource): + resource_key = 'config' + base_path = '/domains/%(domain_id)s/config' + requires_id = False + create_requires_id = False + commit_method = 'PATCH' + create_method = 'PUT' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + + #: The domain ID. + domain_id = resource.URI('domain_id') + #: An identity object. + identity = resource.Body('identity', type=DomainConfigDriver) + #: The config object. + ldap = resource.Body('ldap', type=DomainConfigLDAP) diff --git a/openstack/identity/v3/endpoint.py b/openstack/identity/v3/endpoint.py index a088f7de89..0fb3fca9d4 100644 --- a/openstack/identity/v3/endpoint.py +++ b/openstack/identity/v3/endpoint.py @@ -10,23 +10,27 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class Endpoint(resource.Resource): resource_key = 'endpoint' resources_key = 'endpoints' base_path = '/endpoints' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'interface', + 'region_id', + 'service_id', + ) # Properties #: Describes the interface of the endpoint according to one of the @@ -53,3 +57,17 @@ class Endpoint(resource.Resource): service_id = resource.Body('service_id') #: Fully qualified URL of the service endpoint. 
*Type: string* url = resource.Body('url') + + +class ProjectEndpoint(Endpoint): + base_path = '/OS-EP-FILTER/projects/%(project_id)s/endpoints' + + #: The ID for the project from the URI of the resource + project_id = resource.URI('project_id') + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True diff --git a/openstack/identity/v3/federation_protocol.py b/openstack/identity/v3/federation_protocol.py new file mode 100644 index 0000000000..d9fe6f1154 --- /dev/null +++ b/openstack/identity/v3/federation_protocol.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class FederationProtocol(resource.Resource): + resource_key = 'protocol' + resources_key = 'protocols' + base_path = '/OS-FEDERATION/identity_providers/%(idp_id)s/protocols' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + create_exclude_id_from_body = True + create_method = 'PUT' + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'id', + ) + + # Properties + #: name of the protocol (read only) *Type: string* + name = resource.Body('id') + #: The ID of the identity provider the protocol is attached to. 
+ # *Type: string* + idp_id = resource.URI('idp_id') + #: The ID of the mapping used by this protocol + # *Type: string* + mapping_id = resource.Body('mapping_id') diff --git a/openstack/identity/v3/group.py b/openstack/identity/v3/group.py index b47c8cfd2b..5e1600b942 100644 --- a/openstack/identity/v3/group.py +++ b/openstack/identity/v3/group.py @@ -10,23 +10,28 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import exceptions +from openstack import resource +from openstack import utils class Group(resource.Resource): resource_key = 'group' resources_key = 'groups' base_path = '/groups' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'domain_id', + 'name', + ) # Properties #: The description of this group. *Type: string* @@ -38,3 +43,48 @@ class Group(resource.Resource): domain_id = resource.Body('domain_id') #: Unique group name, within the owning domain. 
*Type: string* name = resource.Body('name') + + def add_user(self, session, user): + """Add user to the group""" + url = utils.urljoin(self.base_path, self.id, 'users', user.id) + resp = session.put( + url, + ) + exceptions.raise_from_response(resp) + + def remove_user(self, session, user): + """Remove user from the group""" + url = utils.urljoin(self.base_path, self.id, 'users', user.id) + resp = session.delete( + url, + ) + exceptions.raise_from_response(resp) + + def check_user(self, session, user): + """Check whether user belongs to group""" + url = utils.urljoin(self.base_path, self.id, 'users', user.id) + resp = session.head( + url, + ) + if resp.status_code == 404: + # If we receive 404 - treat this as False, + # rather than returning exception + return False + exceptions.raise_from_response(resp) + if resp.status_code == 204: + return True + return False + + +class UserGroup(Group): + base_path = '/users/%(user_id)s/groups' + + #: The ID for the user from the URI of the resource + user_id = resource.URI('user_id') + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True diff --git a/openstack/identity/v3/identity_provider.py b/openstack/identity/v3/identity_provider.py new file mode 100644 index 0000000000..0e57afa596 --- /dev/null +++ b/openstack/identity/v3/identity_provider.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class IdentityProvider(resource.Resource): + resource_key = 'identity_provider' + resources_key = 'identity_providers' + base_path = '/OS-FEDERATION/identity_providers' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + create_method = 'PUT' + create_exclude_id_from_body = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'id', + is_enabled='enabled', + ) + + # Properties + #: The id of a domain associated with this identity provider. + # *Type: string* + domain_id = resource.Body('domain_id') + #: A description of this identity provider. *Type: string* + description = resource.Body('description') + #: If the identity provider is currently enabled. *Type: bool* + is_enabled = resource.Body('enabled', type=bool) + #: Remote IDs associated with the identity provider. *Type: list* + remote_ids = resource.Body('remote_ids', type=list) + #: The length of validity in minutes for group memberships. *Type: int* + authorization_ttl = resource.Body('authorization_ttl', type=int) + + #: The identifier of the identity provider (read only). *Type: string* + name = resource.Body('id') diff --git a/openstack/identity/v3/limit.py b/openstack/identity/v3/limit.py new file mode 100644 index 0000000000..891aed2b0d --- /dev/null +++ b/openstack/identity/v3/limit.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource + + +class Limit(resource.Resource): + resource_key = 'limit' + resources_key = 'limits' + base_path = '/limits' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'service_id', 'region_id', 'resource_name', 'project_id' + ) + + # Properties + #: User-facing description of the registered_limit. *Type: string* + description = resource.Body('description') + #: The links for the registered_limit resource. + links = resource.Body('links') + #: ID of service. *Type: string* + service_id = resource.Body('service_id') + #: ID of region, if any. *Type: string* + region_id = resource.Body('region_id') + #: The resource name. *Type: string* + resource_name = resource.Body('resource_name') + #: The resource limit value. *Type: int* + resource_limit = resource.Body('resource_limit') + #: ID of project. *Type: string* + project_id = resource.Body('project_id') + + def create( + self, + session, + prepend_key=True, + base_path=None, + *, + resource_request_key=None, + resource_response_key='limits', + microversion=None, + **params, + ): + return super().create( + session, + prepend_key=prepend_key, + base_path=base_path, + resource_request_key=resource_request_key, + resource_response_key=resource_response_key, + microversion=microversion, + **params, + ) + + def _prepare_request_body( + self, + patch, + prepend_key, + *, + resource_request_key=None, + ): + body = self._body.dirty + if prepend_key and self.resource_key is not None: + if patch: + body = {self.resource_key: body} + else: + # Keystone support bunch create for unified limit. So the + # request body for creating limit is a list instead of dict. 
+ body = {self.resources_key: [body]} + return body + + def _translate_response( + self, + response, + has_body=None, + error_message=None, + *, + resource_response_key=None, + ): + """Given a KSA response, inflate this instance with its data + + DELETE operations don't return a body, so only try to work + with a body when has_body is True. + + This method updates attributes that correspond to headers + and body on this instance and clears the dirty set. + """ + if has_body is None: + has_body = self.has_body + + exceptions.raise_from_response(response, error_message=error_message) + + if has_body: + try: + body = response.json() + if resource_response_key and resource_response_key in body: + body = body[resource_response_key] + elif self.resource_key and self.resource_key in body: + body = body[self.resource_key] + + # Keystone support bunch create for unified limit. So the + # response body for creating limit is a list instead of dict. + if isinstance(body, list): + body = body[0] + + # Do not allow keys called "self" through. Glance chose + # to name a key "self", so we need to pop it out because + # we can't send it through cls.existing and into the + # Resource initializer. "self" is already the first + # argument and is practically a reserved word. 
+ body.pop("self", None) + + body_attrs = self._consume_body_attrs(body) + if self._allow_unknown_attrs_in_body: + body_attrs.update(body) + self._unknown_attrs_in_body.update(body) + elif self._store_unknown_attrs_as_properties: + body_attrs = self._pack_attrs_under_properties( + body_attrs, body + ) + + self._body.attributes.update(body_attrs) + self._body.clean() + if self.commit_jsonpatch or self.allow_patch: + # We need the original body to compare against + self._original_body = body_attrs.copy() + except ValueError: + # Server returned not parse-able response (202, 204, etc) + # Do simply nothing + pass + + headers = self._consume_header_attrs(response.headers) + self._header.attributes.update(headers) + self._header.clean() + self._update_location() + dict.update(self, self.to_dict()) diff --git a/openstack/identity/v3/mapping.py b/openstack/identity/v3/mapping.py new file mode 100644 index 0000000000..8178fd53e1 --- /dev/null +++ b/openstack/identity/v3/mapping.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Mapping(resource.Resource): + resource_key = 'mapping' + resources_key = 'mappings' + base_path = '/OS-FEDERATION/mappings' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + create_method = 'PUT' + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters() + + # Properties + #: The rules of this mapping. 
*Type: list* + rules = resource.Body('rules', type=list) + #: The attribute mapping schema version. *Type: string* + schema_version = resource.Body('schema_version', type=str) + + #: The identifier of the mapping. *Type: string* + name = resource.Body('id') diff --git a/openstack/identity/v3/policy.py b/openstack/identity/v3/policy.py index bd814d5ba6..35d3bbcb58 100644 --- a/openstack/identity/v3/policy.py +++ b/openstack/identity/v3/policy.py @@ -10,23 +10,21 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class Policy(resource.Resource): resource_key = 'policy' resources_key = 'policies' base_path = '/policies' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' # Properties #: The policy rule set itself, as a serialized blob. *Type: string* diff --git a/openstack/identity/v3/project.py b/openstack/identity/v3/project.py index 439962a32c..bb56d4c428 100644 --- a/openstack/identity/v3/project.py +++ b/openstack/identity/v3/project.py @@ -10,23 +10,35 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack.common import tag +from openstack import exceptions +from openstack import resource +from openstack import utils -class Project(resource.Resource): +class Project(resource.Resource, tag.TagMixin): resource_key = 'project' resources_key = 'projects' base_path = '/projects' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + 'domain_id', + 'is_domain', + 'name', + 'parent_id', + is_enabled='enabled', + **tag.TagMixin._tag_query_parameters, + ) # Properties #: The description of the project. *Type: string* @@ -45,8 +57,165 @@ class Project(resource.Resource): #: for the project are immediately invalidated. Re-enabling a project #: does not re-enable pre-existing tokens. *Type: bool* is_enabled = resource.Body('enabled', type=bool) - #: Unique project name, within the owning domain. *Type: string* - name = resource.Body('name') + #: The resource options for the project. Available resource options are + #: immutable. + options = resource.Body('options', type=dict) #: The ID of the parent of the project. #: New in version 3.4 parent_id = resource.Body('parent_id') + #: The links related to the project resource. 
+ links = resource.Body('links') + + def assign_role_to_user(self, session, user, role, inherited): + """Assign role to user on project""" + url = utils.urljoin( + self.base_path, + self.id, + 'users', + user.id, + 'roles', + role.id, + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.put( + url, + ) + if resp.status_code == 204: + return True + return False + + def validate_user_has_role(self, session, user, role, inherited): + """Validates that a user has a role on a project""" + url = utils.urljoin( + self.base_path, self.id, 'users', user.id, 'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.head( + url, + ) + if resp.status_code == 204: + return True + return False + + def unassign_role_from_user(self, session, user, role, inherited): + """Unassigns a role from a user on a project""" + url = utils.urljoin( + self.base_path, self.id, 'users', user.id, 'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.delete( + url, + ) + if resp.status_code == 204: + return True + return False + + def assign_role_to_group(self, session, group, role, inherited): + """Assign role to group on project""" + url = utils.urljoin( + self.base_path, + self.id, + 'groups', + group.id, + 'roles', + role.id, + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.put( + url, + ) + if resp.status_code == 204: + return True + return False + + def validate_group_has_role(self, session, group, role, inherited): + """Validates that a group has a role on a project""" + url = utils.urljoin( + self.base_path, self.id, 'groups', group.id, 'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.head( + url, + ) + if resp.status_code == 204: + return True + return False + + def 
unassign_role_from_group(self, session, group, role, inherited): + """Unassigns a role from a group on a project""" + url = utils.urljoin( + self.base_path, self.id, 'groups', group.id, 'roles', role.id + ) + if inherited: + url = utils.urljoin('OS-INHERIT', url, 'inherited_to_projects') + resp = session.delete( + url, + ) + if resp.status_code == 204: + return True + return False + + def associate_endpoint(self, session, endpoint_id): + """Associate endpoint with project. + + :param session: The session to use for making this request. + :param endpoint_id: The ID of an endpoint. + :returns: None + """ + url = utils.urljoin( + '/OS-EP-FILTER/projects', + self.id, + 'endpoints', + endpoint_id, + ) + response = session.put(url) + exceptions.raise_from_response(response) + + def disassociate_endpoint(self, session, endpoint_id): + """Disassociate endpoint from project. + + :param session: The session to use for making this request. + :param endpoint_id: The ID of an endpoint. + :returns: None + """ + url = utils.urljoin( + '/OS-EP-FILTER/projects', + self.id, + 'endpoints', + endpoint_id, + ) + response = session.delete(url) + exceptions.raise_from_response(response) + + +class UserProject(Project): + base_path = '/users/%(user_id)s/projects' + + #: The ID for the user from the URI of the resource + user_id = resource.URI('user_id') + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True + + +class EndpointProject(Project): + base_path = '/OS-EP-FILTER/endpoints/%(endpoint_id)s/projects' + + #: The ID for the endpoint from the URI of the resource + endpoint_id = resource.URI('endpoint_id') + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True diff --git a/openstack/identity/v3/region.py b/openstack/identity/v3/region.py index 1fdd1c20cd..33db63c8fc 100644 --- a/openstack/identity/v3/region.py +++ 
b/openstack/identity/v3/region.py @@ -10,23 +10,25 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class Region(resource.Resource): resource_key = 'region' resources_key = 'regions' base_path = '/regions' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'parent_region_id', + ) # Properties #: User-facing description of the region. *Type: string* diff --git a/openstack/identity/v3/registered_limit.py b/openstack/identity/v3/registered_limit.py new file mode 100644 index 0000000000..db51b475d0 --- /dev/null +++ b/openstack/identity/v3/registered_limit.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource + + +class RegisteredLimit(resource.Resource): + resource_key = 'registered_limit' + resources_key = 'registered_limits' + base_path = '/registered_limits' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + commit_method = 'PATCH' + commit_jsonpatch = True + + _query_mapping = resource.QueryParameters( + 'service_id', 'region_id', 'resource_name' + ) + + # Properties + #: User-facing description of the registered_limit. *Type: string* + description = resource.Body('description') + #: The links for the registered_limit resource. + links = resource.Body('links') + #: ID of service. *Type: string* + service_id = resource.Body('service_id') + #: ID of region, if any. *Type: string* + region_id = resource.Body('region_id') + #: The resource name. *Type: string* + resource_name = resource.Body('resource_name') + #: The default limit value. *Type: int* + default_limit = resource.Body('default_limit') + + def create( + self, + session, + prepend_key=True, + base_path=None, + *, + resource_request_key=None, + resource_response_key='registered_limits', + microversion=None, + **params, + ): + return super().create( + session, + prepend_key=prepend_key, + base_path=base_path, + resource_request_key=resource_request_key, + resource_response_key=resource_response_key, + microversion=microversion, + **params, + ) + + def _prepare_request_body( + self, + patch, + prepend_key, + *, + resource_request_key=None, + ): + body = self._body.dirty + if prepend_key and self.resource_key is not None: + if patch: + body = {self.resource_key: body} + else: + # Keystone supports bunch create for registered limit. So the + # request body for creating registered_limit is a list instead + # of dict. 
+ body = {self.resources_key: [body]} + return body + + def _translate_response( + self, + response, + has_body=None, + error_message=None, + *, + resource_response_key=None, + ): + """Given a KSA response, inflate this instance with its data + + DELETE operations don't return a body, so only try to work + with a body when has_body is True. + + This method updates attributes that correspond to headers + and body on this instance and clears the dirty set. + """ + if has_body is None: + has_body = self.has_body + + exceptions.raise_from_response(response, error_message=error_message) + + if has_body: + try: + body = response.json() + if resource_response_key and resource_response_key in body: + body = body[resource_response_key] + elif self.resource_key and self.resource_key in body: + body = body[self.resource_key] + + # Keystone supports bunch create for registered limit. So the + # response body for creating registered_limit is a list instead + # of dict. + if isinstance(body, list): + body = body[0] + + # Do not allow keys called "self" through. Glance chose + # to name a key "self", so we need to pop it out because + # we can't send it through cls.existing and into the + # Resource initializer. "self" is already the first + # argument and is practically a reserved word. 
+ body.pop("self", None) + + body_attrs = self._consume_body_attrs(body) + if self._allow_unknown_attrs_in_body: + body_attrs.update(body) + self._unknown_attrs_in_body.update(body) + elif self._store_unknown_attrs_as_properties: + body_attrs = self._pack_attrs_under_properties( + body_attrs, body + ) + + self._body.attributes.update(body_attrs) + self._body.clean() + if self.commit_jsonpatch or self.allow_patch: + # We need the original body to compare against + self._original_body = body_attrs.copy() + except ValueError: + # Server returned not parse-able response (202, 204, etc) + # Do simply nothing + pass + + headers = self._consume_header_attrs(response.headers) + self._header.attributes.update(headers) + self._header.clean() + self._update_location() + dict.update(self, self.to_dict()) diff --git a/openstack/identity/v3/role.py b/openstack/identity/v3/role.py index 7c7d6b1a29..283847edcd 100644 --- a/openstack/identity/v3/role.py +++ b/openstack/identity/v3/role.py @@ -10,34 +10,33 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class Role(resource.Resource): resource_key = 'role' resources_key = 'roles' base_path = '/roles' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - put_create = True + commit_method = 'PATCH' - _query_mapping = resource.QueryParameters( - 'name', 'domain_id') + _query_mapping = resource.QueryParameters('name', 'domain_id') # Properties - #: References the domain ID which owns the role; if a domain ID is not - #: specified by the client, the Identity service implementation will - #: default it to the domain ID to which the client's token is scoped. 
- #: *Type: string* - domain_id = resource.Body('domain_id') #: Unique role name, within the owning domain. *Type: string* name = resource.Body('name') + #: User-facing description of the role. *Type: string* + description = resource.Body('description') + #: References the domain ID which owns the role. *Type: string* + domain_id = resource.Body('domain_id') + #: The resource options for the role. Available resource options are + #: immutable. + options = resource.Body('options', type=dict) #: The links for the service resource. links = resource.Body('links') diff --git a/openstack/identity/v3/role_assignment.py b/openstack/identity/v3/role_assignment.py new file mode 100644 index 0000000000..611cc7e8c9 --- /dev/null +++ b/openstack/identity/v3/role_assignment.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class RoleAssignment(resource.Resource): + resource_key = 'role_assignment' + resources_key = 'role_assignments' + base_path = '/role_assignments' + + # capabilities + allow_list = True + + _query_mapping = resource.QueryParameters( + 'group_id', + 'role_id', + 'scope_domain_id', + 'scope_project_id', + 'user_id', + 'effective', + 'include_names', + 'include_subtree', + role_id='role.id', + user_id='user.id', + group_id='group.id', + scope_project_id='scope.project.id', + scope_domain_id='scope.domain.id', + scope_system='scope.system', + inherited_to='scope.OS-INHERIT:inherited_to', + ) + + # Properties + #: The links for the service resource. + links = resource.Body('links') + #: The role (dictionary contains only id) + role = resource.Body('role', type=dict) + #: The scope (either domain or project; dictionary contains only id) + scope = resource.Body('scope', type=dict) + #: The user (dictionary contains only id) + user = resource.Body('user', type=dict) + #: The group (dictionary contains only id) + group = resource.Body('group', type=dict) diff --git a/openstack/identity/v3/role_domain_group_assignment.py b/openstack/identity/v3/role_domain_group_assignment.py new file mode 100644 index 0000000000..e65ef8c9ed --- /dev/null +++ b/openstack/identity/v3/role_domain_group_assignment.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class RoleDomainGroupAssignment(resource.Resource): + resource_key = 'role' + resources_key = 'roles' + base_path = '/domains/%(domain_id)s/groups/%(group_id)s/roles' + + # capabilities + allow_list = True + + # Properties + #: name of the role *Type: string* + name = resource.Body('name') + #: The links for the service resource. + links = resource.Body('links') + #: The ID of the domain to list assignment from. *Type: string* + domain_id = resource.URI('domain_id') + #: The ID of the group to list assignment from. *Type: string* + group_id = resource.URI('group_id') diff --git a/openstack/identity/v3/role_domain_user_assignment.py b/openstack/identity/v3/role_domain_user_assignment.py new file mode 100644 index 0000000000..242932cabe --- /dev/null +++ b/openstack/identity/v3/role_domain_user_assignment.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class RoleDomainUserAssignment(resource.Resource): + resource_key = 'role' + resources_key = 'roles' + base_path = '/domains/%(domain_id)s/users/%(user_id)s/roles' + + # capabilities + allow_list = True + + # Properties + #: name of the role *Type: string* + name = resource.Body('name') + #: The links for the service resource. + links = resource.Body('links') + #: The ID of the domain to list assignment from. *Type: string* + domain_id = resource.URI('domain_id') + #: The ID of the user to list assignment from. 
*Type: string* + user_id = resource.URI('user_id') diff --git a/openstack/identity/v3/role_project_group_assignment.py b/openstack/identity/v3/role_project_group_assignment.py new file mode 100644 index 0000000000..95f2f18633 --- /dev/null +++ b/openstack/identity/v3/role_project_group_assignment.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class RoleProjectGroupAssignment(resource.Resource): + resource_key = 'role' + resources_key = 'roles' + base_path = '/projects/%(project_id)s/groups/%(group_id)s/roles' + + # capabilities + allow_list = True + + # Properties + #: name of the role *Type: string* + name = resource.Body('name') + #: The links for the service resource. + links = resource.Body('links') + #: The ID of the project to list assignment from. *Type: string* + project_id = resource.URI('project_id') + #: The ID of the group to list assignment from. *Type: string* + group_id = resource.URI('group_id') diff --git a/openstack/identity/v3/role_project_user_assignment.py b/openstack/identity/v3/role_project_user_assignment.py new file mode 100644 index 0000000000..c5c99bdf2d --- /dev/null +++ b/openstack/identity/v3/role_project_user_assignment.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class RoleProjectUserAssignment(resource.Resource): + resource_key = 'role' + resources_key = 'roles' + base_path = '/projects/%(project_id)s/users/%(user_id)s/roles' + + # capabilities + allow_list = True + + # Properties + #: name of the role *Type: string* + name = resource.Body('name') + #: The links for the service resource. + links = resource.Body('links') + #: The ID of the project to list assignment from. *Type: string* + project_id = resource.URI('project_id') + #: The ID of the user to list assignment from. *Type: string* + user_id = resource.URI('user_id') diff --git a/openstack/identity/v3/role_system_group_assignment.py b/openstack/identity/v3/role_system_group_assignment.py new file mode 100644 index 0000000000..a9dd03577c --- /dev/null +++ b/openstack/identity/v3/role_system_group_assignment.py @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class RoleSystemGroupAssignment(resource.Resource): + resource_key = 'role' + resources_key = 'roles' + base_path = '/system/groups/%(group_id)s/roles' + + # capabilities + allow_list = True + + # Properties + #: The ID of the group to list assignment from. *Type: string* + group_id = resource.URI('group_id') + #: The name of the system to list assignment from. *Type: string* + system_id = resource.URI('system_id') diff --git a/openstack/identity/v3/role_system_user_assignment.py b/openstack/identity/v3/role_system_user_assignment.py new file mode 100644 index 0000000000..e11781daac --- /dev/null +++ b/openstack/identity/v3/role_system_user_assignment.py @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class RoleSystemUserAssignment(resource.Resource): + resource_key = 'role' + resources_key = 'roles' + base_path = '/system/users/%(user_id)s/roles' + + # capabilities + allow_list = True + + # Properties + #: The name of the system to list assignment from. *Type: string* + system_id = resource.URI('system_id') + #: The ID of the user to list assignment from. 
*Type: string* + user_id = resource.URI('user_id') diff --git a/openstack/identity/v3/service.py b/openstack/identity/v3/service.py index 4eb7c13caa..d7a4a4bbf3 100644 --- a/openstack/identity/v3/service.py +++ b/openstack/identity/v3/service.py @@ -10,23 +10,26 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class Service(resource.Resource): resource_key = 'service' resources_key = 'services' base_path = '/services' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'name', + 'type', + ) # Properties #: User-facing description of the service. *Type: string* diff --git a/openstack/identity/v3/service_provider.py b/openstack/identity/v3/service_provider.py new file mode 100644 index 0000000000..7185a08070 --- /dev/null +++ b/openstack/identity/v3/service_provider.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class ServiceProvider(resource.Resource): + resource_key = 'service_provider' + resources_key = 'service_providers' + base_path = '/OS-FEDERATION/service_providers' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + create_method = 'PUT' + create_exclude_id_from_body = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'id', + is_enabled='enabled', + ) + + # Properties + #: The URL to authenticate against. + auth_url = resource.Body('auth_url') + #: A description of this service provider. + description = resource.Body('description') + #: If the service provider is currently enabled. + is_enabled = resource.Body('enabled', type=bool) + #: The identifier of the service provider. + name = resource.Body('id') + #: The prefix of the RelayState SAML attribute. + relay_state_prefix = resource.Body('relay_state_prefix') + #: The service provider's URL. + sp_url = resource.Body('sp_url') diff --git a/openstack/identity/v3/system.py b/openstack/identity/v3/system.py new file mode 100644 index 0000000000..70652202b6 --- /dev/null +++ b/openstack/identity/v3/system.py @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource +from openstack import utils + + +class System(resource.Resource): + resource_key = 'system' + base_path = '/system' + + # capabilities + + def assign_role_to_user(self, session, user, role): + """Assign role to user on system""" + url = utils.urljoin(self.base_path, 'users', user.id, 'roles', role.id) + resp = session.put( + url, + ) + if resp.status_code == 204: + return True + return False + + def validate_user_has_role(self, session, user, role): + """Validates that a user has a role on a system""" + url = utils.urljoin(self.base_path, 'users', user.id, 'roles', role.id) + resp = session.head( + url, + ) + if resp.status_code == 204: + return True + return False + + def unassign_role_from_user(self, session, user, role): + """Unassigns a role from a user on a system""" + url = utils.urljoin(self.base_path, 'users', user.id, 'roles', role.id) + resp = session.delete( + url, + ) + if resp.status_code == 204: + return True + return False + + def assign_role_to_group(self, session, group, role): + """Assign role to group on system""" + url = utils.urljoin( + self.base_path, 'groups', group.id, 'roles', role.id + ) + resp = session.put( + url, + ) + if resp.status_code == 204: + return True + return False + + def validate_group_has_role(self, session, group, role): + """Validates that a group has a role on a system""" + url = utils.urljoin( + self.base_path, 'groups', group.id, 'roles', role.id + ) + resp = session.head( + url, + ) + if resp.status_code == 204: + return True + return False + + def unassign_role_from_group(self, session, group, role): + """Unassigns a role from a group on a system""" + url = utils.urljoin( + self.base_path, 'groups', group.id, 'roles', role.id + ) + resp = session.delete( + url, + ) + if resp.status_code == 204: + return True + return False diff --git a/openstack/identity/v3/token.py b/openstack/identity/v3/token.py new file mode 100644 index 0000000000..54518858e3 --- /dev/null +++ 
b/openstack/identity/v3/token.py @@ -0,0 +1,115 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from keystoneauth1 import adapter + +from openstack import exceptions +from openstack import resource + + +class Token(resource.Resource): + resource_key = 'token' + base_path = '/auth/tokens' + + # capabilities + allow_fetch = False + allow_delete = False + allow_list = False + allow_head = False + + # Properties + #: An authentication token. This is used rather than X-Auth-Token to allow + #: users check or revoke a token other than their own. + subject_token = resource.Header('x-subject-token') + + #: A list of one or two audit IDs. An audit ID is a unique, randomly + #: generated, URL-safe string that you can use to track a token. The first + #: audit ID is the current audit ID for the token. The second audit ID is + #: present for only re-scoped tokens and is the audit ID from the token + #: before it was re-scoped. A re- scoped token is one that was exchanged + #: for another token of the same or different scope. You can use these + #: audit IDs to track the use of a token or chain of tokens across multiple + #: requests and endpoints without exposing the token ID to non-privileged + #: users. + audit_ids = resource.Body('audit_ids', type=list) + #: The service catalog. + catalog = resource.Body('catalog', type=list, list_type=dict) + #: The date and time when the token expires. 
+ expires_at = resource.Body('expires_at') + #: The date and time when the token was issued. + issued_at = resource.Body('issued_at') + #: The authentication method. + methods = resource.Body('methods', type=list) + #: The user that owns the token. + user = resource.Body('user', type=dict) + #: The project that the token is scoped to, if any. + project = resource.Body('project', type=dict) + #: The domain that the token is scoped to, if any. + domain = resource.Body('domain', type=dict) + #: Whether the project, if set, is acting as a domain. + is_domain = resource.Body('is_domain', type=bool) + #: The parts of the system the token is scoped to, if system-scoped. + system = resource.Body('system', type=dict) + #: The roles associated with the user. + roles = resource.Body('roles', type=list, list_type=dict) + + @classmethod + def validate( + cls, + session: adapter.Adapter, + token: str, + *, + nocatalog: bool = False, + allow_expired: bool = False, + ) -> 'Token': + path = cls.base_path + + params: dict[str, bool] = {} + if nocatalog: + params['nocatalog'] = nocatalog + if allow_expired: + params['allow_expired'] = allow_expired + + response = session.get( + path, headers={'x-subject-token': token}, params=params + ) + exceptions.raise_from_response(response) + + ret = cls() + ret._translate_response( + response, resource_response_key=cls.resource_key + ) + return ret + + @classmethod + def check( + cls, + session: adapter.Adapter, + token: str, + *, + allow_expired: bool = False, + ) -> bool: + params: dict[str, bool] = {} + if allow_expired: + params['allow_expired'] = allow_expired + + response = session.head( + cls.base_path, headers={'x-subject-token': token}, params=params + ) + return response.status_code == 200 + + @classmethod + def revoke(cls, session: adapter.Adapter, token: str) -> None: + response = session.delete( + cls.base_path, headers={'x-subject-token': token} + ) + exceptions.raise_from_response(response) diff --git 
a/openstack/identity/v3/trust.py b/openstack/identity/v3/trust.py index 5141e75147..d8f7347513 100644 --- a/openstack/identity/v3/trust.py +++ b/openstack/identity/v3/trust.py @@ -11,32 +11,28 @@ # under the License. -from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class Trust(resource.Resource): resource_key = 'trust' resources_key = 'trusts' base_path = '/OS-TRUST/trusts' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True + allow_fetch = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'trustor_user_id', 'trustee_user_id') + 'trustor_user_id', 'trustee_user_id' + ) # Properties #: A boolean indicating whether the trust can be issued by the trustee as #: a regulart trust. Default is ``False``. allow_redelegation = resource.Body('allow_redelegation', type=bool) - #: If ``impersonation`` is set to ``False``, then the token's ``user`` - #: attribute will represent that of the trustee. *Type: bool* - is_impersonation = resource.Body('impersonation', type=bool) #: Specifies the expiration time of the trust. A trust may be revoked #: ahead of expiration. If the value represents a time in the past, #: the trust is deactivated. diff --git a/openstack/identity/v3/user.py b/openstack/identity/v3/user.py index 17f9808942..d05cb64aa3 100644 --- a/openstack/identity/v3/user.py +++ b/openstack/identity/v3/user.py @@ -10,23 +10,28 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.identity import identity_service -from openstack import resource2 as resource +from openstack import resource class User(resource.Resource): resource_key = 'user' resources_key = 'users' base_path = '/users' - service = identity_service.IdentityService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + + _query_mapping = resource.QueryParameters( + 'domain_id', + 'name', + 'password_expires_at', + is_enabled='enabled', + ) # Properties #: References the user's default project ID against which to authorize, @@ -61,8 +66,10 @@ class User(resource.Resource): #: The default form of credential used during authentication. #: *Type: string* password = resource.Body('password') - #: The date and time when the pasword expires. The time zone is UTC. + #: The date and time when the password expires. The time zone is UTC. #: A None value means the password never expires. #: This is a response object attribute, not valid for requests. #: *New in version 3.7* password_expires_at = resource.Body('password_expires_at') + #: A dictionary of users extra options. + options = resource.Body('options', type=dict, default={}) diff --git a/openstack/identity/version.py b/openstack/identity/version.py index bfea2a3a2c..ceafc70a75 100644 --- a/openstack/identity/version.py +++ b/openstack/identity/version.py @@ -10,7 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.identity import identity_service +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + from openstack import resource @@ -18,22 +22,31 @@ class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' - service = identity_service.IdentityService( - version=identity_service.IdentityService.UNVERSIONED - ) # capabilities allow_list = True # Properties - media_types = resource.prop('media-types') - status = resource.prop('status') - updated = resource.prop('updated') + media_types = resource.Body('media-types') + status = resource.Body('status') + updated = resource.Body('updated') @classmethod - def list(cls, session, **params): - resp = session.get(cls.base_path, endpoint_filter=cls.service, - params=params) + def list( + cls, + session: adapter.Adapter, + paginated: bool = True, + base_path: str | None = None, + allow_unknown_params: bool = False, + *, + microversion: str | None = None, + headers: dict[str, str] | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: + if base_path is None: + base_path = cls.base_path + + resp = session.get(base_path, params=params) resp = resp.json() for data in resp[cls.resources_key]['values']: yield cls.existing(**data) diff --git a/openstack/image/_download.py b/openstack/image/_download.py new file mode 100644 index 0000000000..605128053b --- /dev/null +++ b/openstack/image/_download.py @@ -0,0 +1,172 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections.abc +import hashlib +import io +import typing as ty + +from openstack import exceptions +from openstack import utils + + +def _verify_checksum( + hasher: ty.Any, + expected_hash: str | None, + hash_algo: str | None = None, +) -> None: + """Verify checksum using the provided hasher. + + :param hasher: A hashlib hash object + :param expected_hash: The expected hexdigest value + :param hash_algo: Optional name of the hash algorithm for error messages + :raises: InvalidResponse if the hash doesn't match + """ + if expected_hash: + digest = hasher.hexdigest() + if digest != expected_hash: + algo_msg = f" ({hash_algo})" if hash_algo else "" + raise exceptions.InvalidResponse( + f"checksum mismatch{algo_msg}: {expected_hash} != {digest}" + ) + + +def _integrity_iter( + iterable: collections.abc.Iterable[bytes], + hasher: ty.Any, + expected_hash: str | None, + hash_algo: str | None, +) -> collections.abc.Iterator[bytes]: + """Check image data integrity + + :param iterable: Iterable containing the image data chunks + :param hasher: A hashlib hash object + :param expected_hash: The expected hexdigest value + :param hash_algo: The hash algorithm + :yields: Chunks of data while computing hash + :raises: InvalidResponse if the hash doesn't match + """ + for chunk in iterable: + hasher.update(chunk) + yield chunk + _verify_checksum(hasher, expected_hash, hash_algo) + + +def _write_chunks( + fd: io.IOBase, chunks: collections.abc.Iterable[bytes] +) -> None: + """Write chunks to file descriptor.""" + for chunk in chunks: + fd.write(chunk) + + +class DownloadMixin: + id: str + base_path: str + + def fetch( + self, + session, + requires_id=True, + base_path=None, + error_message=None, + skip_cache=False, + *, + resource_response_key=None, + microversion=None, + **params, + ): ... 
+ + def download( + self, session, stream=False, output=None, chunk_size=1024 * 1024 + ): + """Download the data contained in an image. + + Checksum validation uses the hash algorithm metadata fields + (hash_value + hash_algo) if available, otherwise falls back to MD5 via + 'checksum' or 'Content-MD5'. No validation is performed if neither is + available. + """ + + # Fetch image metadata first to get hash info before downloading. + # This prevents race conditions and the need for a second conditional + # metadata retrieval if Content-MD5 is missing (story/1619675). + details = self.fetch(session) + meta_checksum = getattr(details, 'checksum', None) + meta_hash_value = getattr(details, 'hash_value', None) + meta_hash_algo = getattr(details, 'hash_algo', None) + + url = utils.urljoin(self.base_path, self.id, 'file') + resp = session.get(url, stream=stream) + + hasher = None + expected_hash = None + hash_algo = None + header_checksum = resp.headers.get("Content-MD5") + + if meta_hash_value and meta_hash_algo: + try: + hasher = hashlib.new(str(meta_hash_algo)) + expected_hash = meta_hash_value + hash_algo = meta_hash_algo + except ValueError as ve: + if not str(ve).startswith('unsupported hash type'): + raise exceptions.SDKException( + f"Unsupported hash algorithm '{meta_hash_algo}': {ve}" + ) + + # Fall back to MD5 from metadata or header + if not hasher: + md5_source = meta_checksum or header_checksum + if md5_source: + hasher = hashlib.md5(usedforsecurity=False) + expected_hash = md5_source + hash_algo = 'md5' + + if hasher is None: + session.log.warning( + "Unable to verify the integrity of image %s " + "- no hash available", + self.id, + ) + + if output: + try: + chunks = resp.iter_content(chunk_size=chunk_size) + if hasher is not None: + chunks = _integrity_iter( + chunks, hasher, expected_hash, hash_algo + ) + + if isinstance(output, io.IOBase): + _write_chunks(output, chunks) + else: + with open(output, 'wb') as fd: + _write_chunks(fd, chunks) + + return resp + 
except Exception as e: + raise exceptions.SDKException(f"Unable to download image: {e}") + + if stream: + # Set content-md5 header for backward compatibility with callers + # who expect hash info in the response when streaming + if hash_algo == 'md5' and expected_hash: + resp.headers['content-md5'] = expected_hash + return resp + + if hasher is not None: + # Loads entire image into memory! + hasher.update(resp.content) + _verify_checksum(hasher, expected_hash, hash_algo) + + return resp diff --git a/openstack/image/image_service.py b/openstack/image/image_service.py index 55c43fd5f9..7c18b43722 100644 --- a/openstack/image/image_service.py +++ b/openstack/image/image_service.py @@ -10,18 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack import service_filter +from openstack.image.v1 import _proxy as _proxy_v1 +from openstack.image.v2 import _proxy as _proxy_v2 +from openstack import service_description -class ImageService(service_filter.ServiceFilter): +class ImageService( + service_description.ServiceDescription[_proxy_v1.Proxy | _proxy_v2.Proxy] +): """The image service.""" - valid_versions = [ - service_filter.ValidVersion('v2'), - service_filter.ValidVersion('v1') - ] - - def __init__(self, version=None): - """Create an image service.""" - super(ImageService, self).__init__(service_type='image', - version=version) + supported_versions = { + '1': _proxy_v1.Proxy, + '2': _proxy_v2.Proxy, + } diff --git a/openstack/image/image_signer.py b/openstack/image/image_signer.py new file mode 100644 index 0000000000..13f2765c37 --- /dev/null +++ b/openstack/image/image_signer.py @@ -0,0 +1,71 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from cryptography.hazmat.backends import default_backend +from cryptography.hazmat.primitives.asymmetric import padding +from cryptography.hazmat.primitives.asymmetric import utils +from cryptography.hazmat.primitives import hashes +from cryptography.hazmat.primitives import serialization + +from openstack import exceptions +from openstack.image.iterable_chunked_file import IterableChunkedFile + +HASH_METHODS = { + 'SHA-224': hashes.SHA224(), + 'SHA-256': hashes.SHA256(), + 'SHA-384': hashes.SHA384(), + 'SHA-512': hashes.SHA512(), +} + + +class ImageSigner: + """Image file signature generator. + + Generates signatures for files using a specified private key file. 
+ """ + + def __init__(self, hash_method='SHA-256', padding_method='RSA-PSS'): + padding_types = { + 'RSA-PSS': padding.PSS( + mgf=padding.MGF1(HASH_METHODS[hash_method]), + salt_length=padding.PSS.MAX_LENGTH, + ) + } + # informational attributes + self.hash_method = hash_method + self.padding_method = padding_method + # runtime objects + self.private_key = None + self.hash = HASH_METHODS[hash_method] + self.hasher = hashes.Hash(self.hash, default_backend()) + self.padding = padding_types[padding_method] + + def load_private_key(self, file_path, password=None): + with open(file_path, 'rb') as key_file: + self.private_key = serialization.load_pem_private_key( + key_file.read(), password=password, backend=default_backend() + ) + + def generate_signature(self, file_obj): + if not self.private_key: + raise exceptions.SDKException("private_key not set") + + file_obj.seek(0) + chunked_file = IterableChunkedFile(file_obj) + for chunk in chunked_file: + self.hasher.update(chunk) + file_obj.seek(0) + digest = self.hasher.finalize() + signature = self.private_key.sign( + digest, self.padding, utils.Prehashed(self.hash) + ) + return signature diff --git a/openstack/image/iterable_chunked_file.py b/openstack/image/iterable_chunked_file.py new file mode 100644 index 0000000000..3b9d9569d8 --- /dev/null +++ b/openstack/image/iterable_chunked_file.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + + +class IterableChunkedFile: + """File object chunk iterator using yield. + + Represents a local file as an iterable object by splitting the file + into chunks. Avoids the file from being completely loaded into memory. + """ + + def __init__(self, file_object, chunk_size=1024 * 1024 * 128, close=False): + self.close_after_read = close + self.file_object = file_object + self.chunk_size = chunk_size + + def __iter__(self): + try: + while True: + data = self.file_object.read(self.chunk_size) + if not data: + break + yield data + finally: + if self.close_after_read: + self.file_object.close() + + def __len__(self): + return len(self.file_object) diff --git a/openstack/image/v1/_proxy.py b/openstack/image/v1/_proxy.py index aa4ba6fbee..1f8930c4bc 100644 --- a/openstack/image/v1/_proxy.py +++ b/openstack/image/v1/_proxy.py @@ -10,34 +10,326 @@ # License for the specific language governing permissions and limitations # under the License. +import os +import typing as ty +import warnings + +from openstack import exceptions as exc from openstack.image.v1 import image as _image from openstack import proxy +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + + +def _get_name_and_filename(name, image_format): + # See if name points to an existing file + if os.path.exists(name): + # Neat. 
Easy enough + return os.path.splitext(os.path.basename(name))[0], name + + # Try appending the disk format + name_with_ext = '.'.join((name, image_format)) + if os.path.exists(name_with_ext): + return os.path.basename(name), name_with_ext + + return name, None + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + retriable_status_codes = [503] + + _IMAGE_MD5_KEY = 'owner_specified.openstack.md5' + _IMAGE_SHA256_KEY = 'owner_specified.openstack.sha256' + _IMAGE_OBJECT_KEY = 'owner_specified.openstack.object' + # NOTE(shade) shade keys were owner_specified.shade.md5 - we need to add + # those to freshness checks so that a shade->sdk transition + # doesn't result in a re-upload + _SHADE_IMAGE_MD5_KEY = 'owner_specified.shade.md5' + _SHADE_IMAGE_SHA256_KEY = 'owner_specified.shade.sha256' + _SHADE_IMAGE_OBJECT_KEY = 'owner_specified.shade.object' -class Proxy(proxy.BaseProxy): + # ====== IMAGES ====== + def create_image( + self, + name, + filename=None, + container=None, + md5=None, + sha256=None, + disk_format=None, + container_format=None, + disable_vendor_agent=True, + allow_duplicates=False, + meta=None, + data=None, + validate_checksum=False, + tags=None, + **kwargs, + ): + """Create an image and optionally upload data. + + Create a new image. If ``filename`` or ``data`` are provided, it will + also upload data to this image. + + :param str name: Name of the image to create. If it is a path name + of an image, the name will be constructed from the extensionless + basename of the path. + :param str filename: The path to the file to upload, if needed. + (optional, defaults to None) + :param data: Image data (string or file-like object). It is mutually + exclusive with filename + :param str container: Name of the container in swift where images + should be uploaded for import if the cloud requires such a thing. + (optional, defaults to 'images') + :param str md5: md5 sum of the image file. If not given, an md5 will + be calculated. 
+ :param str sha256: sha256 sum of the image file. If not given, an md5 + will be calculated. + :param str disk_format: The disk format the image is in. (optional, + defaults to the os-client-config config value for this cloud) + :param str container_format: The container format the image is in. + (optional, defaults to the os-client-config config value for this + cloud) + :param list tags: List of tags for this image. Each tag is a string + of at most 255 chars. + :param bool disable_vendor_agent: Whether or not to append metadata + flags to the image to inform the cloud in question to not expect a + vendor agent to be runing. (optional, defaults to True) + :param allow_duplicates: If true, skips checks that enforce unique + image name. (optional, defaults to False) + :param meta: A dict of key/value pairs to use for metadata that + bypasses automatic type conversion. + :param bool validate_checksum: If true and cloud returns checksum, + compares return value with the one calculated or passed into this + call. If value does not match - raises exception. Default is + 'false' + + Additional kwargs will be passed to the image creation as additional + metadata for the image and will have all values converted to string + except for min_disk, min_ram, size and virtual_size which will be + converted to int. + + If you are sure you have all of your data types correct or have an + advanced need to be explicit, use meta. If you are just a normal + consumer, using kwargs is likely the right choice. + + If a value is in meta and kwargs, meta wins. 
+ + :returns: The results of image creation + :rtype: :class:`~openstack.image.v1.image.Image` + :raises: SDKException if there are problems uploading + """ + # these were previously provided for API (method) compatibility; that + # was a bad idea + if ( + 'use_import' in kwargs + or 'stores' in kwargs + or 'all_stores' in kwargs + or 'all_stores_must_succeed' in kwargs + ): + raise exc.InvalidRequest( + "Glance v1 does not support stores or image import" + ) + + # silently ignore these; they were never supported and were only given + # for API (method) compatibility + kwargs.pop('wait') + kwargs.pop('timeout') + + if container is None: + container = self._connection._OBJECT_AUTOCREATE_CONTAINER + + if not meta: + meta = {} + + if not disk_format: + disk_format = self._connection.config.config['image_format'] + + if not container_format: + # https://docs.openstack.org/image-guide/image-formats.html + container_format = 'bare' + + if data and filename: + raise exc.SDKException( + 'Passing filename and data simultaneously is not supported' + ) + + # If there is no filename, see if name is actually the filename + if not filename and not data: + name, filename = _get_name_and_filename( + name, + self._connection.config.config['image_format'], + ) + + if validate_checksum and data and not isinstance(data, bytes): + raise exc.SDKException( + 'Validating checksum is not possible when data is not a ' + 'direct binary object' + ) + + if not (md5 or sha256) and validate_checksum: + if filename: + md5, sha256 = utils._get_file_hashes(filename) + elif data and isinstance(data, bytes): + md5, sha256 = utils._calculate_data_hashes(data) + + if allow_duplicates: + current_image = None + else: + current_image = self.find_image(name) + if current_image: + # NOTE(pas-ha) 'properties' may be absent or be None + props = current_image.get('properties') or {} + md5_key = props.get( + self._IMAGE_MD5_KEY, + props.get(self._SHADE_IMAGE_MD5_KEY, ''), + ) + sha256_key = props.get( + 
self._IMAGE_SHA256_KEY, + props.get(self._SHADE_IMAGE_SHA256_KEY, ''), + ) + up_to_date = utils._hashes_up_to_date( + md5=md5, + sha256=sha256, + md5_key=md5_key, + sha256_key=sha256_key, + ) + if up_to_date: + self.log.debug( + "image %(name)s exists and is up to date", + {'name': name}, + ) + return current_image + else: + self.log.debug( + "image %(name)s exists, but contains different " + "checksums. Updating.", + {'name': name}, + ) + + if disable_vendor_agent: + kwargs.update( + self._connection.config.config['disable_vendor_agent'] + ) + + # If a user used the v1 calling format, they will have + # passed a dict called properties along + properties = kwargs.pop('properties', {}) + properties[self._IMAGE_MD5_KEY] = md5 or '' + properties[self._IMAGE_SHA256_KEY] = sha256 or '' + properties[self._IMAGE_OBJECT_KEY] = '/'.join([container, name]) + kwargs.update(properties) + image_kwargs = {'properties': kwargs} + if disk_format: + image_kwargs['disk_format'] = disk_format + if container_format: + image_kwargs['container_format'] = container_format + if tags: + image_kwargs['tags'] = tags + + if filename or data: + image = self._upload_image( + name, + filename=filename, + data=data, + meta=meta, + validate_checksum=validate_checksum, + **image_kwargs, + ) + else: + image = self._create(_image.Image, name=name, **kwargs) + + return image def upload_image(self, **attrs): """Upload a new image from attributes + .. warning: + This method is deprecated - and also doesn't work very well. + Please stop using it immediately and switch to + `create_image`. + :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.image.v1.image.Image`, - comprised of the properties on the Image class. + a :class:`~openstack.image.v1.image.Image`, + comprised of the properties on the Image class. :returns: The results of image creation :rtype: :class:`~openstack.image.v1.image.Image` """ + warnings.warn( + "upload_image is deprecated. 
Use create_image instead.", + os_warnings.RemovedInSDK50Warning, + ) return self._create(_image.Image, **attrs) + def _upload_image( + self, + name, + filename, + data, + meta, + **image_kwargs, + ): + if filename and not data: + image_data = open(filename, 'rb') + else: + image_data = data + image_kwargs['properties'].update(meta) + image_kwargs['name'] = name + + # TODO(mordred) Convert this to use image Resource + image = self._connection._get_and_munchify( + 'image', self.post('/images', json=image_kwargs) + ) + checksum = image_kwargs['properties'].get(self._IMAGE_MD5_KEY, '') + + try: + # Let us all take a brief moment to be grateful that this + # is not actually how OpenStack APIs work anymore + headers = { + 'x-glance-registry-purge-props': 'false', + } + if checksum: + headers['x-image-meta-checksum'] = checksum + + image = self._connection._get_and_munchify( + 'image', + self.put( + f'/images/{image.id}', + headers=headers, + data=image_data, + ), + ) + except exc.HttpException: + self.log.debug("Deleting failed upload of image %s", name) + try: + self.delete(f'/images/{image.id}') + except exc.HttpException: + # We're just trying to clean up - if it doesn't work - shrug + self.log.warning( + "Failed deleting image after we failed uploading it.", + exc_info=True, + ) + raise + return image + + def _existing_image(self, **kwargs): + return _image.Image.existing(connection=self._connection, **kwargs) + def delete_image(self, image, ignore_missing=True): """Delete an image :param image: The value can be either the ID of an image or a - :class:`~openstack.image.v1.image.Image` instance. + :class:`~openstack.image.v1.image.Image` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the image does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent image. 
+ :class:`~openstack.exceptions.NotFoundException` will be + raised when the image does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent image. :returns: ``None`` """ @@ -48,47 +340,203 @@ def find_image(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a image. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.image.v1.image.Image` or None """ - return self._find(_image.Image, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _image.Image, name_or_id, ignore_missing=ignore_missing + ) def get_image(self, image): """Get a single image :param image: The value can be the ID of an image or a - :class:`~openstack.image.v1.image.Image` instance. + :class:`~openstack.image.v1.image.Image` instance. :returns: One :class:`~openstack.image.v1.image.Image` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_image.Image, image) def images(self, **query): """Return a generator of images - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
:returns: A generator of image objects :rtype: :class:`~openstack.image.v1.image.Image` """ - return self._list(_image.Image, paginated=True, **query) + return self._list(_image.Image, base_path='/images/detail', **query) def update_image(self, image, **attrs): """Update a image :param image: Either the ID of a image or a - :class:`~openstack.image.v1.image.Image` instance. - :attrs kwargs: The attributes to update on the image represented - by ``value``. + :class:`~openstack.image.v1.image.Image` instance. + :param attrs: The attributes to update on the image represented + by ``image``. :returns: The updated image :rtype: :class:`~openstack.image.v1.image.Image` """ return self._update(_image.Image, image, **attrs) + + def download_image( + self, + image, + stream=False, + output=None, + chunk_size=1024 * 1024, + ): + """Download an image + + This will download an image to memory when ``stream=False``, or allow + streaming downloads using an iterator when ``stream=True``. + For examples of working with streamed responses, see + :ref:`download_image-stream-true`. + + :param image: The value can be either the ID of an image or a + :class:`~openstack.image.v2.image.Image` instance. + :param bool stream: When ``True``, return a :class:`requests.Response` + instance allowing you to iterate over the + response data stream instead of storing its entire + contents in memory. See + :meth:`requests.Response.iter_content` for more + details. *NOTE*: If you do not consume + the entirety of the response you must explicitly + call :meth:`requests.Response.close` or otherwise + risk inefficiencies with the ``requests`` + library's handling of connections. + + When ``False``, return the entire contents of the response. + :param output: Either a file object or a path to store data into. + :param int chunk_size: size in bytes to read from the wire and buffer + at one time. 
Defaults to 1024 * 1024 = 1 MiB + + :returns: When output is not given - the bytes comprising the given + Image when stream is False, otherwise a :class:`requests.Response` + instance. When output is given - a + :class:`~openstack.image.v2.image.Image` instance. + """ + + image = self._get_resource(_image.Image, image) + + return image.download( + self, + stream=stream, + output=output, + chunk_size=chunk_size, + ) + + def _update_image_properties(self, image, meta, properties): + properties.update(meta) + img_props = {} + for k, v in iter(properties.items()): + if image.properties.get(k, None) != v: + img_props[f'x-image-meta-{k}'] = v + if not img_props: + return False + self.put(f'/images/{image.id}', headers=img_props) + return True + + def update_image_properties( + self, + image=None, + meta=None, + **kwargs, + ): + """ + Update the properties of an existing image. + + :param image: Name or id of an image or an Image object. + :param meta: A dict of key/value pairs to use for metadata that + bypasses automatic type conversion. + + Additional kwargs will be passed to the image creation as additional + metadata for the image and will have all values converted to string + except for min_disk, min_ram, size and virtual_size which will be + converted to int. 
+ """ + + if isinstance(image, str): + image = self._connection.get_image(image) + + if not meta: + meta = {} + + img_props = {} + for k, v in iter(kwargs.items()): + if v and k in ['ramdisk', 'kernel']: + v = self._connection.get_image_id(v) + k = f'{k}_id' + img_props[k] = v + + return self._update_image_properties(image, meta, img_props) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/image/v1/image.py b/openstack/image/v1/image.py index bbe0f55df0..9868d0c341 100644 --- a/openstack/image/v1/image.py +++ b/openstack/image/v1/image.py @@ -10,64 +10,194 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.image import image_service +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack import exceptions +from openstack.image import _download from openstack import resource -class Image(resource.Resource): +class Image(resource.Resource, _download.DownloadMixin): resource_key = 'image' resources_key = 'images' base_path = '/images' - service = image_service.ImageService() # capabilities allow_create = True - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True + # Store all unknown attributes under 'properties' in the object. 
+ # Remotely they would be still in the resource root + _store_unknown_attrs_as_properties = True + + _query_mapping = resource.QueryParameters( + 'name', + 'container_format', + 'disk_format', + 'status', + 'size_min', + 'size_max', + ) + #: Hash of the image data used. The Image service uses this value #: for verification. - checksum = resource.prop('checksum') + checksum = resource.Body('checksum') #: The container format refers to whether the VM image is in a file #: format that also contains metadata about the actual VM. #: Container formats include OVF and Amazon AMI. In addition, #: a VM image might not have a container format - instead, #: the image is just a blob of unstructured data. - container_format = resource.prop('container_format') + container_format = resource.Body('container_format') #: A URL to copy an image from - copy_from = resource.prop('copy_from') + copy_from = resource.Body('copy_from') #: The timestamp when this image was created. - created_at = resource.prop('created_at') + created_at = resource.Body('created_at') #: Valid values are: aki, ari, ami, raw, iso, vhd, vdi, qcow2, or vmdk. #: The disk format of a VM image is the format of the underlying #: disk image. Virtual appliance vendors have different formats for #: laying out the information contained in a VM disk image. - disk_format = resource.prop('disk_format') + disk_format = resource.Body('disk_format') #: Defines whether the image can be deleted. #: *Type: bool* - is_protected = resource.prop('protected', type=bool) + is_protected = resource.Body('protected', type=bool) #: ``True`` if this is a public image. #: *Type: bool* - is_public = resource.prop('is_public', type=bool) + is_public = resource.Body('is_public', type=bool) #: A location for the image identified by a URI - location = resource.prop('location') + location = resource.Body('location') #: The minimum disk size in GB that is required to boot the image. 
- min_disk = resource.prop('min_disk') + min_disk = resource.Body('min_disk') #: The minimum amount of RAM in MB that is required to boot the image. - min_ram = resource.prop('min_ram') + min_ram = resource.Body('min_ram') #: Name for the image. Note that the name of an image is not unique #: to a Glance node. The API cannot expect users to know the names #: of images owned by others. - name = resource.prop('name') + name = resource.Body('name') #: The ID of the owner, or project, of the image. - owner_id = resource.prop('owner') + owner = resource.Body('owner', alias='owner_id') + #: The ID of the owner, or project, of the image. (backwards compat) + owner_id = resource.Body('owner', alias='owner') #: Properties, if any, that are associated with the image. - properties = resource.prop('properties') + properties = resource.Body('properties') #: The size of the image data, in bytes. - size = resource.prop('size') + size = resource.Body('size') #: The image status. - status = resource.prop('status') + status = resource.Body('status') #: The timestamp when this image was last updated. - updated_at = resource.prop('updated_at') + updated_at = resource.Body('updated_at') + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... 
+ + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: + """Find a resource by its name or id. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param name_or_id: This resource's identifier, if needed by + the request. The default is ``None``. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict params: Any additional parameters to be passed into + underlying methods, such as to + :meth:`~openstack.resource.Resource.existing` + in order to pass on URI parameters. + + :return: The :class:`Resource` object matching the given name or id + or None if nothing matches. + :raises: :class:`openstack.exceptions.DuplicateResource` if more + than one resource is found for this request. + :raises: :class:`openstack.exceptions.NotFoundException` if nothing + is found and ignore_missing is ``False``. + """ + session = cls._get_session(session) + # Try to short-circuit by looking directly for a matching ID. 
+ try: + match = cls.existing( + id=name_or_id, + connection=session._get_connection(), # type: ignore + **params, + ) + return match.fetch(session, **params) + except exceptions.NotFoundException: + pass + + params['name'] = name_or_id + + data = cls.list( + session, + base_path='/images/detail', + all_projects=all_projects, + **params, + ) + + result = cls._get_one_match(name_or_id, data) + if result is not None: + return result + + if ignore_missing: + return None + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) diff --git a/openstack/image/v2/_proxy.py b/openstack/image/v2/_proxy.py index 8d6ab6793f..7cb691a800 100644 --- a/openstack/image/v2/_proxy.py +++ b/openstack/image/v2/_proxy.py @@ -10,32 +10,545 @@ # License for the specific language governing permissions and limitations # under the License. +import os +import time +import typing as ty +import warnings + from openstack import exceptions +from openstack.image.v2 import cache as _cache from openstack.image.v2 import image as _image +from openstack.image.v2 import image_tasks as _image_tasks from openstack.image.v2 import member as _member -from openstack import proxy2 -from openstack import resource2 +from openstack.image.v2 import metadef_namespace as _metadef_namespace +from openstack.image.v2 import metadef_object as _metadef_object +from openstack.image.v2 import metadef_property as _metadef_property +from openstack.image.v2 import metadef_resource_type as _metadef_resource_type +from openstack.image.v2 import metadef_schema as _metadef_schema +from openstack.image.v2 import schema as _schema +from openstack.image.v2 import service_info as _si +from openstack.image.v2 import task as _task +from openstack import proxy +from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + +# Rackspace returns this for intermittent import errors +_IMAGE_ERROR_396 = "Image cannot be imported. 
Error code: '396'" +_INT_PROPERTIES = ('min_disk', 'min_ram', 'size', 'virtual_size') +_RAW_PROPERTIES = ('is_protected', 'protected', 'tags') + + +def _get_name_and_filename(name, image_format): + # See if name points to an existing file + if os.path.exists(name) and os.path.isfile(name): + # Neat. Easy enough + return os.path.splitext(os.path.basename(name))[0], name + + # Try appending the disk format + name_with_ext = '.'.join((name, image_format)) + if os.path.exists(name_with_ext) and os.path.isfile(name): + return os.path.basename(name), name_with_ext + + return name, None + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + _resource_registry = { + "cache": _cache.Cache, + "image": _image.Image, + "image_member": _member.Member, + "metadef_namespace": _metadef_namespace.MetadefNamespace, + "metadef_resource_type": _metadef_resource_type.MetadefResourceType, + "metadef_resource_type_association": _metadef_resource_type.MetadefResourceTypeAssociation, # noqa + "schema": _schema.Schema, + "info_import": _si.Import, + "info_store": _si.Store, + "task": _task.Task, + } + + retriable_status_codes = [503] + + _IMAGE_MD5_KEY = 'owner_specified.openstack.md5' + _IMAGE_SHA256_KEY = 'owner_specified.openstack.sha256' + _IMAGE_OBJECT_KEY = 'owner_specified.openstack.object' + + # NOTE(shade) shade keys were owner_specified.shade.md5 - we need to add + # those to freshness checks so that a shade->sdk transition + # doesn't result in a re-upload + _SHADE_IMAGE_MD5_KEY = 'owner_specified.shade.md5' + _SHADE_IMAGE_SHA256_KEY = 'owner_specified.shade.sha256' + _SHADE_IMAGE_OBJECT_KEY = 'owner_specified.shade.object' + + # ====== CACHE MANAGEMENT====== + def get_image_cache(self): + return self._get(_cache.Cache, requires_id=False) + + def cache_delete_image(self, image, ignore_missing=True): + """Delete an image from cache. + + :param image: The value can be either the ID of an image or a + :class:`~openstack.image.v2.image.Image` instance. 
+ :param bool ignore_missing: When set to ``False``, + :class:`~openstack.exceptions.NotFoundException` will be raised + when the image or cache entry does not exist. + :returns: ``None`` + """ + return self._delete(_cache.Cache, image, ignore_missing=ignore_missing) + + def queue_image(self, image_id): + """Queue image(s) for caching.""" + cache = self._get_resource(_cache.Cache, None) + return cache.queue(self, image_id) + + def clear_cache(self, target='both'): + """Clear all images from cache, queue or both + + :param target: Specify which target you want to clear + One of: ``both``(default), ``cache``, ``queue``. + """ + cache = self._get_resource(_cache.Cache, None) + return cache.clear(self, target) + + # ====== IMAGES ====== + + def _make_v2_image_params(self, meta, properties): + ret: dict = {} + for k, v in iter(properties.items()): + if k in _INT_PROPERTIES: + ret[k] = int(v) + elif k in _RAW_PROPERTIES: + ret[k] = v + else: + if v is None: + ret[k] = None + else: + ret[k] = str(v) + ret.update(meta) + return ret + + def create_image( + self, + name, + *, + filename=None, + data=None, + container=None, + md5=None, + sha256=None, + disk_format=None, + container_format=None, + tags=None, + disable_vendor_agent=True, + allow_duplicates=False, + meta=None, + wait=False, + timeout=3600, + validate_checksum=False, + use_import=False, + import_method=None, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=None, + all_stores=None, + all_stores_must_succeed=None, + **kwargs, + ): + """Create an image and optionally upload data + + Create a new image. If ``filename`` or ``data`` are provided, it will + also upload data to this image. + + Note that uploading image data is actually quite a complicated + procedure. There are three ways to upload an image: + + * Image upload + * Image import + * Image tasks + + If the image tasks API is enabled, this must be used. 
However, this API + is deprecated since the Image service's Mitaka (12.0.0) release and is + now admin-only. Assuming this API is not enabled, you may choose + between image upload or image import. Image import is more powerful and + allows you to upload data from multiple sources including other glance + instances. It should be preferred on all services that support it. + + :param str name: Name of the image to create. If it is a pathname + of an image, the name will be constructed from the extensionless + basename of the path. + :param str filename: The path to the file to upload, if needed. + (optional, defaults to None) + :param data: Image data (string or file-like object). It is mutually + exclusive with filename + :param str container: Name of the container in swift where images + should be uploaded for import if the cloud requires such a thing. + (optional, defaults to 'images') + :param str md5: md5 sum of the image file. If not given, an md5 will + be calculated. + :param str sha256: sha256 sum of the image file. If not given, an md5 + will be calculated. + :param str disk_format: The disk format the image is in. (optional, + defaults to the os-client-config config value for this cloud) + :param str container_format: The container format the image is in. + (optional, defaults to the os-client-config config value for this + cloud) + :param list tags: List of tags for this image. Each tag is a string + of at most 255 chars. + :param bool disable_vendor_agent: Whether or not to append metadata + flags to the image to inform the cloud in question to not expect a + vendor agent to be runing. (optional, defaults to True) + :param allow_duplicates: If true, skips checks that enforce unique + image name. (optional, defaults to False) + :param meta: A dict of key/value pairs to use for metadata that + bypasses automatic type conversion. + :param bool wait: If true, waits for image to be created. 
Defaults to + true - however, be aware that one of the upload methods is always + synchronous. + :param timeout: Seconds to wait for image creation. None is forever. + :param bool validate_checksum: If true and cloud returns checksum, + compares return value with the one calculated or passed into this + call. If value does not match - raises exception. Default is + 'false' + :param bool use_import: Use the 'glance-direct' method of the + interoperable image import mechanism to import the image. This + defaults to false because it is harder on the target cloud so + should only be used when needed, such as when the user needs the + cloud to transform image format. If the cloud has disabled direct + uploads, this will default to true. If you wish to use other import + methods, use the ``import_image`` method instead. + :param import_method: Method to use for importing the image. Not all + deployments support all methods. One of: ``glance-direct`` + (default), ``web-download``, ``glance-download`` (``copy-image`` is + not used with create). Use of ``glance-direct`` requires the image + be first staged. + :param uri: Required only if using the ``web-download`` import method. + This url is where the data is made available to the Image + service. + :param remote_region: The remote glance region to download the image + from when using glance-download. + :param remote_image_id: The ID of the image to import from the + remote glance when using glance-download. + :param remote_service_interface: The remote glance service interface to + use when using glance-download. + :param stores: List of stores to be used when enabled_backends is + activated in glance. List values can be the id of a store or a + :class:`~openstack.image.v2.service_info.Store` instance. + Implies ``use_import`` equals ``True``. + :param all_stores: Upload to all available stores. Mutually exclusive + with ``store`` and ``stores``. + Implies ``use_import`` equals ``True``. 
+ :param all_stores_must_succeed: When set to True, if an error occurs + during the upload in at least one store, the worfklow fails, the + data is deleted from stores where copying is done (not staging), + and the state of the image is unchanged. When set to False, the + workflow will fail (data deleted from stores, …) only if the import + fails on all stores specified by the user. In case of a partial + success, the locations added to the image will be the stores where + the data has been correctly uploaded. + Default is True. + Implies ``use_import`` equals ``True``. + + Additional kwargs will be passed to the image creation as additional + metadata for the image and will have all values converted to string + except for min_disk, min_ram, size and virtual_size which will be + converted to int. + + If you are sure you have all of your data types correct or have an + advanced need to be explicit, use meta. If you are just a normal + consumer, using kwargs is likely the right choice. + + If a value is in meta and kwargs, meta wins. 
+ + :returns: The results of image creation + :rtype: :class:`~openstack.image.v2.image.Image` + :raises: SDKException if there are problems uploading + """ + if filename and data: + raise exceptions.SDKException( + 'filename and data are mutually exclusive' + ) + + if container is None: + container = self._connection._OBJECT_AUTOCREATE_CONTAINER + + if not meta: + meta = {} + + if not disk_format: + disk_format = self._connection.config.config['image_format'] + if not container_format: + # https://docs.openstack.org/image-guide/image-formats.html + container_format = 'bare' -class Proxy(proxy2.BaseProxy): + # If there is no filename, see if name is actually the filename + if not filename and not data: + name, filename = _get_name_and_filename( + name, + self._connection.config.config['image_format'], + ) - def upload_image(self, container_format=None, disk_format=None, - data=None, **attrs): - """Upload a new image from attributes + if validate_checksum and data and not isinstance(data, bytes): + raise exceptions.SDKException( + 'Validating checksum is not possible when data is not a ' + 'direct binary object' + ) + + if not (md5 or sha256) and validate_checksum: + if filename: + md5, sha256 = utils._get_file_hashes(filename) + elif data and isinstance(data, bytes): + md5, sha256 = utils._calculate_data_hashes(data) + + if allow_duplicates: + current_image = None + else: + current_image = self.find_image(name) + if current_image: + # NOTE(pas-ha) 'properties' may be absent or be None + props = current_image.get('properties') or {} + md5_key = props.get( + self._IMAGE_MD5_KEY, + props.get(self._SHADE_IMAGE_MD5_KEY, ''), + ) + sha256_key = props.get( + self._IMAGE_SHA256_KEY, + props.get(self._SHADE_IMAGE_SHA256_KEY, ''), + ) + up_to_date = utils._hashes_up_to_date( + md5=md5, + sha256=sha256, + md5_key=md5_key, + sha256_key=sha256_key, + ) + if up_to_date: + self.log.debug( + "image %(name)s exists and is up to date", + {'name': name}, + ) + return current_image + 
else: + self.log.debug( + "image %(name)s exists, but contains different " + "checksums. Updating.", + {'name': name}, + ) + + if disable_vendor_agent: + kwargs.update( + self._connection.config.config['disable_vendor_agent'] + ) + + # If a user used the v1 calling format, they will have + # passed a dict called properties along + properties = kwargs.pop('properties', {}) + properties[self._IMAGE_MD5_KEY] = md5 or '' + properties[self._IMAGE_SHA256_KEY] = sha256 or '' + properties[self._IMAGE_OBJECT_KEY] = '/'.join([container, name]) + kwargs.update(properties) + image_kwargs = {'properties': kwargs} + if disk_format: + image_kwargs['disk_format'] = disk_format + if container_format: + image_kwargs['container_format'] = container_format + if tags: + image_kwargs['tags'] = tags + + if filename or data or import_method: + image = self._upload_image( + name, + filename=filename, + data=data, + meta=meta, + wait=wait, + timeout=timeout, + validate_checksum=validate_checksum, + use_import=use_import, + import_method=import_method, + uri=uri, + remote_region=remote_region, + remote_image_id=remote_image_id, + remote_service_interface=remote_service_interface, + stores=stores, + all_stores=all_stores, + all_stores_must_succeed=all_stores_must_succeed, + **image_kwargs, + ) + else: + properties = image_kwargs.pop('properties', {}) + image_kwargs.update(self._make_v2_image_params(meta, properties)) + image_kwargs['name'] = name + image = self._create(_image.Image, **image_kwargs) # type: ignore[arg-type] + + return image + + def import_image( + self, + image, + method='glance-direct', + *, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + store=None, + stores=None, + all_stores=None, + all_stores_must_succeed=None, + ): + """Import data to an existing image + + Interoperable image import process are introduced in the Image API + v2.6. 
It mainly allow image importing from an external url and let + Image Service download it by itself without sending binary data at + image creation. + + :param image: The value can be the ID of a image or a + :class:`~openstack.image.v2.image.Image` instance. + :param method: Method to use for importing the image. Not all + deployments support all methods. One of: ``glance-direct`` + (default), ``web-download``, ``glance-download``, or + ``copy-image``. Use of ``glance-direct`` requires the image be + first staged. + :param uri: Required only if using the ``web-download`` import method. + This url is where the data is made available to the Image + service. + :param remote_region: The remote glance region to download the image + from when using glance-download. + :param remote_image_id: The ID of the image to import from the + remote glance when using glance-download. + :param remote_service_interface: The remote glance service interface to + use when using glance-download. + :param store: Used when enabled_backends is activated in glance. The + value can be the id of a store or a. + :class:`~openstack.image.v2.service_info.Store` instance. + :param stores: List of stores to be used when enabled_backends is + activated in glance. List values can be the id of a store or a + :class:`~openstack.image.v2.service_info.Store` instance. + :param all_stores: Upload to all available stores. Mutually exclusive + with ``store`` and ``stores``. + :param all_stores_must_succeed: When set to True, if an error occurs + during the upload in at least one store, the worfklow fails, the + data is deleted from stores where copying is done (not staging), + and the state of the image is unchanged. When set to False, the + workflow will fail (data deleted from stores, …) only if the + import fails on all stores specified by the user. In case of + a partial success, the locations added to the image will be + the stores where the data has been correctly uploaded. + Default is True. 
+ + :returns: The raw response from the request. + """ + image = self._get_resource(_image.Image, image) + + if all_stores and (store or stores): + raise exceptions.InvalidRequest( + "all_stores is mutually exclusive with store and stores" + ) + + if store is not None: + if stores: + raise exceptions.InvalidRequest( + "store and stores are mutually exclusive" + ) + store = self._get_resource(_si.Store, store) + + stores = stores or [] + new_stores = [] + for s in stores: + new_stores.append(self._get_resource(_si.Store, s)) + stores = new_stores + + # as for the standard image upload function, container_format and + # disk_format are required for using image import process + if not all([image.container_format, image.disk_format]): + raise exceptions.InvalidRequest( + "Both container_format and disk_format are required for " + "importing an image" + ) + + return image.import_image( + self, + method=method, + uri=uri, + remote_region=remote_region, + remote_image_id=remote_image_id, + remote_service_interface=remote_service_interface, + store=store, + stores=stores, + all_stores=all_stores, + all_stores_must_succeed=all_stores_must_succeed, + ) + + def stage_image(self, image, *, filename=None, data=None): + """Stage binary image data + + :param image: The value can be the ID of a image or a + :class:`~openstack.image.v2.image.Image` instance. + :param filename: Optional name of the file to read data from. + :param data: Optional data to be uploaded as an image. + + :returns: The results of image creation + :rtype: :class:`~openstack.image.v2.image.Image` + """ + if filename and data: + raise exceptions.SDKException( + 'filename and data are mutually exclusive' + ) + + image = self._get_resource(_image.Image, image) + + if 'queued' != image.status: + raise exceptions.SDKException( + 'Image stage is only possible for images in the queued state. 
' + f'Current state is {image.status}' + ) + + if filename: + image.data = open(filename, 'rb') + elif data: + image.data = data + image.stage(self) + + # Stage does not return content, but updates the object + image.fetch(self) + + return image + + def upload_image( + self, + container_format=None, + disk_format=None, + data=None, + **attrs, + ): + """Create and upload a new image from attributes + + .. warning: + + This method is deprecated - and also doesn't work very well. + Please stop using it immediately and switch to `create_image`. :param container_format: Format of the container. - A valid value is ami, ari, aki, bare, - ovf, ova, or docker. + A valid value is ami, ari, aki, bare, ovf, ova, or docker. :param disk_format: The format of the disk. A valid value is ami, - ari, aki, vhd, vmdk, raw, qcow2, vdi, or iso. + ari, aki, vhd, vmdk, raw, qcow2, vdi, or iso. :param data: The data to be uploaded as an image. :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.image.v2.image.Image`, - comprised of the properties on the Image class. + a :class:`~openstack.image.v2.image.Image`, comprised of the + properties on the Image class. :returns: The results of image creation :rtype: :class:`~openstack.image.v2.image.Image` """ + warnings.warn( + "upload_image is deprecated. Use create_image instead.", + os_warnings.RemovedInSDK50Warning, + ) # container_format and disk_format are required to be set # on the image by the time upload_image is called, but they're not # required by the _create call. Enforce them here so that we don't @@ -44,11 +557,15 @@ def upload_image(self, container_format=None, disk_format=None, # not being set. 
if not all([container_format, disk_format]): raise exceptions.InvalidRequest( - "Both container_format and disk_format are required") + "Both container_format and disk_format are required" + ) - img = self._create(_image.Image, disk_format=disk_format, - container_format=container_format, - **attrs) + img = self._create( + _image.Image, + disk_format=disk_format, + container_format=container_format, + **attrs, + ) # TODO(briancurtin): Perhaps we should run img.upload_image # in a background thread and just return what is called by @@ -56,80 +573,395 @@ def upload_image(self, container_format=None, disk_format=None, # return anything anyway. Otherwise this blocks while uploading # significant amounts of image data. img.data = data - img.upload(self.session) + img.upload(self) return img - def download_image(self, image): + def _upload_image( + self, + name, + *, + filename=None, + data=None, + meta=None, + wait=False, + timeout=None, + validate_checksum=True, + use_import=False, + import_method=None, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=None, + all_stores=None, + all_stores_must_succeed=None, + **kwargs, + ): + # We can never have nice things. Glance v1 took "is_public" as a + # boolean. Glance v2 takes "visibility". If the user gives us + # is_public, we know what they mean. If they give us visibility, they + # know that they mean. + if 'is_public' in kwargs['properties']: + warnings.warn( + "The 'is_public' property is not supported by Glance v2: use " + "'visibility=public/private' instead", + os_warnings.RemovedInSDK60Warning, + ) + + is_public = kwargs['properties'].pop('is_public') + if is_public: + kwargs['visibility'] = 'public' + else: + kwargs['visibility'] = 'private' + + try: + # This makes me want to die inside + if self._connection.image_api_use_tasks: + if use_import: + raise exceptions.SDKException( + "The Glance Task API and Import API are mutually " + "exclusive. 
Either disable image_api_use_tasks in " + "config, or do not request using import" + ) + return self._upload_image_task( + name, + filename, + data=data, + meta=meta, + wait=wait, + timeout=timeout, + **kwargs, + ) + else: + return self._upload_image_put( + name, + filename, + data=data, + meta=meta, + validate_checksum=validate_checksum, + use_import=use_import, + import_method=import_method, + uri=uri, + remote_region=remote_region, + remote_image_id=remote_image_id, + remote_service_interface=remote_service_interface, + stores=stores, + all_stores=all_stores, + all_stores_must_succeed=all_stores_must_succeed, + **kwargs, + ) + except exceptions.SDKException: + self.log.debug("Image creation failed", exc_info=True) + raise + except Exception as e: + raise exceptions.SDKException( + f"Image creation failed: {e!s}" + ) from e + + def _upload_image_put( + self, + name, + filename, + data, + meta, + validate_checksum, + use_import=False, + import_method=None, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=None, + all_stores=None, + all_stores_must_succeed=None, + **image_kwargs, + ): + if all_stores and stores: + raise exceptions.InvalidRequest( + "all_stores is mutually exclusive with stores" + ) + + # use of any of these imply use_import=True + if stores or all_stores or all_stores_must_succeed: + use_import = True + + if use_import and not import_method: + import_method = 'glance-direct' + + if filename and not data: + image_data = open(filename, 'rb') + else: + image_data = data + + properties = image_kwargs.pop('properties', {}) + + image_kwargs.update(self._make_v2_image_params(meta, properties)) + image_kwargs['name'] = name + + image = self._create(_image.Image, **image_kwargs) + image.data = image_data + + supports_import = ( + image.image_import_methods + and import_method in image.image_import_methods + ) + if use_import and not supports_import: + raise exceptions.SDKException( + "Importing image was 
requested but the cloud does not " + "support the image import method." + ) + + try: + if not use_import: + response = image.upload(self) + exceptions.raise_from_response(response) + if use_import: + kwargs = {} + if stores is not None: + kwargs['stores'] = stores + else: + kwargs['all_stores'] = all_stores + kwargs['all_stores_must_succeed'] = all_stores_must_succeed + if import_method == 'glance-direct': + image.stage(self) + elif import_method == 'web-download': + kwargs['uri'] = uri + elif import_method == 'glance-download': + kwargs.update( + remote_region=remote_region, + remote_image_id=remote_image_id, + remote_service_interface=remote_service_interface, + ) + self.import_image(image, method=import_method, **kwargs) + + # image_kwargs are flat here + md5 = image_kwargs.get(self._IMAGE_MD5_KEY) + sha256 = image_kwargs.get(self._IMAGE_SHA256_KEY) + if validate_checksum and (md5 or sha256): + # Verify that the hash computed remotely matches the local + # value + data = image.fetch(self) + checksum = data.get('checksum') + if checksum: + valid = checksum == md5 or checksum == sha256 + if not valid: + raise Exception('Image checksum verification failed') + except Exception: + self.log.debug("Deleting failed upload of image %s", name) + self.delete_image(image.id) + raise + + return image + + def _upload_image_task( + self, + name, + filename, + data, + wait, + timeout, + meta, + **image_kwargs, + ): + if not self._connection.has_service('object-store'): + raise exceptions.SDKException( + f"The cloud {self._connection.config.name} is configured to " + f"use tasks for image upload, but no object-store service is " + f"available. Aborting." 
+ ) + + properties = image_kwargs.get('properties', {}) + md5 = properties[self._IMAGE_MD5_KEY] + sha256 = properties[self._IMAGE_SHA256_KEY] + container = properties[self._IMAGE_OBJECT_KEY].split('/', 1)[0] + image_kwargs.pop('disk_format', None) + image_kwargs.pop('container_format', None) + + self._connection.create_container(container) + self._connection.create_object( + container, + name, + filename, + md5=md5, + sha256=sha256, + data=data, + metadata={self._connection._OBJECT_AUTOCREATE_KEY: 'true'}, + **{ + 'content-type': 'application/octet-stream', + 'x-delete-after': str(24 * 60 * 60), + }, + ) + # TODO(mordred): Can we do something similar to what nodepool does + # using glance properties to not delete then upload but instead make a + # new "good" image and then mark the old one as "bad" + task_args = { + 'type': 'import', + 'input': { + 'import_from': f'{container}/{name}', + 'image_properties': {'name': name}, + }, + } + + glance_task = self.create_task(**task_args) + if wait: + start = time.time() + + try: + glance_task = self.wait_for_task( + task=glance_task, status='success', wait=timeout + ) + + image_id = glance_task.result['image_id'] + image = self.get_image(image_id) + # NOTE(gtema): Since we might move unknown attributes of + # the image under properties - merge current with update + # properties not to end up removing "existing" properties + props = image.properties.copy() + props.update(image_kwargs.pop('properties', {})) + image_kwargs['properties'] = props + + image = self.update_image(image, **image_kwargs) + self.log.debug( + "Image Task %s imported %s in %s", + glance_task.id, + image_id, + (time.time() - start), + ) + except exceptions.ResourceFailure as e: + glance_task = self.get_task(glance_task) + raise exceptions.SDKException( + f"Image creation failed: {e.message}", + extra_data=glance_task, + ) from e + finally: + # Clean up after ourselves. The object we created is not + # needed after the import is done. 
+ self._connection.delete_object(container, name) + return image + else: + return glance_task + + def _existing_image(self, **kwargs): + return _image.Image.existing(connection=self._connection, **kwargs) + + def download_image( + self, + image, + *, + stream=False, + output=None, + chunk_size=1024 * 1024, + ): """Download an image + This will download an image to memory when ``stream=False``, or allow + streaming downloads using an iterator when ``stream=True``. + For examples of working with streamed responses, see + :ref:`download_image-stream-true`. + :param image: The value can be either the ID of an image or a - :class:`~openstack.image.v2.image.Image` instance. + :class:`~openstack.image.v2.image.Image` instance. + :param bool stream: When ``True``, return a :class:`requests.Response` + instance allowing you to iterate over the response data stream + instead of storing its entire contents in memory. See + :meth:`requests.Response.iter_content` for more details. + + *NOTE*: If you do not consume the entirety of the response you must + explicitly call :meth:`requests.Response.close` or otherwise risk + inefficiencies with the ``requests`` library's handling of + connections. + + When ``False``, return the entire contents of the response. + :param output: Either a file object or a path to store data into. + :param int chunk_size: size in bytes to read from the wire and buffer + at one time. Defaults to 1024 * 1024 = 1 MiB - :returns: The bytes comprising the given Image. + :returns: When output is not given - the bytes comprising the given + Image when stream is False, otherwise a :class:`requests.Response` + instance. When output is given - a + :class:`~openstack.image.v2.image.Image` instance. 
""" + image = self._get_resource(_image.Image, image) - return image.download(self.session) - def delete_image(self, image, ignore_missing=True): + return image.download( + self, + stream=stream, + output=output, + chunk_size=chunk_size, + ) + + def delete_image(self, image, *, store=None, ignore_missing=True): """Delete an image :param image: The value can be either the ID of an image or a - :class:`~openstack.image.v2.image.Image` instance. + :class:`~openstack.image.v2.image.Image` instance. + :param store: The value can be either the ID of a store or a + :class:`~openstack.image.v2.service_info.Store` instance that the + image is associated with. If specified, the image will only be + deleted from the specified store. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the image does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent image. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the image does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent image. :returns: ``None`` """ - self._delete(_image.Image, image, ignore_missing=ignore_missing) + if store: + store = self._get_resource(_si.Store, store) + store.delete_image(self, image, ignore_missing=ignore_missing) + else: + self._delete(_image.Image, image, ignore_missing=ignore_missing) def find_image(self, name_or_id, ignore_missing=True): """Find a single image :param name_or_id: The name or ID of a image. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. 
+ When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.image.v2.image.Image` or None """ - return self._find(_image.Image, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _image.Image, + name_or_id, + ignore_missing=ignore_missing, + ) def get_image(self, image): """Get a single image :param image: The value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. + :class:`~openstack.image.v2.image.Image` instance. :returns: One :class:`~openstack.image.v2.image.Image` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_image.Image, image) def images(self, **query): """Return a generator of images - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of image objects :rtype: :class:`~openstack.image.v2.image.Image` """ - return self._list(_image.Image, paginated=True, **query) + return self._list(_image.Image, **query) def update_image(self, image, **attrs): """Update a image :param image: Either the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. - :attrs kwargs: The attributes to update on the image represented - by ``value``. + :class:`~openstack.image.v2.image.Image` instance. + :param attrs: The attributes to update on the image represented + by ``image``. :returns: The updated image :rtype: :class:`~openstack.image.v2.image.Image` @@ -140,148 +972,1143 @@ def deactivate_image(self, image): """Deactivate an image :param image: Either the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. + :class:`~openstack.image.v2.image.Image` instance. 
:returns: None """ image = self._get_resource(_image.Image, image) - image.deactivate(self.session) + image.deactivate(self) def reactivate_image(self, image): - """Deactivate an image + """Reactivate an image :param image: Either the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. + :class:`~openstack.image.v2.image.Image` instance. :returns: None """ image = self._get_resource(_image.Image, image) - image.reactivate(self.session) + image.reactivate(self) + + def update_image_properties( + self, + image=None, + meta=None, + **kwargs, + ): + """Update the properties of an existing image + + :param image: The value can be the ID of a image or a + :class:`~openstack.image.v2.image.Image` instance. + :param meta: A dict of key/value pairs to use for metadata that + bypasses automatic type conversion. + + Additional kwargs will be passed to the image creation as additional + metadata for the image and will have all values converted to string + except for min_disk, min_ram, size and virtual_size which will be + converted to int. + """ + image = self._get_resource(_image.Image, image) + + if not meta: + meta = {} + + properties = {} + for k, v in iter(kwargs.items()): + if v and k in ['ramdisk', 'kernel']: + v = self._connection.get_image_id(v) + k = f'{k}_id' + properties[k] = v + + img_props = image.properties.copy() + + for k, v in iter(self._make_v2_image_params(meta, properties).items()): + if image.get(k, None) != v: + img_props[k] = v + if not img_props: + return False + + self.update_image(image, **img_props) + + return True + + def image_tasks(self, image): + """Return a generator of Image Tasks + + :param image: The value can be either the name of an image or a + :class:`~openstack.image.v2.image.Image` instance. + :return: A generator object of image tasks + :rtype: :class: ~openstack.image.v2.image_tasks.ImageTasks + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + image_id = resource.Resource._get_id(image) + return self._list(_image_tasks.ImageTasks, image_id=image_id) def add_tag(self, image, tag): """Add a tag to an image :param image: The value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance - that the member will be created for. - :param str tag: The tag to be added + :class:`~openstack.image.v2.image.Image` instance. + :param tag: The tag to be added. :returns: None """ image = self._get_resource(_image.Image, image) - image.add_tag(self.session, tag) + image.add_tag(self, tag) def remove_tag(self, image, tag): - """Remove a tag to an image + """Remove a tag from an image :param image: The value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance - that the member will be created for. - :param str tag: The tag to be removed + :class:`~openstack.image.v2.image.Image` instance. + :param tag: The tag to be removed. :returns: None """ image = self._get_resource(_image.Image, image) - image.remove_tag(self.session, tag) + image.remove_tag(self, tag) + # ====== IMAGE MEMBERS ====== def add_member(self, image, **attrs): """Create a new member from attributes :param image: The value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance - that the member will be created for. + :class:`~openstack.image.v2.image.Image` instance + that the member will be created for. :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.image.v2.member.Member`, - comprised of the properties on the Member class. + a :class:`~openstack.image.v2.member.Member`, + comprised of the properties on the Member class. + + See `Image Sharing Reference + `__ + for details. 
:returns: The results of member creation :rtype: :class:`~openstack.image.v2.member.Member` """ - image_id = resource2.Resource._get_id(image) + image_id = resource.Resource._get_id(image) return self._create(_member.Member, image_id=image_id, **attrs) - def remove_member(self, member, image, ignore_missing=True): + def remove_member(self, member, image=None, ignore_missing=True): """Delete a member :param member: The value can be either the ID of a member or a - :class:`~openstack.image.v2.member.Member` instance. + :class:`~openstack.image.v2.member.Member` instance. + :param image: The value can be either the ID of an image or a + :class:`~openstack.image.v2.image.Image` instance that the member + is part of. This is required if ``member`` is an ID. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the member does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent member. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the member does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent member. :returns: ``None`` """ - image_id = resource2.Resource._get_id(image) - member_id = resource2.Resource._get_id(member) - self._delete(_member.Member, member_id=member_id, image_id=image_id, - ignore_missing=ignore_missing) + image_id = resource.Resource._get_id(image) + member_id = resource.Resource._get_id(member) + self._delete( + _member.Member, + None, + member_id=member_id, + image_id=image_id, + ignore_missing=ignore_missing, + ) def find_member(self, name_or_id, image, ignore_missing=True): """Find a single member :param name_or_id: The name or ID of a member. :param image: This is the image that the member belongs to, - the value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. 
+ the value can be the ID of a image or a + :class:`~openstack.image.v2.image.Image` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.image.v2.member.Member` or None """ - image_id = resource2.Resource._get_id(image) - return self._find(_member.Member, name_or_id, image_id=image_id, - ignore_missing=ignore_missing) + image_id = resource.Resource._get_id(image) + return self._find( + _member.Member, + name_or_id, + image_id=image_id, + ignore_missing=ignore_missing, + ) def get_member(self, member, image): """Get a single member on an image :param member: The value can be the ID of a member or a - :class:`~openstack.image.v2.member.Member` instance. + :class:`~openstack.image.v2.member.Member` instance. :param image: This is the image that the member belongs to. - The value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. + The value can be the ID of a image or a + :class:`~openstack.image.v2.image.Image` instance. :returns: One :class:`~openstack.image.v2.member.Member` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" - member_id = resource2.Resource._get_id(member) - image_id = resource2.Resource._get_id(image) - return self._get(_member.Member, member_id=member_id, - image_id=image_id) + member_id = resource.Resource._get_id(member) + image_id = resource.Resource._get_id(image) + return self._get( + _member.Member, member_id=member_id, image_id=image_id + ) - def members(self, image): + def members(self, image, **query): """Return a generator of members :param image: This is the image that the member belongs to, - the value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. + the value can be the ID of a image or a + :class:`~openstack.image.v2.image.Image` instance. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of member objects :rtype: :class:`~openstack.image.v2.member.Member` """ - image_id = resource2.Resource._get_id(image) - return self._list(_member.Member, paginated=False, - image_id=image_id) + image_id = resource.Resource._get_id(image) + return self._list(_member.Member, image_id=image_id) def update_member(self, member, image, **attrs): """Update the member of an image :param member: Either the ID of a member or a - :class:`~openstack.image.v2.member.Member` instance. + :class:`~openstack.image.v2.member.Member` instance. :param image: This is the image that the member belongs to. - The value can be the ID of a image or a - :class:`~openstack.image.v2.image.Image` instance. - :attrs kwargs: The attributes to update on the member represented - by ``value``. + The value can be the ID of a image or a + :class:`~openstack.image.v2.image.Image` instance. + :param attrs: The attributes to update on the member represented + by ``member``. + + See `Image Sharing Reference + `__ + for details. 
:returns: The updated member :rtype: :class:`~openstack.image.v2.member.Member` """ - member_id = resource2.Resource._get_id(member) - image_id = resource2.Resource._get_id(image) - return self._update(_member.Member, member_id=member_id, - image_id=image_id, **attrs) + member_id = resource.Resource._get_id(member) + image_id = resource.Resource._get_id(image) + return self._update( + _member.Member, + None, + member_id=member_id, + image_id=image_id, + **attrs, + ) + + # ====== METADEF NAMESPACES ====== + def create_metadef_namespace(self, **attrs): + """Create a new metadef namespace from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + comprised of the properties on the MetadefNamespace class. + + :returns: The results of metadef namespace creation + :rtype: :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + """ + return self._create(_metadef_namespace.MetadefNamespace, **attrs) + + def delete_metadef_namespace(self, metadef_namespace, ignore_missing=True): + """Delete a metadef namespace + + :param metadef_namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :param bool ignore_missing: When set to ``False``, + :class:`~openstack.exceptions.NotFoundException` will be raised + when the metadef namespace does not exist. 
+ :returns: ``None`` + """ + self._delete( + _metadef_namespace.MetadefNamespace, + metadef_namespace, + ignore_missing=ignore_missing, + ) + + # NOTE(stephenfin): There is no 'find_metadef_namespace' since namespaces + # are identified by the namespace name, not an arbitrary UUID, meaning + # 'find_metadef_namespace' would be identical to 'get_metadef_namespace' + + def get_metadef_namespace(self, metadef_namespace): + """Get a single metadef namespace + + :param metadef_namespace: Either the name of a metadef namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + + :returns: One + :class:`~~openstack.image.v2.metadef_namespace.MetadefNamespace` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + return self._get( + _metadef_namespace.MetadefNamespace, + metadef_namespace, + ) + + def metadef_namespaces(self, **query): + """Return a generator of metadef namespaces + + :returns: A generator object of metadef namespaces + :rtype: :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._list(_metadef_namespace.MetadefNamespace, **query) + + def update_metadef_namespace(self, metadef_namespace, **attrs): + """Update a server + + :param metadef_namespace: Either the name of a metadef namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :param attrs: The attributes to update on the metadef namespace + represented by ``metadef_namespace``. + + :returns: The updated metadef namespace + :rtype: :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + """ + # rather annoyingly, Glance insists on us providing the 'namespace' + # argument, even if we're not changing it... 
+ if 'namespace' not in attrs: + attrs['namespace'] = resource.Resource._get_id(metadef_namespace) + + return self._update( + _metadef_namespace.MetadefNamespace, + metadef_namespace, + **attrs, + ) + + def add_tag_to_metadef_namespace(self, namespace, tag): + """Add a tag to a metadef namespace + + :param metadef_namespace: Either the name of a metadef namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :param str tag: The tag to be added. + + :returns: None + """ + namespace = self._get_resource( + _metadef_namespace.MetadefNamespace, namespace + ) + namespace.add_tag(self, tag) + + def remove_tag_from_metadef_namespace(self, namespace, tag): + """Remove a tag from a metadef namespace + + :param metadef_namespace: Either the name of a metadef namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :param str tag: The tag to be removed. + + :returns: None + """ + namespace = self._get_resource( + _metadef_namespace.MetadefNamespace, namespace + ) + namespace.remove_tag(self, tag) + + def remove_tags_from_metadef_namespace(self, namespace): + """Remove all tags from a metadef namespace + + :param metadef_namespace: Either the name of a metadef namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + + :returns: None + """ + namespace = self._get_resource( + _metadef_namespace.MetadefNamespace, namespace + ) + namespace.remove_all_tags(self) + + # ====== METADEF OBJECT ====== + def create_metadef_object(self, namespace, **attrs): + """Create a new object from namespace + + :param namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.image.v2.metadef_object.MetadefObject`, + comprised of the properties on the Metadef object class. 
+ + :returns: A metadef namespace + :rtype: :class:`~openstack.image.v2.metadef_object.MetadefObject` + """ + namespace_name = resource.Resource._get_id(namespace) + return self._create( + _metadef_object.MetadefObject, + namespace_name=namespace_name, + **attrs, + ) + + def get_metadef_object(self, metadef_object, namespace): + """Get a single metadef object + + :param metadef_object: The value can be the ID of a metadef_object + or a + :class:`~openstack.image.v2.metadef_object.MetadefObject` + instance. + :param namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :returns: One :class:`~openstack.image.v2.metadef_object.MetadefObject` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + object_name = resource.Resource._get_id(metadef_object) + namespace_name = resource.Resource._get_id(namespace) + return self._get( + _metadef_object.MetadefObject, + namespace_name=namespace_name, + name=object_name, + ) + + def metadef_objects(self, namespace): + """Get metadef object list of the namespace + + :param namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + + :returns: One :class:`~openstack.image.v2.metadef_object.MetadefObject` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + namespace_name = resource.Resource._get_id(namespace) + return self._list( + _metadef_object.MetadefObject, + namespace_name=namespace_name, + ) + + def update_metadef_object(self, metadef_object, namespace, **attrs): + """Update a single metadef object + + :param metadef_object: The value can be the ID of a metadef_object or a + :class:`~openstack.image.v2.metadef_object.MetadefObject` instance. 
+ :param namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :param dict attrs: Keyword arguments which will be used to update + a :class:`~openstack.image.v2.metadef_object.MetadefObject` + + :returns: One :class:`~openstack.image.v2.metadef_object.MetadefObject` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + namespace_name = resource.Resource._get_id(namespace) + metadef_object = resource.Resource._get_id(metadef_object) + return self._update( + _metadef_object.MetadefObject, + metadef_object, + namespace_name=namespace_name, + **attrs, + ) + + def delete_metadef_object(self, metadef_object, namespace, **attrs): + """Removes a single metadef object + + :param metadef_object: The value can be the ID of a metadef_object or a + :class:`~openstack.image.v2.metadef_object.MetadefObject` instance. + :param namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :param dict attrs: Keyword arguments which will be used to update + a :class:`~openstack.image.v2.metadef_object.MetadefObject` + + :returns: ``None`` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + namespace_name = resource.Resource._get_id(namespace) + return self._delete( + _metadef_object.MetadefObject, + metadef_object, + namespace_name=namespace_name, + **attrs, + ) + + def delete_all_metadef_objects(self, namespace): + """Delete all objects + + :param namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + :returns: ``None`` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. 
+ """ + namespace = self._get_resource( + _metadef_namespace.MetadefNamespace, namespace + ) + return namespace.delete_all_objects(self) + + # ====== METADEF RESOURCE TYPES ====== + def metadef_resource_types(self, **query): + """Return a generator of metadef resource types + + :return: A generator object of metadef resource types + :rtype: + :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceType` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._list(_metadef_resource_type.MetadefResourceType, **query) + + # ====== METADEF RESOURCE TYPES ASSOCIATION====== + def create_metadef_resource_type_association( + self, + metadef_namespace, + **attrs, + ): + """Creates a resource type association between a namespace + and the resource type specified in the body of the request. + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` + comprised of the properties on the + MetadefResourceTypeAssociation class. + + :returns: The results of metadef resource type association creation + :rtype: + :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + return self._create( + _metadef_resource_type.MetadefResourceTypeAssociation, + namespace_name=namespace_name, + **attrs, + ) + + def delete_metadef_resource_type_association( + self, + metadef_resource_type, + metadef_namespace, + ignore_missing=True, + ): + """Removes a resource type association in a namespace. + + :param metadef_resource_type: The value can be either the name of + a metadef resource type association or an + :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` + instance. 
+ :param metadef_namespace: The value can be either the name of metadef + namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance + :param bool ignore_missing: When set to ``False``, + :class:`~openstack.exceptions.NotFoundException` will be raised + when the metadef resource type association does not exist. + :returns: ``None`` + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + self._delete( + _metadef_resource_type.MetadefResourceTypeAssociation, + metadef_resource_type, + namespace_name=namespace_name, + ignore_missing=ignore_missing, + ) + + def metadef_resource_type_associations(self, metadef_namespace, **query): + """Return a generator of metadef resource type associations + + :param metadef_namespace: The value can be either the name of metadef + namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance + :return: A generator object of metadef resource type associations + :rtype: + :class:`~openstack.image.v2.metadef_resource_type.MetadefResourceTypeAssociation` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + return self._list( + _metadef_resource_type.MetadefResourceTypeAssociation, + namespace_name=namespace_name, + **query, + ) + + # ====== METADEF PROPERTY ====== + def create_metadef_property(self, metadef_namespace, **attrs): + """Create a metadef property + + :param metadef_namespace: The value can be either the name of metadef + namespace or an + :class:`~openstack.image.v2.metadef_property.MetadefNamespace` + instance + :param attrs: The attributes to create on the metadef property + represented by ``metadef_property``. 
+ + :returns: The created metadef property + :rtype: :class:`~openstack.image.v2.metadef_property.MetadefProperty` + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + return self._create( + _metadef_property.MetadefProperty, + namespace_name=namespace_name, + **attrs, + ) + + def update_metadef_property( + self, metadef_property, metadef_namespace, **attrs + ): + """Update a metadef property + + :param metadef_property: The value can be either the name of metadef + property or an + :class:`~openstack.image.v2.metadef_property.MetadefProperty` + instance. + :param metadef_namespace: The value can be either the name of metadef + namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance + :param attrs: The attributes to update on the metadef property + represented by ``metadef_property``. + + :returns: The updated metadef property + :rtype: :class:`~openstack.image.v2.metadef_property.MetadefProperty` + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + metadef_property = resource.Resource._get_id(metadef_property) + return self._update( + _metadef_property.MetadefProperty, + metadef_property, + namespace_name=namespace_name, + **attrs, + ) + + def delete_metadef_property( + self, metadef_property, metadef_namespace, ignore_missing=True + ): + """Delete a metadef property + + :param metadef_property: The value can be either the name of metadef + property or an + :class:`~openstack.image.v2.metadef_property.MetadefProperty` + instance + :param metadef_namespace: The value can be either the name of metadef + namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance + :param bool ignore_missing: When set to + ``False`` :class:`~openstack.exceptions.NotFoundException` will be + raised when the instance does not exist. When set to ``True``, + no exception will be set when attempting to delete a nonexistent + instance. 
+ + :returns: ``None`` + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + metadef_property = resource.Resource._get_id(metadef_property) + return self._delete( + _metadef_property.MetadefProperty, + metadef_property, + namespace_name=namespace_name, + ignore_missing=ignore_missing, + ) + + def metadef_properties(self, metadef_namespace, **query): + """Return a generator of metadef properties + + :param metadef_namespace: The value can be either the name of metadef + namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of property objects + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + return self._list( + _metadef_property.MetadefProperty, + requires_id=False, + namespace_name=namespace_name, + **query, + ) + + def get_metadef_property( + self, metadef_property, metadef_namespace, **query + ): + """Get a single metadef property + + :param metadef_property: The value can be either the name of metadef + property or an + :class:`~openstack.image.v2.metadef_property.MetadefProperty` + instance. + :param metadef_namespace: The value can be either the name of metadef + namespace or an + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance + + :returns: One + :class:`~~openstack.image.v2.metadef_property.MetadefProperty` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + namespace_name = resource.Resource._get_id(metadef_namespace) + return self._get( + _metadef_property.MetadefProperty, + metadef_property, + namespace_name=namespace_name, + **query, + ) + + def delete_all_metadef_properties(self, metadef_namespace): + """Delete all metadata definitions property inside a namespace. 
+ + :param metadef_namespace: The value can be either the name of a metadef + namespace or a + :class:`~openstack.image.v2.metadef_namespace.MetadefNamespace` + instance. + + :returns: ``None`` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + namespace = self._get_resource( + _metadef_namespace.MetadefNamespace, metadef_namespace + ) + return namespace.delete_all_properties(self) + + # ====== SCHEMAS ====== + def get_images_schema(self): + """Get images schema + + :returns: One :class:`~openstack.image.v2.schema.Schema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _schema.Schema, + requires_id=False, + base_path='/schemas/images', + ) + + def get_image_schema(self): + """Get single image schema + + :returns: One :class:`~openstack.image.v2.schema.Schema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _schema.Schema, + requires_id=False, + base_path='/schemas/image', + ) + + def get_members_schema(self): + """Get image members schema + + :returns: One :class:`~openstack.image.v2.schema.Schema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _schema.Schema, + requires_id=False, + base_path='/schemas/members', + ) + + def get_member_schema(self): + """Get image member schema + + :returns: One :class:`~openstack.image.v2.schema.Schema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _schema.Schema, + requires_id=False, + base_path='/schemas/member', + ) + + def get_tasks_schema(self): + """Get image tasks schema + + :returns: One :class:`~openstack.image.v2.schema.Schema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get( + _schema.Schema, + requires_id=False, + base_path='/schemas/tasks', + ) + + def get_task_schema(self): + """Get image task schema + + :returns: One :class:`~openstack.image.v2.schema.Schema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _schema.Schema, + requires_id=False, + base_path='/schemas/task', + ) + + def get_metadef_namespace_schema(self): + """Get metadata definition namespace schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/namespace', + ) + + def get_metadef_namespaces_schema(self): + """Get metadata definition namespaces schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/namespaces', + ) + + def get_metadef_resource_type_schema(self): + """Get metadata definition resource type association schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/resource_type', + ) + + def get_metadef_resource_types_schema(self): + """Get metadata definition resource type associations schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/resource_types', + ) + + def get_metadef_object_schema(self): + """Get metadata definition object schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/object', + ) + + def get_metadef_objects_schema(self): + """Get metadata definition objects schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/objects', + ) + + def get_metadef_property_schema(self): + """Get metadata definition property schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/property', + ) + + def get_metadef_properties_schema(self): + """Get metadata definition properties schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/properties', + ) + + def get_metadef_tag_schema(self): + """Get metadata definition tag schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/tag', + ) + + def get_metadef_tags_schema(self): + """Get metadata definition tags schema + + :returns: One :class:`~openstack.image.v2.metadef_schema.MetadefSchema` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _metadef_schema.MetadefSchema, + requires_id=False, + base_path='/schemas/metadefs/tags', + ) + + # ====== TASKS ====== + def tasks(self, **query): + """Return a generator of tasks + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of task objects + :rtype: :class:`~openstack.image.v2.task.Task` + """ + return self._list(_task.Task, **query) + + def get_task(self, task): + """Get task details + + :param task: The value can be the ID of a task or a + :class:`~openstack.image.v2.task.Task` instance. + + :returns: One :class:`~openstack.image.v2.task.Task` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_task.Task, task) + + def create_task(self, **attrs): + """Create a new task from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.image.v2.task.Task`, + comprised of the properties on the Task class. + + :returns: The results of task creation + :rtype: :class:`~openstack.image.v2.task.Task` + """ + return self._create(_task.Task, **attrs) + + def wait_for_task( + self, + task, + status='success', + failures=None, + interval=2, + wait=120, + ): + """Wait for a task to be in a particular status. + + :param task: The resource to wait on to reach the specified status. + The resource must have a ``status`` attribute. + :type resource: A :class:`~openstack.resource.Resource` object. + :param status: Desired status. + :param failures: Statuses that would be interpreted as failures. 
+ :type failures: :py:class:`list` + :param interval: Number of seconds to wait before to consecutive + checks. Default to 2. + :param wait: Maximum number of seconds to wait before the change. + Default to 120. + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to the desired status failed to occur in specified seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + has transited to one of the failure statuses. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute. + """ + if failures is None: + failures = ['failure'] + else: + failures = [f.lower() for f in failures] + + if task.status.lower() == status.lower(): + return task + + name = f"{task.__class__.__name__}:{task.id}" + msg = f"Timeout waiting for {name} to transition to {status}" + + for count in utils.iterate_timeout( + timeout=wait, message=msg, wait=interval + ): + task = task.fetch(self) + + if not task: + raise exceptions.ResourceFailure( + f"{name} went away while waiting for {status}" + ) + + new_status = task.status + normalized_status = new_status.lower() + if normalized_status == status.lower(): + return task + elif normalized_status in failures: + if task.message == _IMAGE_ERROR_396: + task_args = {'input': task.input, 'type': task.type} + task = self.create_task(**task_args) + self.log.debug(f'Got error 396. 
Recreating task {task}') + else: + raise exceptions.ResourceFailure( + f"{name} transitioned to failure state {new_status}" + ) + + self.log.debug( + 'Still waiting for resource %s to reach state %s, ' + 'current state is %s', + name, + status, + new_status, + ) + + # ====== STORES ====== + def stores(self, details=False, **query): + """Return a generator of supported image stores + + :returns: A generator of store objects + :rtype: :class:`~openstack.image.v2.service_info.Store` + """ + if details: + query['base_path'] = utils.urljoin(_si.Store.base_path, 'detail') + return self._list(_si.Store, **query) + + # ====== IMPORTS ====== + def get_import_info(self): + """Get a info about image constraints + + :returns: One :class:`~openstack.image.v2.service_info.Import` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_si.Import, requires_id=False) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. 
This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+ """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + def _get_cleanup_dependencies(self): + return {'image': {'before': ['identity']}} + + def _service_cleanup( + self, + dry_run=True, + client_status_queue=None, + identified_resources=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + if self.should_skip_resource_cleanup("image", skip_resources): + return + + project_id = self.get_project_id() + + # Note that images cannot be deleted when they are still being used + for obj in self.images(owner=project_id): + self._service_cleanup_del_res( + self.delete_image, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) diff --git a/openstack/image/v2/cache.py b/openstack/image/v2/cache.py new file mode 100644 index 0000000000..b0ad60834d --- /dev/null +++ b/openstack/image/v2/cache.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class CachedImage(resource.Resource): + image_id = resource.Body('image_id') + hits = resource.Body('hits') + last_accessed = resource.Body('last_accessed') + last_modified = resource.Body('last_modified') + size = resource.Body('size') + + +class Cache(resource.Resource): + base_path = '/cache' + + allow_fetch = True + allow_delete = True + allow_create = True + + _max_microversion = '2.14' + + cached_images = resource.Body( + 'cached_images', + type=list, + list_type=CachedImage, + ) + queued_images = resource.Body('queued_images', type=list) + + def queue(self, session, image, *, microversion=None): + """Queue an image into cache. + :param session: The session to use for making this request + :param image: The image to be queued into cache. + :returns: The server response + """ + if microversion is None: + microversion = self._get_microversion(session) + image_id = resource.Resource._get_id(image) + url = utils.urljoin(self.base_path, image_id) + + response = session.put(url, microversion=microversion) + exceptions.raise_from_response(response) + return response + + # FIXME(stephenfin): This needs to be renamed as it conflicts with + # dict.clear + def clear(self, session, target='both'): # type: ignore[override] + """Clears the cache. + :param session: The session to use for making this request + :param target: Specify which target you want to clear + One of: ``both``(default), ``cache``, ``queue``. + :returns: The server response + """ + headers = {} + if target in ('cache', 'queue'): + headers = {'x-image-cache-clear-target': target} + elif target != "both": + raise exceptions.InvalidRequest( + 'Target must be "cache", "queue" or "both".' 
+ ) + response = session.delete(self.base_path, headers=headers) + exceptions.raise_from_response(response) + return response diff --git a/openstack/image/v2/image.py b/openstack/image/v2/image.py index 1cd450afb0..914e975655 100644 --- a/openstack/image/v2/image.py +++ b/openstack/image/v2/image.py @@ -10,36 +10,54 @@ # License for the specific language governing permissions and limitations # under the License. -import hashlib -import logging +import typing as ty +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack.common import tag from openstack import exceptions -from openstack.image import image_service -from openstack import resource2 +from openstack.image import _download +from openstack import resource from openstack import utils -_logger = logging.getLogger(__name__) - -class Image(resource2.Resource): +class Image(resource.Resource, tag.TagMixin, _download.DownloadMixin): resources_key = 'images' base_path = '/images' - service = image_service.ImageService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - patch_update = True + commit_method = 'PATCH' + commit_jsonpatch = True - _query_mapping = resource2.QueryParameters("name", "visibility", - "member_status", "owner", - "status", "size_min", - "size_max", "sort_key", - "sort_dir", "sort", "tag", - "created_at", "updated_at") + # Store all unknown attributes under 'properties' in the object. + # Remotely they would be still in the resource root + _store_unknown_attrs_as_properties = True + + _query_mapping = resource.QueryParameters( + "id", + "name", + "visibility", + "member_status", + "owner", + "status", + "size_min", + "size_max", + "protected", + "is_hidden", + "sort_key", + "sort_dir", + "sort", + "tag", + "created_at", + "updated_at", + is_hidden="os_hidden", + ) # NOTE: Do not add "self" support here. 
If you've used Python before, # you know that self, while not being a reserved word, has special @@ -55,164 +73,191 @@ class Image(resource2.Resource): # Properties #: Hash of the image data used. The Image service uses this value #: for verification. - checksum = resource2.Body('checksum') + checksum = resource.Body('checksum') #: The container format refers to whether the VM image is in a file #: format that also contains metadata about the actual VM. #: Container formats include OVF and Amazon AMI. In addition, #: a VM image might not have a container format - instead, #: the image is just a blob of unstructured data. - container_format = resource2.Body('container_format') + container_format = resource.Body('container_format') #: The date and time when the image was created. - created_at = resource2.Body('created_at') + created_at = resource.Body('created_at') #: Valid values are: aki, ari, ami, raw, iso, vhd, vdi, qcow2, or vmdk. #: The disk format of a VM image is the format of the underlying #: disk image. Virtual appliance vendors have different formats #: for laying out the information contained in a VM disk image. - disk_format = resource2.Body('disk_format') + disk_format = resource.Body('disk_format') + #: This field controls whether an image is displayed in the default + #: image-list response + is_hidden = resource.Body('os_hidden', type=bool) #: Defines whether the image can be deleted. #: *Type: bool* - is_protected = resource2.Body('protected', type=bool) + is_protected = resource.Body('protected', type=bool) + #: The algorithm used to compute a secure hash of the image data + #: for this image + hash_algo = resource.Body('os_hash_algo') + #: The hexdigest of the secure hash of the image data computed using + #: the algorithm whose name is the value of the os_hash_algo property. + hash_value = resource.Body('os_hash_value') #: The minimum disk size in GB that is required to boot the image. 
- min_disk = resource2.Body('min_disk') + min_disk = resource.Body('min_disk') #: The minimum amount of RAM in MB that is required to boot the image. - min_ram = resource2.Body('min_ram') + min_ram = resource.Body('min_ram') #: The name of the image. - name = resource2.Body('name') + name = resource.Body('name') #: The ID of the owner, or project, of the image. - owner_id = resource2.Body('owner') + owner = resource.Body('owner', alias='owner_id') + #: The ID of the owner, or project, of the image. (backwards compat) + owner_id = resource.Body('owner', alias='owner') + # TODO(mordred) This is not how this works in v2. I mean, it's how it + # should work, but it's not. We need to fix properties. They work right + # in shade, so we can draw some logic from there. #: Properties, if any, that are associated with the image. - properties = resource2.Body('properties', type=dict) + properties = resource.Body('properties') #: The size of the image data, in bytes. - size = resource2.Body('size', type=int) + size = resource.Body('size', type=int) #: When present, Glance will attempt to store the disk image data in the #: backing store indicated by the value of the header. When not present, #: Glance will store the disk image data in the backing store that is #: marked default. Valid values are: file, s3, rbd, swift, cinder, #: gridfs, sheepdog, or vsphere. - store = resource2.Body('store') + store = resource.Body('store') #: The image status. - status = resource2.Body('status') - #: Tags, if any, that are associated with the image. - tags = resource2.Body('tags') + status = resource.Body('status') #: The date and time when the image was updated. - updated_at = resource2.Body('updated_at') + updated_at = resource.Body('updated_at') #: The virtual size of the image. - virtual_size = resource2.Body('virtual_size') + virtual_size = resource.Body('virtual_size') #: The image visibility. 
- visibility = resource2.Body('visibility') + visibility = resource.Body('visibility') #: The URL for the virtual machine image file. - file = resource2.Body('file') + file = resource.Body('file') #: A list of URLs to access the image file in external store. #: This list appears if the show_multiple_locations option is set #: to true in the Image service's configuration file. - locations = resource2.Body('locations') + locations = resource.Body('locations') #: The URL to access the image file kept in external store. It appears #: when you set the show_image_direct_url option to true in the #: Image service's configuration file. - direct_url = resource2.Body('direct_url') - #: An image property. - path = resource2.Body('path') - #: Value of image property used in add or replace operations expressed - #: in JSON notation. For example, you must enclose strings in quotation - #: marks, and you do not enclose numeric values in quotation marks. - value = resource2.Body('value') + direct_url = resource.Body('direct_url') #: The URL to access the image file kept in external store. - url = resource2.Body('url') + url = resource.Body('url') #: The location metadata. - metadata = resource2.Body('metadata', type=dict) + metadata = resource.Body('metadata', type=dict) # Additional Image Properties - # http://docs.openstack.org/developer/glance/common-image-properties.html + # https://docs.openstack.org/glance/latest/user/common-image-properties.html # http://docs.openstack.org/cli-reference/glance-property-keys.html #: The CPU architecture that must be supported by the hypervisor. - architecture = resource2.Body("architecture") + architecture = resource.Body("architecture") #: The hypervisor type. Note that qemu is used for both QEMU and #: KVM hypervisor types. 
- hypervisor_type = resource2.Body("hypervisor-type") + hypervisor_type = resource.Body("hypervisor_type") #: Optional property allows created servers to have a different bandwidth #: cap than that defined in the network they are attached to. - instance_type_rxtx_factor = resource2.Body("instance_type_rxtx_factor", - type=float) + instance_type_rxtx_factor = resource.Body( + "instance_type_rxtx_factor", + type=float, + ) # For snapshot images, this is the UUID of the server used to #: create this image. - instance_uuid = resource2.Body('instance_uuid') + instance_uuid = resource.Body('instance_uuid') #: Specifies whether the image needs a config drive. #: `mandatory` or `optional` (default if property is not used). - needs_config_drive = resource2.Body('img_config_drive') + needs_config_drive = resource.Body('img_config_drive') #: The ID of an image stored in the Image service that should be used #: as the kernel when booting an AMI-style image. - kernel_id = resource2.Body('kernel_id') + kernel_id = resource.Body('kernel_id') #: The common name of the operating system distribution in lowercase - os_distro = resource2.Body('os_distro') + os_distro = resource.Body('os_distro') #: The operating system version as specified by the distributor. - os_version = resource2.Body('os_version') + os_version = resource.Body('os_version') #: Secure Boot is a security standard. When the instance starts, #: Secure Boot first examines software such as firmware and OS by #: their signature and only allows them to run if the signatures are valid. - needs_secure_boot = resource2.Body('os_secure_boot') + needs_secure_boot = resource.Body('os_secure_boot') + #: Time for graceful shutdown + os_shutdown_timeout = resource.Body('os_shutdown_timeout', type=int) #: The ID of image stored in the Image service that should be used as #: the ramdisk when booting an AMI-style image. - ramdisk_id = resource2.Body('ramdisk_id') + ramdisk_id = resource.Body('ramdisk_id') #: The virtual machine mode. 
This represents the host/guest ABI #: (application binary interface) used for the virtual machine. - vm_mode = resource2.Body('vm_mode') + vm_mode = resource.Body('vm_mode') #: The preferred number of sockets to expose to the guest. - hw_cpu_sockets = resource2.Body('hw_cpu_sockets', type=int) + hw_cpu_sockets = resource.Body('hw_cpu_sockets', type=int) #: The preferred number of cores to expose to the guest. - hw_cpu_cores = resource2.Body('hw_cpu_cores', type=int) + hw_cpu_cores = resource.Body('hw_cpu_cores', type=int) #: The preferred number of threads to expose to the guest. - hw_cpu_threads = resource2.Body('hw_cpu_threads', type=int) + hw_cpu_threads = resource.Body('hw_cpu_threads', type=int) #: Specifies the type of disk controller to attach disk devices to. #: One of scsi, virtio, uml, xen, ide, or usb. - hw_disk_bus = resource2.Body('hw_disk_bus') + hw_disk_bus = resource.Body('hw_disk_bus') + #: Used to pin the virtual CPUs (vCPUs) of instances to the + #: host's physical CPU cores (pCPUs). + hw_cpu_policy = resource.Body('hw_cpu_policy') + #: Defines how hardware CPU threads in a simultaneous + #: multithreading-based (SMT) architecture be used. + hw_cpu_thread_policy = resource.Body('hw_cpu_thread_policy') #: Adds a random-number generator device to the image's instances. - hw_rng_model = resource2.Body('hw_rng_model') + hw_rng_model = resource.Body('hw_rng_model') #: For libvirt: Enables booting an ARM system using the specified #: machine type. #: For Hyper-V: Specifies whether the Hyper-V instance will be a #: generation 1 or generation 2 VM. - hw_machine_type = resource2.Body('hw_machine_type') + hw_machine_type = resource.Body('hw_machine_type') #: Enables the use of VirtIO SCSI (virtio-scsi) to provide block device #: access for compute instances; by default, instances use VirtIO Block #: (virtio-blk). 
- hw_scsi_model = resource2.Body('hw_scsi_model') + hw_scsi_model = resource.Body('hw_scsi_model') #: Specifies the count of serial ports that should be provided. - hw_serial_port_count = resource2.Body('hw_serial_port_count', type=int) + hw_serial_port_count = resource.Body('hw_serial_port_count', type=int) #: The video image driver used. - hw_video_model = resource2.Body('hw_video_model') + hw_video_model = resource.Body('hw_video_model') #: Maximum RAM for the video image. - hw_video_ram = resource2.Body('hw_video_ram', type=int) + hw_video_ram = resource.Body('hw_video_ram', type=int) #: Enables a virtual hardware watchdog device that carries out the #: specified action if the server hangs. - hw_watchdog_action = resource2.Body('hw_watchdog_action') + hw_watchdog_action = resource.Body('hw_watchdog_action') #: The kernel command line to be used by the libvirt driver, instead #: of the default. - os_command_line = resource2.Body('os_command_line') + os_command_line = resource.Body('os_command_line') #: Specifies the model of virtual network interface device to use. - hw_vif_model = resource2.Body('hw_vif_model') + hw_vif_model = resource.Body('hw_vif_model') #: If true, this enables the virtio-net multiqueue feature. #: In this case, the driver sets the number of queues equal to the #: number of guest vCPUs. This makes the network performance scale #: across a number of vCPUs. - is_hw_vif_multiqueue_enabled = resource2.Body('hw_vif_multiqueue_enabled', - type=bool) + is_hw_vif_multiqueue_enabled = resource.Body( + 'hw_vif_multiqueue_enabled', + type=bool, + ) #: If true, enables the BIOS bootmenu. - is_hw_boot_menu_enabled = resource2.Body('hw_boot_menu', type=bool) + is_hw_boot_menu_enabled = resource.Body('hw_boot_menu', type=bool) #: The virtual SCSI or IDE controller used by the hypervisor. 
- vmware_adaptertype = resource2.Body('vmware_adaptertype') + vmware_adaptertype = resource.Body('vmware_adaptertype') #: A VMware GuestID which describes the operating system installed #: in the image. - vmware_ostype = resource2.Body('vmware_ostype') + vmware_ostype = resource.Body('vmware_ostype') #: If true, the root partition on the disk is automatically resized #: before the instance boots. - has_auto_disk_config = resource2.Body('auto_disk_config', type=bool) + has_auto_disk_config = resource.Body('auto_disk_config') #: The operating system installed on the image. - os_type = resource2.Body('os_type') + os_type = resource.Body('os_type') + #: The operating system admin username. + os_admin_user = resource.Body('os_admin_user') + #: A string boolean, which if "true", QEMU guest agent will be exposed + #: to the instance. + hw_qemu_guest_agent = resource.Body('hw_qemu_guest_agent', type=str) + #: If true, require quiesce on snapshot via QEMU guest agent. + os_require_quiesce = resource.Body('os_require_quiesce', type=bool) + #: The URL for the schema describing a virtual machine image. 
+ schema = resource.Body('schema') def _action(self, session, action): """Call an action on an image ID.""" url = utils.urljoin(self.base_path, self.id, 'actions', action) - return session.post(url, endpoint_filter=self.service) + return session.post(url) def deactivate(self, session): """Deactivate an image @@ -229,49 +274,218 @@ def reactivate(self, session): """ self._action(session, "reactivate") - def add_tag(self, session, tag): - """Add a tag to an image""" - url = utils.urljoin(self.base_path, self.id, 'tags', tag) - session.put(url, endpoint_filter=self.service) - - def remove_tag(self, session, tag): - """Remove a tag from an image""" - url = utils.urljoin(self.base_path, self.id, 'tags', tag) - session.delete(url, endpoint_filter=self.service) + def upload(self, session, *, data=None): + """Upload data into an existing image - def upload(self, session): - """Upload data into an existing image""" - url = utils.urljoin(self.base_path, self.id, 'file') - session.put(url, endpoint_filter=self.service, data=self.data, - headers={"Content-Type": "application/octet-stream", - "Accept": ""}) - - def download(self, session): - """Download the data contained in an image""" - # TODO(briancurtin): This method should probably offload the get - # operation into another thread or something of that nature. + :param session: The session to use for making this request + :param data: Optional data to be uploaded. If not provided, the + `~Image.data` attribute will be used + :returns: The server response + """ + if data: + self.data = data url = utils.urljoin(self.base_path, self.id, 'file') - resp = session.get(url, endpoint_filter=self.service) - - # See the following bug report for details on why the checksum - # code may sometimes depend on a second GET call. 
- # https://bugs.launchpad.net/python-openstacksdk/+bug/1619675 - checksum = resp.headers.get("Content-MD5") - - if checksum is None: - # If we don't receive the Content-MD5 header with the download, - # make an additional call to get the image details and look at - # the checksum attribute. - details = self.get(session) - checksum = details.checksum - - if checksum is not None: - digest = hashlib.md5(resp.content).hexdigest() - if digest != checksum: - raise exceptions.InvalidResponse( - "checksum mismatch: %s != %s" % (checksum, digest)) + return session.put( + url, + data=self.data, + headers={"Content-Type": "application/octet-stream", "Accept": ""}, + ) + + def stage(self, session, *, data=None): + """Stage binary image data into an existing image + + :param session: The session to use for making this request + :param data: Optional data to be uploaded. If not provided, the + `~Image.data` attribute will be used + :returns: The server response + """ + if data: + self.data = data + + url = utils.urljoin(self.base_path, self.id, 'stage') + response = session.put( + url, + data=self.data, + headers={"Content-Type": "application/octet-stream", "Accept": ""}, + ) + self._translate_response(response, has_body=False) + return self + + def import_image( + self, + session, + method='glance-direct', + *, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + store=None, + stores=None, + all_stores=None, + all_stores_must_succeed=None, + ): + """Import Image via interoperable image import process""" + if all_stores and (store or stores): + raise exceptions.InvalidRequest( + 'all_stores is mutually exclusive with store and stores' + ) + if store and stores: + raise exceptions.InvalidRequest( + 'store and stores are mutually exclusive. stores should be ' + 'preferred.' 
+ ) + if store: + stores = [store] + else: + stores = stores or [] + + url = utils.urljoin(self.base_path, self.id, 'import') + data: dict[str, ty.Any] = {'method': {'name': method}} + + if uri: + if method != 'web-download': + raise exceptions.InvalidRequest( + 'URI is only supported with method: "web-download"' + ) + data['method']['uri'] = uri + + if remote_region and remote_image_id: + if remote_service_interface: + data['method']['glance_service_interface'] = ( + remote_service_interface + ) + data['method']['glance_region'] = remote_region + data['method']['glance_image_id'] = remote_image_id + + if all_stores is not None: + data['all_stores'] = all_stores + if all_stores_must_succeed is not None: + data['all_stores_must_succeed'] = all_stores_must_succeed + if stores: + data['stores'] = [s.id for s in stores] + + headers = {} + # Backward compat + if store is not None: + headers = {'X-Image-Meta-Store': store.id} + + return session.post(url, json=data, headers=headers) + + def _consume_header_attrs(self, attrs): + self.image_import_methods = [] + _image_import_methods = attrs.pop('OpenStack-image-import-methods', '') + if _image_import_methods: + self.image_import_methods = _image_import_methods.split(',') + + return super()._consume_header_attrs(attrs) + + def _prepare_request( + self, + requires_id=None, + prepend_key=False, + patch=False, + base_path=None, + params=None, + **kwargs, + ): + request = super()._prepare_request( + requires_id=requires_id, + prepend_key=prepend_key, + patch=patch, + base_path=base_path, + params=params, + **kwargs, + ) + if patch: + headers = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + 'Accept': '', + } + request.headers.update(headers) + + return request + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = 
None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... + + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: + # Do a regular search first (ignoring missing) + result = super().find( + session, + name_or_id, + ignore_missing=True, + list_base_path=list_base_path, + microversion=microversion, + all_projects=all_projects, + **params, + ) + + if result: + return result else: - _logger.warn( - "Unable to verify the integrity of image %s" % (self.id)) + # Search also in hidden images + params['is_hidden'] = True + data = cls.list(session, **params) + + result = cls._get_one_match(name_or_id, data) + if result is not None: + return result - return resp.content + if ignore_missing: + return None + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) diff --git a/openstack/image/v2/image_tasks.py b/openstack/image/v2/image_tasks.py new file mode 100644 index 0000000000..d38cda9b04 --- /dev/null +++ b/openstack/image/v2/image_tasks.py @@ -0,0 +1,57 @@ +# Copyright 2024 RedHat Inc. +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ImageTasks(resource.Resource): + resources_key = 'tasks' + base_path = '/images/%(image_id)s/tasks' + + allow_list = True + + _max_microversion = '2.17' + + #: The type of task represented by this content + type = resource.Body('type') + #: The current status of this task. The value can be pending, processing, + #: success or failure + status = resource.Body('status') + #: An identifier for the owner of the task, usually the tenant ID + owner = resource.Body('owner') + #: The date and time when the task is subject to removal (ISO8601 format) + expires_at = resource.Body('expires_at') + #: The date and time when the task was created (ISO8601 format) + created_at = resource.Body('created_at') + #: The date and time when the task was updated (ISO8601 format) + updated_at = resource.Body('updated_at') + #: The date and time when the task was deleted (ISO8601 format) + deleted_at = resource.Body('deleted_at') + #: Whether the task was deleted + deleted = resource.Body('deleted') + #: The ID of the image associated to this task + image_id = resource.Body('image_id') + #: The request ID of the user message + request_id = resource.Body('request_id') + #: The user id associated with this task + user_id = resource.Body('user_id') + #: A JSON object specifying the input parameters to the task + input = resource.Body('input') + #: A JSON object specifying information about the ultimate 
outcome of the + #: task + result = resource.Body('result') + #: Human-readable text, possibly an empty string, usually displayed in a + #: error situation to provide more information about what has occurred + message = resource.Body('message') diff --git a/openstack/image/v2/member.py b/openstack/image/v2/member.py index 5548efd7c5..b9568545d5 100644 --- a/openstack/image/v2/member.py +++ b/openstack/image/v2/member.py @@ -10,19 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.image import image_service -from openstack import resource2 +from openstack import resource -class Member(resource2.Resource): +class Member(resource.Resource): resources_key = 'members' base_path = '/images/%(image_id)s/members' - service = image_service.ImageService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True @@ -32,14 +30,14 @@ class Member(resource2.Resource): #: The ID of the image member. An image member is a tenant #: with whom the image is shared. - member_id = resource2.Body('member', alternate_id=True) + member_id = resource.Body('member', alternate_id=True) #: The date and time when the member was created. - created_at = resource2.Body('created_at') + created_at = resource.Body('created_at') #: Image ID stored through the image API. Typically a UUID. - image_id = resource2.URI('image_id') + image_id = resource.URI('image_id') #: The status of the image. - status = resource2.Body('status') + status = resource.Body('status') #: The URL for schema of the member. - schema = resource2.Body('schema') + schema = resource.Body('schema') #: The date and time when the member was updated. 
- updated_at = resource2.Body('updated_at') + updated_at = resource.Body('updated_at') diff --git a/openstack/image/v2/metadef_namespace.py b/openstack/image/v2/metadef_namespace.py new file mode 100644 index 0000000000..b7f3e50ac1 --- /dev/null +++ b/openstack/image/v2/metadef_namespace.py @@ -0,0 +1,151 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import typing_extensions as ty_ext + +from openstack.common import tag +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class MetadefNamespace(resource.Resource, tag.TagMixin): + resources_key = 'namespaces' + base_path = '/metadefs/namespaces' + + allow_create = True + allow_fetch = True + allow_commit = True + allow_list = True + allow_delete = True + + _query_mapping = resource.QueryParameters( + "limit", + "marker", + "resource_types", + "sort_dir", + "sort_key", + "visibility", + ) + + created_at = resource.Body('created_at') + description = resource.Body('description') + display_name = resource.Body('display_name') + is_protected = resource.Body('protected', type=bool) + namespace = resource.Body('namespace', alternate_id=True) + owner = resource.Body('owner') + resource_type_associations = resource.Body( + 'resource_type_associations', + type=list, + list_type=dict, + ) + updated_at = resource.Body('updated_at') + visibility = resource.Body('visibility') + + def _commit( + self, + session, + request, + method, + microversion, + has_body=True, + 
retry_on_conflict=None, + ): + # Rather annoyingly, Glance insists on us providing the 'namespace' + # argument, even if we're not changing it. We need to add this here + # since it won't be included if Resource.commit thinks its unchanged + # TODO(stephenfin): Eventually we could indicate attributes that are + # required in the body on update, like the 'requires_id' and + # 'create_requires_id' do for the ID in the URL + request.body['namespace'] = self.namespace + + return super()._commit( + session, + request, + method, + microversion, + has_body=True, + retry_on_conflict=None, + ) + + def _delete_all(self, session, url): + response = session.delete(url) + exceptions.raise_from_response(response) + self._translate_response(response, has_body=False) + return self + + def delete_all_properties(self, session): + """Delete all properties in a namespace. + + :param session: The session to use for making this request + :returns: The server response + """ + + url = utils.urljoin(self.base_path, self.id, 'properties') + return self._delete_all(session, url) + + def delete_all_objects(self, session): + """Delete all objects in a namespace. + + :param session: The session to use for making this request + :returns: The server response + """ + url = utils.urljoin(self.base_path, self.id, 'objects') + return self._delete_all(session, url) + + # NOTE(mrjoshi): This method is re-implemented as we require a ``POST`` + # call while the original method does a ``PUT`` call. + def add_tag(self, session: resource.AdapterT, tag: str) -> ty_ext.Self: + """Adds a single tag to the resource. + + :param session: The session to use for making this request. + :param tag: The tag as a string. 
+ """ + url = utils.urljoin(self.base_path, self.id, 'tags', tag) + session = self._get_session(session) + response = session.post(url) + exceptions.raise_from_response(response) + # we do not want to update tags directly + tags = self.tags + tags.append(tag) + self._body.attributes.update({'tags': tags}) + return self + + # NOTE(mrjoshi): This method is re-implemented to add support for the + # 'append' option. This method uses a ``POST`` call rather than the + # standard ``PUT`` call. + def set_tags( + self, session: resource.AdapterT, tags: list[str], append: bool = False + ) -> ty_ext.Self: + """Sets/Replaces all tags on the resource. + + :param session: The session to use for making this request. + :param list tags: List with tags to be set on the resource + :param append: If set to true, adds new tags to existing tags, + else overwrites the existing tags with new ones. + """ + url = utils.urljoin(self.base_path, self.id, 'tags') + session = self._get_session(session) + + headers = {'X-OpenStack-Append': 'False'} + if append: + headers['X-Openstack-Append'] = 'True' + + response = session.post( + url, headers=headers, json={'tags': [{'name': x} for x in tags]} + ) + exceptions.raise_from_response(response) + + self._body.attributes.update({'tags': tags}) + + return self diff --git a/openstack/image/v2/metadef_object.py b/openstack/image/v2/metadef_object.py new file mode 100644 index 0000000000..fede230ee6 --- /dev/null +++ b/openstack/image/v2/metadef_object.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class MetadefObject(resource.Resource): + resources_key = 'objects' + base_path = '/metadefs/namespaces/%(namespace_name)s/objects' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + "visibility", + "resource_types", + "sort_key", + "sort_dir", + ) + + created_at = resource.Body('created_at') + description = resource.Body('description') + name = resource.Body('name', alternate_id=True) + namespace_name = resource.URI('namespace_name') + properties = resource.Body('properties') + required = resource.Body('required') + updated_at = resource.Body('updated_at') diff --git a/openstack/image/v2/metadef_property.py b/openstack/image/v2/metadef_property.py new file mode 100644 index 0000000000..20dcc808c7 --- /dev/null +++ b/openstack/image/v2/metadef_property.py @@ -0,0 +1,182 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import fields +from openstack import resource + + +class MetadefProperty(resource.Resource): + base_path = '/metadefs/namespaces/%(namespace_name)s/properties' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + #: An identifier (a name) for the namespace. 
+    namespace_name = resource.URI('namespace_name')
+    #: The name of the property
+    name = resource.Body('name', alternate_id=True)
+    #: The property type.
+    type = resource.Body('type')
+    #: The title of the property.
+    title = resource.Body('title')
+    #: Detailed description of the property.
+    description = resource.Body('description')
+    #: A list of operators
+    operators = resource.Body('operators', type=list)
+    #: Default property description.
+    default = resource.Body('default')
+    #: Indicates whether this is a read-only property.
+    is_readonly = resource.Body('readonly', type=bool)
+    #: Minimum allowed numerical value.
+    minimum = resource.Body('minimum', type=int)
+    #: Maximum allowed numerical value.
+    maximum = resource.Body('maximum', type=int)
+    #: Enumerated list of property values.
+    enum = resource.Body('enum', type=list)
+    #: A regular expression
+    #: (`ECMA 262 <https://262.ecma-international.org/>`_)
+    #: that a string value must match.
+    pattern = resource.Body('pattern')
+    #: Minimum allowed string length.
+    min_length = resource.Body('minLength', type=int, default=0)
+    #: Maximum allowed string length.
+    max_length = resource.Body('maxLength', type=int)
+    # FIXME(stephenfin): This is causing conflicts due to the 'dict.items'
+    # method. Perhaps we need to rename it?
+    #: Schema for the items in an array.
+    items = resource.Body('items', type=dict)
+    #: Indicates whether all values in the array must be distinct.
+    require_unique_items = resource.Body(
+        'uniqueItems', type=bool, default=False
+    )
+    #: Minimum length of an array.
+    min_items = resource.Body('minItems', type=int, default=0)
+    #: Maximum length of an array.
+    max_items = resource.Body('maxItems', type=int)
+    #: Describes extra items, if you use tuple typing. If the value of
+    #: ``items`` is an array (tuple typing) and the instance is longer than
+    #: the list of schemas in ``items``, the additional items are described by
+    #: the schema in this property. 
If this value is ``false``, the instance + #: cannot be longer than the list of schemas in ``items``. If this value + #: is ``true``, that is equivalent to the empty schema (anything goes). + allow_additional_items = resource.Body('additionalItems', type=bool) + + # TODO(stephenfin): It would be nicer if we could do this in Resource + # itself since the logic is also found elsewhere (e.g. + # openstack.identity.v2.extension.Extension) but that code is a bit of a + # rat's nest right now and needs a spring clean + @classmethod + def list( + cls, + session, + paginated=True, + base_path=None, + allow_unknown_params=False, + *, + microversion=None, + **params, + ): + """This method is a generator which yields resource objects. + + A re-implementation of :meth:`~openstack.resource.Resource.list` that + handles glance's single, unpaginated list implementation. + + Refer to :meth:`~openstack.resource.Resource.list` for full + documentation including parameter, exception and return type + documentation. 
+ """ + session = cls._get_session(session) + + if microversion is None: + microversion = cls._get_microversion(session) + + if base_path is None: + base_path = cls.base_path + + # There is no server-side filtering, only client-side + client_filters = {} + # Gather query parameters which are not supported by the server + for k, v in params.items(): + if ( + # Known attr + hasattr(cls, k) + # Is real attr property + and isinstance(getattr(cls, k), fields.Body) + # not included in the query_params + and k not in cls._query_mapping._mapping.keys() + ): + client_filters[k] = v + + uri = base_path % params + uri_params = {} + + for k, v in params.items(): + # We need to gather URI parts to set them on the resource later + if hasattr(cls, k) and isinstance(getattr(cls, k), fields.URI): + uri_params[k] = v + + def _dict_filter(f, d): + """Dict param based filtering""" + if not d: + return False + for key in f.keys(): + if isinstance(f[key], dict): + if not _dict_filter(f[key], d.get(key, None)): + return False + elif d.get(key, None) != f[key]: + return False + return True + + response = session.get( + uri, + headers={"Accept": "application/json"}, + params={}, + microversion=microversion, + ) + exceptions.raise_from_response(response) + data = response.json() + + for name, property_data in data['properties'].items(): + property = { + 'name': name, + **property_data, + **uri_params, + } + value = cls.existing( + microversion=microversion, + connection=session._get_connection(), + **property, + ) + + filters_matched = True + # Iterate over client filters and return only if matching + for key in client_filters.keys(): + if isinstance(client_filters[key], dict): + if not _dict_filter( + client_filters[key], + value.get(key, None), + ): + filters_matched = False + break + elif value.get(key, None) != client_filters[key]: + filters_matched = False + break + + if filters_matched: + yield value + + return None diff --git a/openstack/image/v2/metadef_resource_type.py 
b/openstack/image/v2/metadef_resource_type.py new file mode 100644 index 0000000000..4e28b04b55 --- /dev/null +++ b/openstack/image/v2/metadef_resource_type.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class MetadefResourceType(resource.Resource): + resources_key = 'resource_types' + base_path = '/metadefs/resource_types' + + # capabilities + allow_list = True + + #: The name of metadata definition resource type + name = resource.Body('name', alternate_id=True) + #: The date and time when the resource type was created. + created_at = resource.Body('created_at') + #: The date and time when the resource type was updated. + updated_at = resource.Body('updated_at') + + +class MetadefResourceTypeAssociation(resource.Resource): + resources_key = 'resource_type_associations' + base_path = '/metadefs/namespaces/%(namespace_name)s/resource_types' + + # capabilities + allow_create = True + allow_delete = True + allow_list = True + + #: The name of the namespace whose details you want to see. + namespace_name = resource.URI('namespace_name') + #: The name of metadata definition resource type + name = resource.Body('name', alternate_id=True) + #: The date and time when the resource type was created. + created_at = resource.Body('created_at') + #: The date and time when the resource type was updated. 
+ updated_at = resource.Body('updated_at') + #: Prefix for any properties in the namespace that you want to apply + #: to the resource type. If you specify a prefix, you must append + #: a prefix separator, such as the colon (:) character. + prefix = resource.Body('prefix') + #: Some resource types allow more than one key and value pair + #: for each instance. For example, the Image service allows + #: both user and image metadata on volumes. The properties_target parameter + #: enables a namespace target to remove the ambiguity + properties_target = resource.Body('properties_target') diff --git a/openstack/image/v2/metadef_schema.py b/openstack/image/v2/metadef_schema.py new file mode 100644 index 0000000000..b5e4fee689 --- /dev/null +++ b/openstack/image/v2/metadef_schema.py @@ -0,0 +1,29 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class MetadefSchema(resource.Resource): + base_path = '/schemas/metadefs' + + # capabilities + allow_fetch = True + + #: A boolean value that indicates allows users to add custom properties. + additional_properties = resource.Body('additionalProperties', type=bool) + #: A set of definitions. + definitions = resource.Body('definitions', type=dict) + #: A list of required resources. + required = resource.Body('required', type=list) + #: Schema properties. 
+ properties = resource.Body('properties', type=dict) diff --git a/openstack/image/v2/schema.py b/openstack/image/v2/schema.py new file mode 100644 index 0000000000..f67e004371 --- /dev/null +++ b/openstack/image/v2/schema.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Schema(resource.Resource): + base_path = '/schemas' + + # capabilities + allow_fetch = True + + #: Additional properties + additional_properties = resource.Body('additionalProperties', type=dict) + #: Schema properties + properties = resource.Body('properties', type=dict) diff --git a/openstack/image/v2/service_info.py b/openstack/image/v2/service_info.py new file mode 100644 index 0000000000..24f82c777b --- /dev/null +++ b/openstack/image/v2/service_info.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Import(resource.Resource): + base_path = '/info/import' + + # capabilities + allow_fetch = True + + #: import methods + import_methods = resource.Body('import-methods', type=dict) + + +class Store(resource.Resource): + resources_key = 'stores' + base_path = '/info/stores' + + # capabilities + allow_list = True + + #: Description of the store + description = resource.Body('description') + #: default + is_default = resource.Body('default', type=bool) + #: properties + properties = resource.Body('properties', type=dict) + + def delete_image(self, session, image, *, ignore_missing=False): + """Delete image from store + + :param session: The session to use for making this request. + :param image: The value can be either the ID of an image or a + :class:`~openstack.image.v2.image.Image` instance. + + :returns: The result of the ``delete`` if resource found, else None. + :raises: :class:`~openstack.exceptions.NotFoundException` when + ignore_missing if ``False`` and a nonexistent resource + is attempted to be deleted. + """ + image_id = resource.Resource._get_id(image) + url = utils.urljoin('/stores', self.id, image_id) + + try: + response = session.delete(url) + exceptions.raise_from_response(response) + except exceptions.NotFoundException: + if ignore_missing: + return None + raise + + return response diff --git a/openstack/image/v2/task.py b/openstack/image/v2/task.py new file mode 100644 index 0000000000..9c14b848d2 --- /dev/null +++ b/openstack/image/v2/task.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Task(resource.Resource): + resources_key = 'tasks' + base_path = '/tasks' + + # capabilities + allow_create = True + allow_fetch = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'type', 'status', 'sort_dir', 'sort_key' + ) + + #: The date and time when the task was created. + created_at = resource.Body('created_at') + #: The date and time when the task is subject to removal. + expires_at = resource.Body('expires_at') + #: A JSON object specifying the input parameters to the task. + input = resource.Body('input') + #: Human-readable text, possibly an empty string, usually displayed + #: in an error situation to provide more information about what + #: has occurred. + message = resource.Body('message') + #: The ID of the owner, or project, of the task. + owner_id = resource.Body('owner') + #: A JSON object specifying the outcome of the task. + result = resource.Body('result') + #: The URL for schema of the task. + schema = resource.Body('schema') + #: The status of the task. + status = resource.Body('status') + #: The type of task represented by this content. + type = resource.Body('type') + #: The date and time when the task was updated. 
+ updated_at = resource.Body('updated_at') diff --git a/openstack/tests/unit/bare_metal/v1/__init__.py b/openstack/instance_ha/__init__.py similarity index 100% rename from openstack/tests/unit/bare_metal/v1/__init__.py rename to openstack/instance_ha/__init__.py diff --git a/openstack/instance_ha/instance_ha_service.py b/openstack/instance_ha/instance_ha_service.py new file mode 100644 index 0000000000..0d311fb187 --- /dev/null +++ b/openstack/instance_ha/instance_ha_service.py @@ -0,0 +1,24 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack.instance_ha.v1 import _proxy +from openstack import service_description + + +class InstanceHaService(service_description.ServiceDescription[_proxy.Proxy]): + """The HA service.""" + + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/tests/unit/block_store/__init__.py b/openstack/instance_ha/v1/__init__.py similarity index 100% rename from openstack/tests/unit/block_store/__init__.py rename to openstack/instance_ha/v1/__init__.py diff --git a/openstack/instance_ha/v1/_proxy.py b/openstack/instance_ha/v1/_proxy.py new file mode 100644 index 0000000000..fd715262bf --- /dev/null +++ b/openstack/instance_ha/v1/_proxy.py @@ -0,0 +1,322 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import typing as ty + +from openstack import exceptions +from openstack.instance_ha.v1 import host as _host +from openstack.instance_ha.v1 import notification as _notification +from openstack.instance_ha.v1 import segment as _segment +from openstack.instance_ha.v1 import vmove as _vmove +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + _resource_registry = { + "host": _host.Host, + "notification": _notification.Notification, + "segment": _segment.Segment, + "vmove": _vmove.VMove, + } + + def notifications(self, **query): + """Return a generator of notifications. 
+ + :param kwargs query: Optional query parameters to be sent to + limit the notifications being returned. + :returns: A generator of notifications + """ + return self._list(_notification.Notification, **query) + + def get_notification(self, notification): + """Get a single notification. + + :param notification: The value can be the ID of a notification or a + :class:`~masakariclient.sdk.ha.v1.notification.Notification` + instance. + :returns: One + :class:`~masakariclient.sdk.ha.v1.notification.Notification` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_notification.Notification, notification) + + def create_notification(self, **attrs): + """Create a new notification. + + :param dict attrs: Keyword arguments which will be used to create + a :class:`masakariclient.sdk.ha.v1.notification.Notification`, + comprised of the properties on the Notification class. + :returns: The result of notification creation + :rtype: :class:`masakariclient.sdk.ha.v1.notification.Notification` + """ + return self._create(_notification.Notification, **attrs) + + def segments(self, **query): + """Return a generator of segments. + + :param kwargs query: Optional query parameters to be sent to + limit the segments being returned. + :returns: A generator of segments + """ + return self._list(_segment.Segment, **query) + + def get_segment(self, segment): + """Get a single segment. + + :param segment: The value can be the ID of a segment or a + :class:`~masakariclient.sdk.ha.v1.segment.Segment` instance. + :returns: One :class:`~masakariclient.sdk.ha.v1.segment.Segment` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_segment.Segment, segment) + + def create_segment(self, **attrs): + """Create a new segment. 
+ + :param dict attrs: Keyword arguments which will be used to create + a :class:`masakariclient.sdk.ha.v1.segment.Segment`, + comprised of the properties on the Segment class. + :returns: The result of segment creation + :rtype: :class:`masakariclient.sdk.ha.v1.segment.Segment` + """ + return self._create(_segment.Segment, **attrs) + + def update_segment(self, segment, **attrs): + """Update a segment. + + :param segment: The value can be the ID of a segment or a + :class:`~masakariclient.sdk.ha.v1.segment.Segment` instance. + :param dict attrs: Keyword arguments which will be used to update + a :class:`masakariclient.sdk.ha.v1.segment.Segment`, + comprised of the properties on the Segment class. + :returns: The updated segment. + :rtype: :class:`masakariclient.sdk.ha.v1.segment.Segment` + """ + return self._update(_segment.Segment, segment, **attrs) + + def delete_segment(self, segment, ignore_missing=True): + """Delete a segment. + + :param segment: + The value can be either the ID of a segment or a + :class:`~masakariclient.sdk.ha.v1.segment.Segment` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the segment does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent segment. + :returns: ``None`` + """ + return self._delete( + _segment.Segment, segment, ignore_missing=ignore_missing + ) + + def hosts(self, segment_id, **query): + """Return a generator of hosts. + + :param segment_id: The ID of a failover segment. + :param kwargs query: Optional query parameters to be sent to + limit the hosts being returned. + + :returns: A generator of hosts + """ + return self._list(_host.Host, segment_id=segment_id, **query) + + def create_host(self, segment_id, **attrs): + """Create a new host. + + :param segment_id: The ID of a failover segment. 
+ :param dict attrs: Keyword arguments which will be used to create + a :class:`masakariclient.sdk.ha.v1.host.Host`, + comprised of the properties on the Host class. + + :returns: The results of host creation + """ + return self._create(_host.Host, segment_id=segment_id, **attrs) + + def get_host(self, host, segment_id=None): + """Get a single host. + + :param segment_id: The ID of a failover segment. + :param host: The value can be the ID of a host or a :class: + `~masakariclient.sdk.ha.v1.host.Host` instance. + + :returns: One :class:`~masakariclient.sdk.ha.v1.host.Host` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.InvalidRequest` + when segment_id is None. + """ + if segment_id is None: + raise exceptions.InvalidRequest("'segment_id' must be specified.") + + host_id = resource.Resource._get_id(host) + return self._get(_host.Host, host_id, segment_id=segment_id) + + def update_host(self, host, segment_id, **attrs): + """Update the host. + + :param segment_id: The ID of a failover segment. + :param host: The value can be the ID of a host or a :class: + `~masakariclient.sdk.ha.v1.host.Host` instance. + :param dict attrs: The attributes to update on the host represented. + + :returns: The updated host + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.InvalidRequest` + when segment_id is None. + """ + host_id = resource.Resource._get_id(host) + return self._update( + _host.Host, host_id, segment_id=segment_id, **attrs + ) + + def delete_host(self, host, segment_id=None, ignore_missing=True): + """Delete the host. + + :param segment_id: The ID of a failover segment. + :param host: The value can be the ID of a host or a :class: + `~masakariclient.sdk.ha.v1.host.Host` instance. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the host does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent host. + + :returns: ``None`` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.InvalidRequest` + when segment_id is None. + + """ + if segment_id is None: + raise exceptions.InvalidRequest("'segment_id' must be specified.") + + host_id = resource.Resource._get_id(host) + return self._delete( + _host.Host, + host_id, + segment_id=segment_id, + ignore_missing=ignore_missing, + ) + + def vmoves(self, notification, **query): + """Return a generator of vmoves. + + :param notification: The value can be the UUID of a notification or + a :class: `~masakariclient.sdk.ha.v1.notification.Notification` + instance. + :param kwargs query: Optional query parameters to be sent to + limit the vmoves being returned. + + :returns: A generator of vmoves + """ + notification_id = resource.Resource._get_id(notification) + return self._list( + _vmove.VMove, + notification_id=notification_id, + **query, + ) + + def get_vmove(self, vmove, notification): + """Get a single vmove. + + :param vmove: The value can be the UUID of one vmove or + a :class: `~masakariclient.sdk.ha.v1.vmove.VMove` instance. + :param notification: The value can be the UUID of a notification or + a :class: `~masakariclient.sdk.ha.v1.notification.Notification` + instance. + :returns: one 'VMove' resource class. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + :raises: :class:`~openstack.exceptions.InvalidRequest` + when notification_id is None. 
+ """ + notification_id = resource.Resource._get_id(notification) + vmove_id = resource.Resource._get_id(vmove) + return self._get( + _vmove.VMove, + vmove_id, + notification_id=notification_id, + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/instance_ha/v1/host.py b/openstack/instance_ha/v1/host.py new file mode 100644 index 0000000000..ff356797ae --- /dev/null +++ b/openstack/instance_ha/v1/host.py @@ -0,0 +1,63 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack import resource + + +class Host(resource.Resource): + resource_key = "host" + resources_key = "hosts" + base_path = "/segments/%(segment_id)s/hosts" + + # capabilities + # 1] GET /v1/segments//hosts + # 2] GET /v1/segments//hosts/ + # 3] POST /v1/segments//hosts + # 4] PUT /v1/segments//hosts + # 5] DELETE /v1/segments//hosts + allow_list = True + allow_fetch = True + allow_create = True + allow_commit = True + allow_delete = True + + #: A Uuid of representing this host + uuid = resource.Body("uuid") + #: A failover segment ID of this host(in URI) + segment_id = resource.URI("segment_id") + #: A created time of this host + created_at = resource.Body("created_at") + #: A latest updated time of this host + updated_at = resource.Body("updated_at") + #: A name of this host + name = resource.Body("name") + #: A type of this host + type = resource.Body("type") + #: A control attributes of this host + control_attributes = resource.Body("control_attributes") + #: A maintenance status of this host + on_maintenance = resource.Body("on_maintenance") + #: A reservation status of this host + reserved = resource.Body("reserved") + #: A failover segment ID of this host(in Body) + failover_segment_id = resource.Body("failover_segment_id") + + _query_mapping = resource.QueryParameters( + "sort_key", + "sort_dir", + failover_segment_id="failover_segment_id", + type="type", + on_maintenance="on_maintenance", + reserved="reserved", + ) diff --git a/openstack/instance_ha/v1/notification.py b/openstack/instance_ha/v1/notification.py new file mode 100644 index 0000000000..2ddccbf4b0 --- /dev/null +++ b/openstack/instance_ha/v1/notification.py @@ -0,0 +1,90 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack import resource + + +class ProgressDetailsItem(resource.Resource): + #: The timestamp of recovery workflow task. + timestamp = resource.Body("timestamp") + #: The message of recovery workflow task. + message = resource.Body("message") + #: The progress of recovery workflow task. + progress = resource.Body("progress") + + +class RecoveryWorkflowDetailItem(resource.Resource): + #: The progress of recovery workflow. + progress = resource.Body("progress") + #: The name of recovery workflow. + name = resource.Body("name") + #: The state of recovery workflow. + state = resource.Body("state") + #: The progress details of this recovery workflow. + progress_details = resource.Body( + "progress_details", type=list, list_type=ProgressDetailsItem + ) + + +class Notification(resource.Resource): + resource_key = "notification" + resources_key = "notifications" + base_path = "/notifications" + + # capabilities + # 1] GET /v1/notifications + # 2] GET /v1/notifications/ + # 3] POST /v1/notifications + allow_list = True + allow_fetch = True + allow_create = True + allow_commit = False + allow_delete = False + + #: A ID of representing this notification. + id = resource.Body("id") + #: A Uuid of representing this notification. + notification_uuid = resource.Body("notification_uuid") + #: A created time of representing this notification. + created_at = resource.Body("created_at") + #: A latest updated time of representing this notification. + updated_at = resource.Body("updated_at") + #: The type of failure. 
Valid values include ''COMPUTE_HOST'', + #: ''VM'', ''PROCESS'' + type = resource.Body("type") + #: The hostname of this notification. + hostname = resource.Body("hostname") + #: The status for this notification. + status = resource.Body("status") + #: The generated_time for this notification. + generated_time = resource.Body("generated_time") + #: The payload of this notification. + payload = resource.Body("payload") + #: The source host uuid of this notification. + source_host_uuid = resource.Body("source_host_uuid") + #: The recovery workflow details of this notification. + recovery_workflow_details = resource.Body( + "recovery_workflow_details", + type=list, + list_type=RecoveryWorkflowDetailItem, + ) + + _query_mapping = resource.QueryParameters( + "sort_key", + "sort_dir", + source_host_uuid="source_host_uuid", + type="type", + status="status", + generated_since="generated-since", + ) diff --git a/openstack/instance_ha/v1/segment.py b/openstack/instance_ha/v1/segment.py new file mode 100644 index 0000000000..814c832283 --- /dev/null +++ b/openstack/instance_ha/v1/segment.py @@ -0,0 +1,63 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack import resource + + +class Segment(resource.Resource): + resource_key = "segment" + resources_key = "segments" + base_path = "/segments" + + # capabilities + # 1] GET /v1/segments + # 2] GET /v1/segments/ + # 3] POST /v1/segments + # 4] PUT /v1/segments/ + # 5] DELETE /v1/segments/ + allow_list = True + allow_fetch = True + allow_create = True + allow_commit = True + allow_delete = True + + # add enabled flag to segment in 1.2 + _max_microversion = '1.2' + + #: A ID of representing this segment. + id = resource.Body("id") + #: A Uuid of representing this segment. + uuid = resource.Body("uuid") + #: A created time of representing this segment. + created_at = resource.Body("created_at") + #: A latest updated time of representing this segment. + updated_at = resource.Body("updated_at") + #: The name of this segment. + name = resource.Body("name") + #: The description of this segment. + description = resource.Body("description") + #: The recovery method of this segment. + recovery_method = resource.Body("recovery_method") + #: The service type of this segment. + service_type = resource.Body("service_type") + #: The enabled flag of this segment. + is_enabled = resource.Body("enabled", type=bool) + + _query_mapping = resource.QueryParameters( + "sort_key", + "sort_dir", + recovery_method="recovery_method", + service_type="service_type", + is_enabled="enabled", + ) diff --git a/openstack/instance_ha/v1/vmove.py b/openstack/instance_ha/v1/vmove.py new file mode 100644 index 0000000000..e9cf9e360d --- /dev/null +++ b/openstack/instance_ha/v1/vmove.py @@ -0,0 +1,63 @@ +# Copyright(c) 2022 Inspur +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack import resource + + +class VMove(resource.Resource): + resource_key = "vmove" + resources_key = "vmoves" + base_path = "/notifications/%(notification_id)s/vmoves" + + # capabilities + # 1] GET /v1/notifications/{notification_uuid}/vmoves + # 2] GET /v1/notifications/{notification_uuid}/vmoves/{vmove_uuid} + allow_list = True + allow_fetch = True + + _query_mapping = resource.QueryParameters( + "sort_key", + "sort_dir", + "type", + "status", + ) + + #: An ID representing this vmove + id = resource.Body("id") + #: A UUID representing this vmove + uuid = resource.Body("uuid") + #: The notification UUID this vmove belongs to (in URI) + notification_id = resource.URI("notification_id") + #: The created time of this vmove + created_at = resource.Body("created_at") + #: The latest updated time of this vmove + updated_at = resource.Body("updated_at") + #: The instance uuid of this vmove + server_id = resource.Body("instance_uuid") + #: The instance name of this vmove + server_name = resource.Body("instance_name") + #: The source host of this vmove + source_host = resource.Body("source_host") + #: The dest host of this vmove + dest_host = resource.Body("dest_host") + #: The start time of this vmove + start_time = resource.Body("start_time") + #: The end time of this vmove + end_time = resource.Body("end_time") + #: The status of this vmove + status = resource.Body("status") + #: The type of this vmove + type = resource.Body("type") + #: The message of this vmove + message = resource.Body("message") diff --git 
a/openstack/key_manager/key_manager_service.py b/openstack/key_manager/key_manager_service.py index 36d3b413b3..d80951a03a 100644 --- a/openstack/key_manager/key_manager_service.py +++ b/openstack/key_manager/key_manager_service.py @@ -10,15 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack import service_filter +from openstack.key_manager.v1 import _proxy +from openstack import service_description -class KeyManagerService(service_filter.ServiceFilter): +class KeyManagerService(service_description.ServiceDescription[_proxy.Proxy]): """The key manager service.""" - valid_versions = [service_filter.ValidVersion('v1')] - - def __init__(self, version=None): - """Create a key manager service.""" - super(KeyManagerService, self).__init__(service_type='key-manager', - version=version) + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/key_manager/v1/_format.py b/openstack/key_manager/v1/_format.py index 34698a389c..58a72d8938 100644 --- a/openstack/key_manager/v1/_format.py +++ b/openstack/key_manager/v1/_format.py @@ -10,30 +10,21 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack import format - -from six.moves.urllib import parse +from urllib import parse +from openstack import format -class HREFToUUID(format.Formatter): +class HREFToUUID(format.Formatter[str]): @classmethod - def deserialize(cls, value): + def deserialize(cls, value: str) -> str: """Convert a HREF to the UUID portion""" parts = parse.urlsplit(value) # Only try to proceed if we have an actual URI. # Just check that we have a scheme, netloc, and path. if not all(parts[:3]): - raise ValueError("Unable to convert %s to an ID" % value) + raise ValueError(f"Unable to convert {value} to an ID") # The UUID will be the last portion of the URI. 
return parts.path.split("/")[-1] - - @classmethod - def serialize(cls, value): - # NOTE(briancurtin): If we had access to the session to get - # the endpoint we could do something smart here like take an ID - # and give back an HREF, but this will just have to be something - # that works different because Barbican does what it does... - return value diff --git a/openstack/key_manager/v1/_proxy.py b/openstack/key_manager/v1/_proxy.py index 0c65d26292..e5d161ffb3 100644 --- a/openstack/key_manager/v1/_proxy.py +++ b/openstack/key_manager/v1/_proxy.py @@ -10,20 +10,34 @@ # License for the specific language governing permissions and limitations # under the License. +import typing as ty + from openstack.key_manager.v1 import container as _container from openstack.key_manager.v1 import order as _order +from openstack.key_manager.v1 import project_quota as _project_quota from openstack.key_manager.v1 import secret as _secret -from openstack import proxy2 +from openstack.key_manager.v1 import secret_store as _secret_store +from openstack import proxy +from openstack import resource + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' -class Proxy(proxy2.BaseProxy): + _resource_registry = { + "container": _container.Container, + "order": _order.Order, + "project_quota": _project_quota.ProjectQuota, + "secret": _secret.Secret, + "secret_store": _secret_store.SecretStore, + } def create_container(self, **attrs): """Create a new container from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.key_manager.v1.container.Container`, - comprised of the properties on the Container class. + a :class:`~openstack.key_manager.v1.container.Container`, + comprised of the properties on the Container class. 
:returns: The results of container creation :rtype: :class:`~openstack.key_manager.v1.container.Container` @@ -34,66 +48,67 @@ def delete_container(self, container, ignore_missing=True): """Delete a container :param container: The value can be either the ID of a container or a - :class:`~openstack.key_manager.v1.container.Container` - instance. + :class:`~openstack.key_manager.v1.container.Container` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the container does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent container. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the container does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent container. :returns: ``None`` """ - self._delete(_container.Container, container, - ignore_missing=ignore_missing) + self._delete( + _container.Container, container, ignore_missing=ignore_missing + ) def find_container(self, name_or_id, ignore_missing=True): """Find a single container :param name_or_id: The name or ID of a container. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.key_manager.v1.container.Container` - or None + or None """ - return self._find(_container.Container, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _container.Container, name_or_id, ignore_missing=ignore_missing + ) def get_container(self, container): """Get a single container :param container: The value can be the ID of a container or a - :class:`~openstack.key_manager.v1.container.Container` - instance. + :class:`~openstack.key_manager.v1.container.Container` + instance. :returns: One :class:`~openstack.key_manager.v1.container.Container` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_container.Container, container) def containers(self, **query): """Return a generator of containers - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of container objects :rtype: :class:`~openstack.key_manager.v1.container.Container` """ - return self._list(_container.Container, paginated=False, **query) + return self._list(_container.Container, **query) def update_container(self, container, **attrs): """Update a container :param container: Either the id of a container or a - :class:`~openstack.key_manager.v1.container.Container` - instance. - :attrs kwargs: The attributes to update on the container represented - by ``value``. + :class:`~openstack.key_manager.v1.container.Container` instance. + :param attrs: The attributes to update on the container represented + by ``container``. 
:returns: The updated container :rtype: :class:`~openstack.key_manager.v1.container.Container` @@ -104,8 +119,8 @@ def create_order(self, **attrs): """Create a new order from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.key_manager.v1.order.Order`, - comprised of the properties on the Order class. + a :class:`~openstack.key_manager.v1.order.Order`, + comprised of the properties on the Order class. :returns: The results of order creation :rtype: :class:`~openstack.key_manager.v1.order.Order` @@ -116,13 +131,13 @@ def delete_order(self, order, ignore_missing=True): """Delete an order :param order: The value can be either the ID of a order or a - :class:`~openstack.key_manager.v1.order.Order` - instance. + :class:`~openstack.key_manager.v1.order.Order` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the order does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent order. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the order does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent order. :returns: ``None`` """ @@ -133,47 +148,47 @@ def find_order(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a order. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
:returns: One :class:`~openstack.key_manager.v1.order.Order` or None """ - return self._find(_order.Order, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _order.Order, name_or_id, ignore_missing=ignore_missing + ) def get_order(self, order): """Get a single order :param order: The value can be the ID of an order or a - :class:`~openstack.key_manager.v1.order.Order` - instance. + :class:`~openstack.key_manager.v1.order.Order` + instance. :returns: One :class:`~openstack.key_manager.v1.order.Order` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_order.Order, order) def orders(self, **query): """Return a generator of orders - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of order objects :rtype: :class:`~openstack.key_manager.v1.order.Order` """ - return self._list(_order.Order, paginated=False, **query) + return self._list(_order.Order, **query) def update_order(self, order, **attrs): """Update a order :param order: Either the id of a order or a - :class:`~openstack.key_manager.v1.order.Order` - instance. - :attrs kwargs: The attributes to update on the order represented - by ``value``. + :class:`~openstack.key_manager.v1.order.Order` instance. + :param attrs: The attributes to update on the order represented + by ``order``. :returns: The updated order :rtype: :class:`~openstack.key_manager.v1.order.Order` @@ -184,8 +199,8 @@ def create_secret(self, **attrs): """Create a new secret from attributes :param dict attrs: Keyword arguments which will be used to create a - :class:`~openstack.key_manager.v1.secret.Secret`, - comprised of the properties on the Order class. 
+ :class:`~openstack.key_manager.v1.secret.Secret`, + comprised of the properties on the Order class. :returns: The results of secret creation :rtype: :class:`~openstack.key_manager.v1.secret.Secret` @@ -196,13 +211,13 @@ def delete_secret(self, secret, ignore_missing=True): """Delete a secret :param secret: The value can be either the ID of a secret or a - :class:`~openstack.key_manager.v1.secret.Secret` - instance. + :class:`~openstack.key_manager.v1.secret.Secret` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the secret does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent secret. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the secret does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent secret. :returns: ``None`` """ @@ -213,50 +228,198 @@ def find_secret(self, name_or_id, ignore_missing=True): :param name_or_id: The name or ID of a secret. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.key_manager.v1.secret.Secret` or - None + None """ - return self._find(_secret.Secret, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _secret.Secret, name_or_id, ignore_missing=ignore_missing + ) def get_secret(self, secret): """Get a single secret :param secret: The value can be the ID of a secret or a - :class:`~openstack.key_manager.v1.secret.Secret` - instance. 
+ :class:`~openstack.key_manager.v1.secret.Secret` + instance. :returns: One :class:`~openstack.key_manager.v1.secret.Secret` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_secret.Secret, secret) def secrets(self, **query): """Return a generator of secrets - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of secret objects :rtype: :class:`~openstack.key_manager.v1.secret.Secret` """ - return self._list(_secret.Secret, paginated=False, **query) + return self._list(_secret.Secret, **query) def update_secret(self, secret, **attrs): """Update a secret :param secret: Either the id of a secret or a - :class:`~openstack.key_manager.v1.secret.Secret` - instance. - :attrs kwargs: The attributes to update on the secret represented - by ``value``. + :class:`~openstack.key_manager.v1.secret.Secret` instance. + :param attrs: The attributes to update on the secret represented + by ``secret``. :returns: The updated secret :rtype: :class:`~openstack.key_manager.v1.secret.Secret` """ return self._update(_secret.Secret, secret, **attrs) + + # ========== Secret Store Operations ========== + + def secret_stores(self, **query): + """Return a generator of secret stores + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
+ + :returns: A generator of secret store objects + :rtype: :class:`~openstack.key_manager.v1.secret_store.SecretStore` + """ + return self._list(_secret_store.SecretStore, **query) + + def get_global_default_secret_store(self): + """Get the global default secret store + + :returns: One + :class:`~openstack.key_manager.v1.secret_store.SecretStore` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _secret_store.SecretStore, + None, + requires_id=False, + base_path='/secret-stores/global-default', + ) + + def get_preferred_secret_store(self): + """Get the preferred secret store for the current project + + :returns: One + :class:`~openstack.key_manager.v1.secret_store.SecretStore` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _secret_store.SecretStore, + None, + requires_id=False, + base_path='/secret-stores/preferred', + ) + + def delete_project_quota(self, project_id, ignore_missing=True): + """Delete a project quota + + :param project_id: A project ID. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the project quota does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent project quota. + + :returns: ``None`` + """ + self._delete( + _project_quota.ProjectQuota, + project_id, + ignore_missing=ignore_missing, + ) + + def get_project_quota(self, project_id): + """Get a single project quota + + :param project_id: A project ID. + + :returns: One + :class:`~openstack.key_manager.v1.project_quota.ProjectQuota` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_project_quota.ProjectQuota, project_id) + + def update_project_quota(self, project_id, **attrs): + """Update a project quota + + :param project_id: A project ID. 
+ :param attrs: The attributes to update on the project quota represented + by ``project quota``. + + :returns: The updated project quota + :rtype: :class:`~openstack.key_manager.v1.project_quota.ProjectQuota` + """ + return self._update(_project_quota.ProjectQuota, project_id, **attrs) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/key_manager/v1/container.py b/openstack/key_manager/v1/container.py index dbfcdfa567..eebe5d0383 100644 --- a/openstack/key_manager/v1/container.py +++ b/openstack/key_manager/v1/container.py @@ -10,40 +10,39 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.key_manager import key_manager_service from openstack.key_manager.v1 import _format -from openstack import resource2 +from openstack import resource -class Container(resource2.Resource): +class Container(resource.Resource): resources_key = 'containers' base_path = '/containers' - service = key_manager_service.KeyManagerService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # Properties #: A URI for this container - container_ref = resource2.Body('container_ref') + container_ref = resource.Body('container_ref') #: The ID for this container - container_id = resource2.Body('container_ref', alternate_id=True, - type=_format.HREFToUUID) + container_id = resource.Body( + 'container_ref', alternate_id=True, type=_format.HREFToUUID + ) #: The timestamp when this container was created. - created_at = resource2.Body('created') + created_at = resource.Body('created') #: The name of this container - name = resource2.Body('name') + name = resource.Body('name') #: A list of references to secrets in this container - secret_refs = resource2.Body('secret_refs', type=list) + secret_refs = resource.Body('secret_refs', type=list) #: The status of this container - status = resource2.Body('status') + status = resource.Body('status') #: The type of this container - type = resource2.Body('type') + type = resource.Body('type') #: The timestamp when this container was updated. - updated_at = resource2.Body('updated') + updated_at = resource.Body('updated') #: A party interested in this container. 
- consumers = resource2.Body('consumers', type=list) + consumers = resource.Body('consumers', type=list) diff --git a/openstack/key_manager/v1/order.py b/openstack/key_manager/v1/order.py index b7bed06548..350677f7f9 100644 --- a/openstack/key_manager/v1/order.py +++ b/openstack/key_manager/v1/order.py @@ -10,46 +10,45 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.key_manager import key_manager_service from openstack.key_manager.v1 import _format -from openstack import resource2 +from openstack import resource -class Order(resource2.Resource): +class Order(resource.Resource): resources_key = 'orders' base_path = '/orders' - service = key_manager_service.KeyManagerService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True #: Timestamp in ISO8601 format of when the order was created - created_at = resource2.Body('created') + created_at = resource.Body('created') #: Keystone Id of the user who created the order - creator_id = resource2.Body('creator_id') + creator_id = resource.Body('creator_id') #: A dictionary containing key-value parameters which specify the #: details of an order request - meta = resource2.Body('meta', type=dict) + meta = resource.Body('meta', type=dict) #: A URI for this order - order_ref = resource2.Body('order_ref') + order_ref = resource.Body('order_ref') #: The ID of this order - order_id = resource2.Body('order_ref', alternate_id=True, - type=_format.HREFToUUID) + order_id = resource.Body( + 'order_ref', alternate_id=True, type=_format.HREFToUUID + ) #: Secret href associated with the order - secret_ref = resource2.Body('secret_ref') + secret_ref = resource.Body('secret_ref') #: Secret ID associated with the order - secret_id = resource2.Body('secret_ref', type=_format.HREFToUUID) + secret_id = resource.Body('secret_ref', type=_format.HREFToUUID) # The status of this 
order - status = resource2.Body('status') + status = resource.Body('status') #: Metadata associated with the order - sub_status = resource2.Body('sub_status') + sub_status = resource.Body('sub_status') #: Metadata associated with the order - sub_status_message = resource2.Body('sub_status_message') + sub_status_message = resource.Body('sub_status_message') # The type of order - type = resource2.Body('type') + type = resource.Body('type') #: Timestamp in ISO8601 format of the last time the order was updated. - updated_at = resource2.Body('updated') + updated_at = resource.Body('updated') diff --git a/openstack/key_manager/v1/project_quota.py b/openstack/key_manager/v1/project_quota.py new file mode 100644 index 0000000000..254b48bace --- /dev/null +++ b/openstack/key_manager/v1/project_quota.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ProjectQuota(resource.Resource): + resource_key = 'project_quotas' + resources_key = 'project_quotas' + base_path = '/project-quotas' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: Contains the configured quota value of the requested project for the + #: secret resource. + secrets = resource.Body("secrets") + #: Contains the configured quota value of the requested project for the + #: orders resource. 
+ orders = resource.Body("orders") + #: Contains the configured quota value of the requested project for the + #: containers resource. + containers = resource.Body("containers") + #: Contains the configured quota value of the requested project for the + #: consumers resource. + consumers = resource.Body("consumers") + #: Contains the configured quota value of the requested project for the CAs + #: resource. + cas = resource.Body("cas") diff --git a/openstack/key_manager/v1/secret.py b/openstack/key_manager/v1/secret.py index 603b09ef07..763fee3eb6 100644 --- a/openstack/key_manager/v1/secret.py +++ b/openstack/key_manager/v1/secret.py @@ -10,79 +10,94 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.key_manager import key_manager_service from openstack.key_manager.v1 import _format -from openstack import resource2 +from openstack import resource from openstack import utils -class Secret(resource2.Resource): +class Secret(resource.Resource): resources_key = 'secrets' base_path = '/secrets' - service = key_manager_service.KeyManagerService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True - _query_mapping = resource2.QueryParameters("name", "mode", "bits", - "secret_type", "acl_only", - "created", "updated", - "expiration", "sort", - algorithm="alg") + _query_mapping = resource.QueryParameters( + "name", + "mode", + "bits", + "secret_type", + "acl_only", + "created", + "updated", + "expiration", + "sort", + algorithm="alg", + ) # Properties #: Metadata provided by a user or system for informational purposes - algorithm = resource2.Body('algorithm') + algorithm = resource.Body('algorithm') #: Metadata provided by a user or system for informational purposes. #: Value must be greater than zero. 
- bit_length = resource2.Body('bit_length') + bit_length = resource.Body('bit_length') #: A list of content types - content_types = resource2.Body('content_types', type=dict) + content_types = resource.Body('content_types', type=dict) #: Once this timestamp has past, the secret will no longer be available. - expires_at = resource2.Body('expiration') + expires_at = resource.Body('expiration') #: Timestamp of when the secret was created. - created_at = resource2.Body('created') + created_at = resource.Body('created') #: Timestamp of when the secret was last updated. - updated_at = resource2.Body('updated') + updated_at = resource.Body('updated') #: The type/mode of the algorithm associated with the secret information. - mode = resource2.Body('mode') + mode = resource.Body('mode') #: The name of the secret set by the user - name = resource2.Body('name') + name = resource.Body('name') #: A URI to the sercret - secret_ref = resource2.Body('secret_ref') + secret_ref = resource.Body('secret_ref') #: The ID of the secret # NOTE: This is not really how alternate IDs are supposed to work and # ultimately means this has to work differently than all other services # in all of OpenStack because of the departure from using actual IDs # that even this service can't even use itself. - secret_id = resource2.Body('secret_ref', alternate_id=True, - type=_format.HREFToUUID) + secret_id = resource.Body( + 'secret_ref', alternate_id=True, type=_format.HREFToUUID + ) #: Used to indicate the type of secret being stored. - secret_type = resource2.Body('secret_type') + secret_type = resource.Body('secret_type') #: The status of this secret - status = resource2.Body('status') + status = resource.Body('status') #: A timestamp when this secret was updated. - updated_at = resource2.Body('updated') + updated_at = resource.Body('updated') #: The secret's data to be stored. payload_content_type must also #: be supplied if payload is included. 
(optional) - payload = resource2.Body('payload') + payload = resource.Body('payload') #: The media type for the content of the payload. #: (required if payload is included) - payload_content_type = resource2.Body('payload_content_type') + payload_content_type = resource.Body('payload_content_type') #: The encoding used for the payload to be able to include it in #: the JSON request. Currently only base64 is supported. #: (required if payload is encoded) - payload_content_encoding = resource2.Body('payload_content_encoding') + payload_content_encoding = resource.Body('payload_content_encoding') - def get(self, session, requires_id=True): - request = self._prepare_request(requires_id=requires_id) + def fetch( + self, + session, + requires_id=True, + base_path=None, + error_message=None, + skip_cache=False, + **kwargs, + ): + request = self._prepare_request( + requires_id=requires_id, base_path=base_path + ) - response = session.get(request.uri, - endpoint_filter=self.service).json() + response = session.get(request.url).json() content_type = None if self.payload_content_type is not None: @@ -93,14 +108,24 @@ def get(self, session, requires_id=True): # Only try to get the payload if a content type has been explicitly # specified or if one was found in the metadata response if content_type is not None: - payload = session.get(utils.urljoin(request.uri, "payload"), - endpoint_filter=self.service, - headers={"Accept": content_type}) - response["payload"] = payload.text + payload = session.get( + utils.urljoin(request.url, "payload"), + headers={"Accept": content_type}, + skip_cache=skip_cache, + ) + # NOTE(pas-ha): do not return payload.text here, + # as it will be decoded to whatever requests and chardet detected, + # and if they are wrong, there'd be no way of getting original + # bytes back and try to re-decode. + # At least this way, the encoding is always the same, + # so SDK user can always get original bytes back and fix things. 
+ # Besides, this is exactly what python-barbicanclient does. + if content_type == "text/plain": + response["payload"] = payload.content.decode("UTF-8") + else: + response["payload"] = payload.content # We already have the JSON here so don't call into _translate_response - body = self._filter_component(response, self._body_mapping()) - self._body.attributes.update(body) - self._body.clean() + self._update_from_body_attrs(response) return self diff --git a/openstack/key_manager/v1/secret_store.py b/openstack/key_manager/v1/secret_store.py new file mode 100644 index 0000000000..f6798d96cb --- /dev/null +++ b/openstack/key_manager/v1/secret_store.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.key_manager.v1 import _format +from openstack import resource + + +class SecretStore(resource.Resource): + resources_key = 'secret_stores' + base_path = '/secret-stores' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + + _query_mapping = resource.QueryParameters( + "name", + "status", + "global_default", + "crypto_plugin", + "secret_store_plugin", + "created", + "updated", + ) + + # Properties + #: The name of the secret store + name = resource.Body('name') + #: The status of the secret store + status = resource.Body('status') + #: Timestamp of when the secret store was created + created_at = resource.Body('created') + #: Timestamp of when the secret store was last updated + updated_at = resource.Body('updated') + #: A URI to the secret store + secret_store_ref = resource.Body('secret_store_ref') + #: The ID of the secret store + secret_store_id = resource.Body( + 'secret_store_ref', alternate_id=True, type=_format.HREFToUUID + ) + #: Flag indicating if this secret store is global default + global_default = resource.Body('global_default', type=bool) + #: The crypto plugin name + crypto_plugin = resource.Body('crypto_plugin') + #: The secret store plugin name + secret_store_plugin = resource.Body('secret_store_plugin') diff --git a/openstack/tests/unit/block_store/v2/__init__.py b/openstack/load_balancer/__init__.py similarity index 100% rename from openstack/tests/unit/block_store/v2/__init__.py rename to openstack/load_balancer/__init__.py diff --git a/openstack/load_balancer/load_balancer_service.py b/openstack/load_balancer/load_balancer_service.py new file mode 100644 index 0000000000..b1b6c2dc93 --- /dev/null +++ b/openstack/load_balancer/load_balancer_service.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.load_balancer.v2 import _proxy +from openstack import service_description + + +class LoadBalancerService( + service_description.ServiceDescription[_proxy.Proxy] +): + """The load balancer service.""" + + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/openstack/tests/unit/cluster/__init__.py b/openstack/load_balancer/v2/__init__.py similarity index 100% rename from openstack/tests/unit/cluster/__init__.py rename to openstack/load_balancer/v2/__init__.py diff --git a/openstack/load_balancer/v2/_proxy.py b/openstack/load_balancer/v2/_proxy.py new file mode 100644 index 0000000000..716971767d --- /dev/null +++ b/openstack/load_balancer/v2/_proxy.py @@ -0,0 +1,1350 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack.load_balancer.v2 import amphora as _amphora +from openstack.load_balancer.v2 import availability_zone as _availability_zone +from openstack.load_balancer.v2 import ( + availability_zone_profile as _availability_zone_profile, +) +from openstack.load_balancer.v2 import flavor as _flavor +from openstack.load_balancer.v2 import flavor_profile as _flavor_profile +from openstack.load_balancer.v2 import health_monitor as _hm +from openstack.load_balancer.v2 import l7_policy as _l7policy +from openstack.load_balancer.v2 import l7_rule as _l7rule +from openstack.load_balancer.v2 import listener as _listener +from openstack.load_balancer.v2 import load_balancer as _lb +from openstack.load_balancer.v2 import member as _member +from openstack.load_balancer.v2 import pool as _pool +from openstack.load_balancer.v2 import provider as _provider +from openstack.load_balancer.v2 import quota as _quota +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + _resource_registry = { + "amphora": _amphora.Amphora, + "availability_zone": _availability_zone.AvailabilityZone, + "availability_zone_profile": _availability_zone_profile.AvailabilityZoneProfile, # noqa: E501 + "flavor": _flavor.Flavor, + "flavor_profile": _flavor_profile.FlavorProfile, + "health_monitor": _hm.HealthMonitor, + "l7_policy": _l7policy.L7Policy, + "l7_rule": _l7rule.L7Rule, + "load_balancer": _lb.LoadBalancer, + "member": _member.Member, + "pool": _pool.Pool, + "provider": _provider.Provider, + "quota": _quota.Quota, + } + + def create_load_balancer(self, **attrs): + """Create a new load balancer from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer`, + comprised of the properties on the + LoadBalancer class. 
+ + :returns: The results of load balancer creation + :rtype: :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + """ + return self._create(_lb.LoadBalancer, **attrs) + + def get_load_balancer(self, *attrs): + """Get a load balancer + + :param load_balancer: The value can be the ID of a load balancer + or :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + instance. + + :returns: One + :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + """ + return self._get(_lb.LoadBalancer, *attrs) + + def get_load_balancer_statistics(self, load_balancer): + """Get the load balancer statistics + + :param load_balancer: The value can be the ID of a load balancer + or :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + instance. + + :returns: One + :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancerStats` + """ + return self._get( + _lb.LoadBalancerStats, lb_id=load_balancer, requires_id=False + ) + + def load_balancers(self, **query): + """Retrieve a generator of load balancers + + :returns: A generator of load balancer instances + """ + return self._list(_lb.LoadBalancer, **query) + + def delete_load_balancer( + self, load_balancer, ignore_missing=True, cascade=False + ): + """Delete a load balancer + + :param load_balancer: The load_balancer can be either the ID or a + :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the load balancer does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent load + balancer. + :param bool cascade: If true will delete all child objects of + the load balancer. 
+ + :returns: ``None`` + """ + load_balancer = self._get_resource(_lb.LoadBalancer, load_balancer) + load_balancer.cascade = cascade + return self._delete( + _lb.LoadBalancer, load_balancer, ignore_missing=ignore_missing + ) + + def find_load_balancer(self, name_or_id, ignore_missing=True): + """Find a single load balancer + + :param name_or_id: The name or ID of a load balancer + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the load balancer does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent load balancer. + + :returns: ``None`` + """ + return self._find( + _lb.LoadBalancer, name_or_id, ignore_missing=ignore_missing + ) + + def update_load_balancer(self, load_balancer, **attrs): + """Update a load balancer + + :param load_balancer: The load_balancer can be either the ID or a + :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + instance + :param dict attrs: The attributes to update on the load balancer + represented by ``load_balancer``. + + :returns: The updated load_balancer + :rtype: :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + """ + return self._update(_lb.LoadBalancer, load_balancer, **attrs) + + def wait_for_load_balancer( + self, + name_or_id, + status='ACTIVE', + failures=['ERROR'], + interval=2, + wait=300, + ): + """Wait for load balancer status + + :param name_or_id: The name or ID of the load balancer. + :param status: Desired status. + :param failures: Statuses that would be interpreted as failures. + Default to ['ERROR']. + :type failures: :py:class:`list` + :param interval: Number of seconds to wait between consecutive + checks. Defaults to 2. + :param wait: Maximum number of seconds to wait before the status + to be reached. Defaults to 300. + :returns: The load balancer is returned on success. 
+ :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to the desired status failed to occur within the specified wait + time. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + has transited to one of the failure statuses. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute. + """ + lb = self._find(_lb.LoadBalancer, name_or_id, ignore_missing=False) + + return resource.wait_for_status( + self, + lb, + status, + failures, + interval, + wait, + attribute='provisioning_status', + ) + + def failover_load_balancer(self, load_balancer): + """Failover a load balancer + + :param load_balancer: The value can be the ID of a load balancer + or :class:`~openstack.load_balancer.v2.load_balancer.LoadBalancer` + instance. + + :returns: ``None`` + """ + lb = self._get_resource(_lb.LoadBalancer, load_balancer) + lb.failover(self) + + def create_listener(self, **attrs): + """Create a new listener from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.load_balancer.v2.listener.Listener`, + comprised of the properties on the Listener class. + + :returns: The results of listener creation + :rtype: :class:`~openstack.load_balancer.v2.listener.Listener` + """ + return self._create(_listener.Listener, **attrs) + + def delete_listener(self, listener, ignore_missing=True): + """Delete a listener + + :param listener: The value can be either the ID of a listener or a + :class:`~openstack.load_balancer.v2.listener.Listener` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the listner does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent listener. 
+ + :returns: ``None`` + """ + self._delete( + _listener.Listener, listener, ignore_missing=ignore_missing + ) + + def find_listener(self, name_or_id, ignore_missing=True): + """Find a single listener + + :param name_or_id: The name or ID of a listener. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + + :returns: One :class:`~openstack.load_balancer.v2.listener.Listener` + or None + """ + return self._find( + _listener.Listener, name_or_id, ignore_missing=ignore_missing + ) + + def get_listener(self, listener): + """Get a single listener + + :param listener: The value can be the ID of a listener or a + :class:`~openstack.load_balancer.v2.listener.Listener` + instance. + + :returns: One :class:`~openstack.load_balancer.v2.listener.Listener` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_listener.Listener, listener) + + def get_listener_statistics(self, listener): + """Get the listener statistics + + :param listener: The value can be the ID of a listener or a + :class:`~openstack.load_balancer.v2.listener.Listener` + instance. + + :returns: One + :class:`~openstack.load_balancer.v2.listener.ListenerStats` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + return self._get( + _listener.ListenerStats, listener_id=listener, requires_id=False + ) + + def listeners(self, **query): + """Return a generator of listeners + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. 
Valid parameters are: + :returns: A generator of listener objects + :rtype: :class:`~openstack.load_balancer.v2.listener.Listener` + """ + return self._list(_listener.Listener, **query) + + def update_listener(self, listener, **attrs): + """Update a listener + + :param listener: Either the id of a listener or a + :class:`~openstack.load_balancer.v2.listener.Listener` + instance. + :param dict attrs: The attributes to update on the listener + represented by ``listener``. + + :returns: The updated listener + :rtype: :class:`~openstack.load_balancer.v2.listener.Listener` + """ + return self._update(_listener.Listener, listener, **attrs) + + def create_pool(self, **attrs): + """Create a new pool from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.load_balancer.v2.pool.Pool`, comprised of the + properties on the Pool class. + + :returns: The results of Pool creation + :rtype: :class:`~openstack.load_balancer.v2.pool.Pool` + """ + return self._create(_pool.Pool, **attrs) + + def get_pool(self, *attrs): + """Get a pool + + :param pool: Value is either a pool ID or a + :class:`~openstack.load_balancer.v2.pool.Pool` + instance. + + :returns: One + :class:`~openstack.load_balancer.v2.pool.Pool` + """ + return self._get(_pool.Pool, *attrs) + + def pools(self, **query): + """Retrieve a generator of pools + + :returns: A generator of Pool instances + """ + return self._list(_pool.Pool, **query) + + def delete_pool(self, pool, ignore_missing=True): + """Delete a pool + + :param pool: The pool is either a pool ID or a + :class:`~openstack.load_balancer.v2.pool.Pool` + instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the pool does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent pool. 
+ + :returns: ``None`` + """ + return self._delete(_pool.Pool, pool, ignore_missing=ignore_missing) + + def find_pool(self, name_or_id, ignore_missing=True): + """Find a single pool + + :param name_or_id: The name or ID of a pool + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the pool does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent pool. + + :returns: ``None`` + """ + return self._find( + _pool.Pool, name_or_id, ignore_missing=ignore_missing + ) + + def update_pool(self, pool, **attrs): + """Update a pool + + :param pool: Either the id of a pool or a + :class:`~openstack.load_balancer.v2.pool.Pool` + instance. + :param dict attrs: The attributes to update on the pool + represented by ``pool``. + + :returns: The updated pool + :rtype: :class:`~openstack.load_balancer.v2.pool.Pool` + """ + return self._update(_pool.Pool, pool, **attrs) + + def create_member(self, pool, **attrs): + """Create a new member from attributes + + :param pool: The pool can be either the ID of a pool or a + :class:`~openstack.load_balancer.v2.pool.Pool` instance + that the member will be created in. + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.load_balancer.v2.member.Member`, + comprised of the properties on the Member class. + + :returns: The results of member creation + :rtype: :class:`~openstack.load_balancer.v2.member.Member` + """ + poolobj = self._get_resource(_pool.Pool, pool) + return self._create(_member.Member, pool_id=poolobj.id, **attrs) + + def delete_member(self, member, pool, ignore_missing=True): + """Delete a member + + :param member: + The member can be either the ID of a member or a + :class:`~openstack.load_balancer.v2.member.Member` instance. + :param pool: The pool can be either the ID of a pool or a + :class:`~openstack.load_balancer.v2.pool.Pool` instance + that the member belongs to. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the member does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent member. + + :returns: ``None`` + """ + poolobj = self._get_resource(_pool.Pool, pool) + self._delete( + _member.Member, + member, + ignore_missing=ignore_missing, + pool_id=poolobj.id, + ) + + def find_member(self, name_or_id, pool, ignore_missing=True): + """Find a single member + + :param str name_or_id: The name or ID of a member. + :param pool: The pool can be either the ID of a pool or a + :class:`~openstack.load_balancer.v2.pool.Pool` instance + that the member belongs to. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + + :returns: One :class:`~openstack.load_balancer.v2.member.Member` + or None + """ + poolobj = self._get_resource(_pool.Pool, pool) + return self._find( + _member.Member, + name_or_id, + ignore_missing=ignore_missing, + pool_id=poolobj.id, + ) + + def get_member(self, member, pool): + """Get a single member + + :param member: The member can be the ID of a member or a + :class:`~openstack.load_balancer.v2.member.Member` + instance. + :param pool: The pool can be either the ID of a pool or a + :class:`~openstack.load_balancer.v2.pool.Pool` instance + that the member belongs to. + + :returns: One :class:`~openstack.load_balancer.v2.member.Member` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + poolobj = self._get_resource(_pool.Pool, pool) + return self._get(_member.Member, member, pool_id=poolobj.id) + + def members(self, pool, **query): + """Return a generator of members + + :param pool: The pool can be either the ID of a pool or a + :class:`~openstack.load_balancer.v2.pool.Pool` instance + that the member belongs to. + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + + :returns: A generator of member objects + :rtype: :class:`~openstack.load_balancer.v2.member.Member` + """ + poolobj = self._get_resource(_pool.Pool, pool) + return self._list(_member.Member, pool_id=poolobj.id, **query) + + def update_member(self, member, pool, **attrs): + """Update a member + + :param member: Either the ID of a member or a + :class:`~openstack.load_balancer.v2.member.Member` + instance. + :param pool: The pool can be either the ID of a pool or a + :class:`~openstack.load_balancer.v2.pool.Pool` instance + that the member belongs to. + :param dict attrs: The attributes to update on the member + represented by ``member``. + + :returns: The updated member + :rtype: :class:`~openstack.load_balancer.v2.member.Member` + """ + poolobj = self._get_resource(_pool.Pool, pool) + return self._update( + _member.Member, member, pool_id=poolobj.id, **attrs + ) + + def find_health_monitor(self, name_or_id, ignore_missing=True): + """Find a single health monitor + + :param name_or_id: The name or ID of a health monitor + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the health monitor does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent health monitor. + + :returns: The + :class:`openstack.load_balancer.v2.healthmonitor.HealthMonitor` + object matching the given name or id or None if nothing matches. 
+ + :raises: :class:`openstack.exceptions.DuplicateResource` if more + than one resource is found for this request. + :raises: :class:`openstack.exceptions.NotFoundException` if nothing + is found and ignore_missing is ``False``. + """ + return self._find( + _hm.HealthMonitor, name_or_id, ignore_missing=ignore_missing + ) + + def create_health_monitor(self, **attrs): + """Create a new health monitor from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor`, + comprised of the properties on the HealthMonitor class. + + :returns: The results of HealthMonitor creation + :rtype: + :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` + """ + + return self._create(_hm.HealthMonitor, **attrs) + + def get_health_monitor(self, healthmonitor): + """Get a health monitor + + :param healthmonitor: The value can be the ID of a health monitor or + :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` + instance. + + :returns: One health monitor + :rtype: + :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` + """ + return self._get(_hm.HealthMonitor, healthmonitor) + + def health_monitors(self, **query): + """Retrieve a generator of health monitors + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + 'name', 'created_at', 'updated_at', 'delay', + 'expected_codes', 'http_method', 'max_retries', + 'max_retries_down', 'pool_id', + 'provisioning_status', 'operating_status', + 'timeout', 'project_id', 'type', 'url_path', + 'is_admin_state_up'. 
+ + :returns: A generator of health monitor instances + """ + return self._list(_hm.HealthMonitor, **query) + + def delete_health_monitor(self, healthmonitor, ignore_missing=True): + """Delete a health monitor + + :param healthmonitor: The healthmonitor can be either the ID of the + health monitor or a + :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` + instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the healthmonitor does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + healthmonitor. + + :returns: ``None`` + """ + return self._delete( + _hm.HealthMonitor, healthmonitor, ignore_missing=ignore_missing + ) + + def update_health_monitor(self, healthmonitor, **attrs): + """Update a health monitor + + :param healthmonitor: The healthmonitor can be either the ID of the + health monitor or a + :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` + instance + :param dict attrs: The attributes to update on the health monitor + represented by ``healthmonitor``. + + :returns: The updated health monitor + :rtype: + :class:`~openstack.load_balancer.v2.healthmonitor.HealthMonitor` + """ + return self._update(_hm.HealthMonitor, healthmonitor, **attrs) + + def create_l7_policy(self, **attrs): + """Create a new l7policy from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy`, + comprised of the properties on the L7Policy class. + + :returns: The results of l7policy creation + :rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + """ + return self._create(_l7policy.L7Policy, **attrs) + + def delete_l7_policy(self, l7_policy, ignore_missing=True): + """Delete a l7policy + + :param l7_policy: The value can be either the ID of a l7policy or a + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` instance. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the l7policy does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent l7policy. + + :returns: ``None`` + """ + self._delete( + _l7policy.L7Policy, l7_policy, ignore_missing=ignore_missing + ) + + def find_l7_policy(self, name_or_id, ignore_missing=True): + """Find a single l7policy + + :param name_or_id: The name or ID of a l7policy. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + + :returns: One :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + or None + """ + return self._find( + _l7policy.L7Policy, name_or_id, ignore_missing=ignore_missing + ) + + def get_l7_policy(self, l7_policy): + """Get a single l7policy + + :param l7_policy: The value can be the ID of a l7policy or a + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance. + + :returns: One :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_l7policy.L7Policy, l7_policy) + + def l7_policies(self, **query): + """Return a generator of l7policies + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + + :returns: A generator of l7policy objects + :rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + """ + return self._list(_l7policy.L7Policy, **query) + + def update_l7_policy(self, l7_policy, **attrs): + """Update a l7policy + + :param l7_policy: Either the id of a l7policy or a + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance. 
+ :param dict attrs: The attributes to update on the l7policy + represented by ``l7policy``. + + :returns: The updated l7policy + :rtype: :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + """ + return self._update(_l7policy.L7Policy, l7_policy, **attrs) + + def create_l7_rule(self, l7_policy, **attrs): + """Create a new l7rule from attributes + + :param l7_policy: The l7_policy can be either the ID of a l7policy or + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance that the l7rule will be created in. + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.load_balancer.v2.l7_rule.L7Rule`, + comprised of the properties on the L7Rule class. + + :returns: The results of l7rule creation + :rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` + """ + l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) + return self._create( + _l7rule.L7Rule, l7policy_id=l7policyobj.id, **attrs + ) + + def delete_l7_rule(self, l7rule, l7_policy, ignore_missing=True): + """Delete a l7rule + + :param l7rule: The l7rule can be either the ID of a l7rule or a + :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` instance. + :param l7_policy: The l7_policy can be either the ID of a l7policy or + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance that the l7rule belongs to. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the l7rule does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent l7rule. + + :returns: ``None`` + """ + l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) + self._delete( + _l7rule.L7Rule, + l7rule, + ignore_missing=ignore_missing, + l7policy_id=l7policyobj.id, + ) + + def find_l7_rule(self, name_or_id, l7_policy, ignore_missing=True): + """Find a single l7rule + + :param str name_or_id: The name or ID of a l7rule. 
+ :param l7_policy: The l7_policy can be either the ID of a l7policy or + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance that the l7rule belongs to. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + + :returns: One :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` + or None + """ + l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) + return self._find( + _l7rule.L7Rule, + name_or_id, + ignore_missing=ignore_missing, + l7policy_id=l7policyobj.id, + ) + + def get_l7_rule(self, l7rule, l7_policy): + """Get a single l7rule + + :param l7rule: The l7rule can be the ID of a l7rule or a + :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` + instance. + :param l7_policy: The l7_policy can be either the ID of a l7policy or + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance that the l7rule belongs to. + + :returns: One :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) + return self._get(_l7rule.L7Rule, l7rule, l7policy_id=l7policyobj.id) + + def l7_rules(self, l7_policy, **query): + """Return a generator of l7rules + + :param l7_policy: The l7_policy can be either the ID of a l7_policy or + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance that the l7rule belongs to. + :param dict query: Optional query parameters to be sent to limit + the resources being returned. 
Valid parameters are: + + :returns: A generator of l7rule objects + :rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` + """ + l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) + return self._list(_l7rule.L7Rule, l7policy_id=l7policyobj.id, **query) + + def update_l7_rule(self, l7rule, l7_policy, **attrs): + """Update a l7rule + + :param l7rule: Either the ID of a l7rule or a + :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` + instance. + :param l7_policy: The l7_policy can be either the ID of a l7policy or + :class:`~openstack.load_balancer.v2.l7_policy.L7Policy` + instance that the l7rule belongs to. + :param dict attrs: The attributes to update on the l7rule + represented by ``l7rule``. + + :returns: The updated l7rule + :rtype: :class:`~openstack.load_balancer.v2.l7_rule.L7Rule` + """ + l7policyobj = self._get_resource(_l7policy.L7Policy, l7_policy) + return self._update( + _l7rule.L7Rule, l7rule, l7policy_id=l7policyobj.id, **attrs + ) + + def quotas(self, **query): + """Return a generator of quotas + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Currently no query + parameter is supported. + + :returns: A generator of quota objects + :rtype: :class:`~openstack.load_balancer.v2.quota.Quota` + """ + return self._list(_quota.Quota, **query) + + def get_quota(self, quota): + """Get a quota + + :param quota: The value can be the ID of a quota or a + :class:`~openstack.load_balancer.v2.quota.Quota` + instance. The ID of a quota is the same as the project + ID for the quota. + + :returns: One :class:`~openstack.load_balancer.v2.quota.Quota` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_quota.Quota, quota) + + def update_quota(self, quota, **attrs): + """Update a quota + + :param quota: Either the ID of a quota or a + :class:`~openstack.load_balancer.v2.quota.Quota` + instance. 
The ID of a quota is the same as the + project ID for the quota. + :param dict attrs: The attributes to update on the quota represented + by ``quota``. + + :returns: The updated quota + :rtype: :class:`~openstack.load_balancer.v2.quota.Quota` + """ + return self._update(_quota.Quota, quota, **attrs) + + def get_quota_default(self): + """Get a default quota + + :returns: One :class:`~openstack.load_balancer.v2.quota.QuotaDefault` + """ + return self._get(_quota.QuotaDefault, requires_id=False) + + def delete_quota(self, quota, ignore_missing=True): + """Delete a quota (i.e. reset to the default quota) + + :param quota: The value can be either the ID of a quota or a + :class:`~openstack.load_balancer.v2.quota.Quota` + instance. The ID of a quota is the same as the + project ID for the quota. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when quota does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent quota. + + :returns: ``None`` + """ + self._delete(_quota.Quota, quota, ignore_missing=ignore_missing) + + def providers(self, **query): + """Retrieve a generator of providers + + :returns: A generator of providers instances + """ + return self._list(_provider.Provider, **query) + + def provider_flavor_capabilities(self, provider, **query): + """Retrieve a generator of provider flavor capabilities + + :returns: A generator of provider flavor capabilities instances + """ + return self._list( + _provider.ProviderFlavorCapabilities, provider=provider, **query + ) + + def create_flavor_profile(self, **attrs): + """Create a new flavor profile from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile`, + comprised of the properties on the FlavorProfile class. 
+ + :returns: The results of profile creation creation + :rtype: + :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` + """ + return self._create(_flavor_profile.FlavorProfile, **attrs) + + def get_flavor_profile(self, *attrs): + """Get a flavor profile + + :param flavor_profile: The value can be the name of a flavor profile or + :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` + instance. + + :returns: One + :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` + """ + return self._get(_flavor_profile.FlavorProfile, *attrs) + + def flavor_profiles(self, **query): + """Retrieve a generator of flavor profiles + + :returns: A generator of flavor profiles instances + """ + return self._list(_flavor_profile.FlavorProfile, **query) + + def delete_flavor_profile(self, flavor_profile, ignore_missing=True): + """Delete a flavor profile + + :param flavor_profile: The flavor_profile can be either the ID or a + :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` + instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the flavor profile does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + flavor profile. + + :returns: ``None`` + """ + self._delete( + _flavor_profile.FlavorProfile, + flavor_profile, + ignore_missing=ignore_missing, + ) + + def find_flavor_profile(self, name_or_id, ignore_missing=True): + """Find a single flavor profile + + :param name_or_id: The name or ID of a flavor profile + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the flavor profile does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent flavor profile. 
+ + :returns: ``None`` + """ + return self._find( + _flavor_profile.FlavorProfile, + name_or_id, + ignore_missing=ignore_missing, + ) + + def update_flavor_profile(self, flavor_profile, **attrs): + """Update a flavor profile + + :param flavor_profile: The flavor_profile can be either the ID or a + :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` + instance + :param dict attrs: The attributes to update on the flavor profile + represented by ``flavor_profile``. + + :returns: The updated flavor profile + :rtype: + :class:`~openstack.load_balancer.v2.flavor_profile.FlavorProfile` + """ + return self._update( + _flavor_profile.FlavorProfile, flavor_profile, **attrs + ) + + def create_flavor(self, **attrs): + """Create a new flavor from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.load_balancer.v2.flavor.Flavor`, + comprised of the properties on the Flavorclass. + + :returns: The results of flavor creation creation + :rtype: :class:`~openstack.load_balancer.v2.flavor.Flavor` + """ + return self._create(_flavor.Flavor, **attrs) + + def get_flavor(self, *attrs): + """Get a flavor + + :param flavor: The value can be the ID of a flavor + or :class:`~openstack.load_balancer.v2.flavor.Flavor` instance. + + :returns: One + :class:`~openstack.load_balancer.v2.flavor.Flavor` + """ + return self._get(_flavor.Flavor, *attrs) + + def flavors(self, **query): + """Retrieve a generator of flavors + + :returns: A generator of flavor instances + """ + return self._list(_flavor.Flavor, **query) + + def delete_flavor(self, flavor, ignore_missing=True): + """Delete a flavor + + :param flavor: The flavorcan be either the ID or a + :class:`~openstack.load_balancer.v2.flavor.Flavor` instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the flavor does not exist. 
When set to ``True``, no exception + will be set when attempting to delete a nonexistent flavor. + + :returns: ``None`` + """ + self._delete(_flavor.Flavor, flavor, ignore_missing=ignore_missing) + + def find_flavor(self, name_or_id, ignore_missing=True): + """Find a single flavor + + :param name_or_id: The name or ID of a flavor + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the flavor does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent flavor. + + :returns: ``None`` + """ + return self._find( + _flavor.Flavor, name_or_id, ignore_missing=ignore_missing + ) + + def update_flavor(self, flavor, **attrs): + """Update a flavor + + :param flavor: The flavor can be either the ID or a + :class:`~openstack.load_balancer.v2.flavor.Flavor` instance + :param dict attrs: The attributes to update on the flavor + represented by ``flavor``. + + :returns: The updated flavor + :rtype: :class:`~openstack.load_balancer.v2.flavor.Flavor` + """ + return self._update(_flavor.Flavor, flavor, **attrs) + + def amphorae(self, **query): + """Retrieve a generator of amphorae + + :returns: A generator of amphora instances + """ + return self._list(_amphora.Amphora, **query) + + def get_amphora(self, *attrs): + """Get a amphora + + :param amphora: The value can be the ID of an amphora + or :class:`~openstack.load_balancer.v2.amphora.Amphora` instance. + + :returns: One + :class:`~openstack.load_balancer.v2.amphora.Amphora` + """ + return self._get(_amphora.Amphora, *attrs) + + def find_amphora(self, amphora_id, ignore_missing=True): + """Find a single amphora + + :param amphora_id: The ID of a amphora + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the amphora does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent amphora. 
+ + :returns: ``None`` + """ + return self._find( + _amphora.Amphora, amphora_id, ignore_missing=ignore_missing + ) + + def configure_amphora(self, amphora_id): + """Update the configuration of an amphora agent + + :param amphora_id: The ID of an amphora + + :returns: ``None`` + """ + lb = self._get_resource(_amphora.Amphora, amphora_id) + lb.configure(self) + + def failover_amphora(self, amphora_id): + """Failover an amphora + + :param amphora_id: The ID of an amphora + + :returns: ``None`` + """ + lb = self._get_resource(_amphora.Amphora, amphora_id) + lb.failover(self) + + def create_availability_zone_profile(self, **attrs): + """Create a new availability zone profile from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` + comprised of the properties on the AvailabilityZoneProfile + class. + + :returns: The results of profile creation + :rtype: + :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` + """ + return self._create( + _availability_zone_profile.AvailabilityZoneProfile, **attrs + ) + + def get_availability_zone_profile(self, *attrs): + """Get an availability zone profile + + :param availability_zone_profile: The value can be the ID of an + availability_zone profile or + :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` + instance. 
+ + :returns: One + :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` + """ + return self._get( + _availability_zone_profile.AvailabilityZoneProfile, *attrs + ) + + def availability_zone_profiles(self, **query): + """Retrieve a generator of availability zone profiles + + :returns: A generator of availability zone profiles instances + """ + return self._list( + _availability_zone_profile.AvailabilityZoneProfile, **query + ) + + def delete_availability_zone_profile( + self, availability_zone_profile, ignore_missing=True + ): + """Delete an availability zone profile + + :param availability_zone_profile: The availability_zone_profile can be + either the ID or a + :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` + instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the availability zone profile does not exist. When set to + ``True``, no exception will be set when attempting to delete a + nonexistent availability zone profile. + + :returns: ``None`` + """ + self._delete( + _availability_zone_profile.AvailabilityZoneProfile, + availability_zone_profile, + ignore_missing=ignore_missing, + ) + + def find_availability_zone_profile(self, name_or_id, ignore_missing=True): + """Find a single availability zone profile + + :param name_or_id: The name or ID of a availability zone profile + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the availability zone profile does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent availability zone profile. 
+ + :returns: ``None`` + """ + return self._find( + _availability_zone_profile.AvailabilityZoneProfile, + name_or_id, + ignore_missing=ignore_missing, + ) + + def update_availability_zone_profile( + self, availability_zone_profile, **attrs + ): + """Update an availability zone profile + + :param availability_zone_profile: The availability_zone_profile can be + either the ID or a + :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` + instance + :param dict attrs: The attributes to update on the availability_zone + profile represented by ``availability_zone_profile``. + + :returns: The updated availability zone profile + :rtype: + :class:`~openstack.load_balancer.v2.availability_zone_profile.AvailabilityZoneProfile` + """ + return self._update( + _availability_zone_profile.AvailabilityZoneProfile, + availability_zone_profile, + **attrs, + ) + + def create_availability_zone(self, **attrs): + """Create a new availability zone from attributes + + :param dict attrs: Keyword arguments which will be used to create a + :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` + comprised of the properties on the AvailabilityZoneclass. + + :returns: The results of availability_zone creation creation + :rtype: + :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` + """ + return self._create(_availability_zone.AvailabilityZone, **attrs) + + def get_availability_zone(self, *attrs): + """Get an availability zone + + :param availability_zone: The value can be the ID of a + availability_zone or + :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` + instance. 
+ + :returns: One + :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` + """ + return self._get(_availability_zone.AvailabilityZone, *attrs) + + def availability_zones(self, **query): + """Retrieve a generator of availability zones + + :returns: A generator of availability zone instances + """ + return self._list(_availability_zone.AvailabilityZone, **query) + + def delete_availability_zone(self, availability_zone, ignore_missing=True): + """Delete an availability_zone + + :param availability_zone: The availability_zone can be either the ID + or a + :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` + instance + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the availability zone does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + availability zone. + + :returns: ``None`` + """ + self._delete( + _availability_zone.AvailabilityZone, + availability_zone, + ignore_missing=ignore_missing, + ) + + def find_availability_zone(self, name_or_id, ignore_missing=True): + """Find a single availability zone + + :param name_or_id: The name or ID of a availability zone + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the availability zone does not exist. + When set to ``True``, no exception will be set when attempting + to find a nonexistent availability zone. 
+ + :returns: ``None`` + """ + return self._find( + _availability_zone.AvailabilityZone, + name_or_id, + ignore_missing=ignore_missing, + ) + + def update_availability_zone(self, availability_zone, **attrs): + """Update an availability zone + + :param availability_zone: The availability_zone can be either the ID + or a + :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` + instance + :param dict attrs: The attributes to update on the availability_zone + represented by ``availability_zone``. + + :returns: The updated availability_zone + :rtype: + :class:`~openstack.load_balancer.v2.availability_zone.AvailabilityZone` + """ + return self._update( + _availability_zone.AvailabilityZone, availability_zone, **attrs + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. 
+ :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/load_balancer/v2/amphora.py b/openstack/load_balancer/v2/amphora.py new file mode 100644 index 0000000000..b84156ef85 --- /dev/null +++ b/openstack/load_balancer/v2/amphora.py @@ -0,0 +1,192 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Amphora(resource.Resource): + resource_key = 'amphora' + resources_key = 'amphorae' + base_path = '/octavia/amphorae' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + + _query_mapping = resource.QueryParameters( + 'id', + 'loadbalancer_id', + 'compute_id', + 'lb_network_ip', + 'vrrp_ip', + 'ha_ip', + 'vrrp_port_id', + 'ha_port_id', + 'cert_expiration', + 'cert_busy', + 'role', + 'status', + 'vrrp_interface', + 'vrrp_id', + 'vrrp_priority', + 'cached_zone', + 'created_at', + 'updated_at', + 'image_id', + 'compute_flavor', + ) + + # Properties + #: The ID of the amphora. + id = resource.Body('id') + #: The ID of the load balancer. + loadbalancer_id = resource.Body('loadbalancer_id') + #: The ID of the amphora resource in the compute system. + compute_id = resource.Body('compute_id') + #: The management IP of the amphora. + lb_network_ip = resource.Body('lb_network_ip') + #: The address of the vrrp port on the amphora. + vrrp_ip = resource.Body('vrrp_ip') + #: The IP address of the Virtual IP (VIP). + ha_ip = resource.Body('ha_ip') + #: The vrrp port's ID in the networking system. + vrrp_port_id = resource.Body('vrrp_port_id') + #: The ID of the Virtual IP (VIP) port. + ha_port_id = resource.Body('ha_port_id') + #: The date the certificate for the amphora expires. + cert_expiration = resource.Body('cert_expiration') + #: Whether the certificate is in the process of being replaced. + cert_busy = resource.Body('cert_busy') + #: The role configured for the amphora. One of STANDALONE, MASTER, BACKUP. + role = resource.Body('role') + #: The status of the amphora. One of: BOOTING, ALLOCATED, READY, + #: PENDING_CREATE, PENDING_DELETE, DELETED, ERROR. 
+ status = resource.Body('status') + #: The bound interface name of the vrrp port on the amphora. + vrrp_interface = resource.Body('vrrp_interface') + #: The vrrp group's ID for the amphora. + vrrp_id = resource.Body('vrrp_id') + #: The priority of the amphora in the vrrp group. + vrrp_priority = resource.Body('vrrp_priority') + #: The availability zone of a compute instance, cached at create time. + cached_zone = resource.Body('cached_zone') + #: The UTC date and timestamp when the resource was created. + created_at = resource.Body('created_at') + #: The UTC date and timestamp when the resource was last updated. + updated_at = resource.Body('updated_at') + #: The ID of the glance image used for the amphora. + image_id = resource.Body('image_id') + #: The ID of the compute flavor used for the amphora. + compute_flavor = resource.Body('compute_flavor') + + def configure(self, session): + """Configure load balancer. + + Update the amphora agent configuration. This will push the new + configuration to the amphora agent and will update the configuration + options that are mutatable. + + :param session: The session to use for making this request. + :returns: None + """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'config') + + response = session.put( + request.url, + headers=request.headers, + microversion=version, + ) + + msg = f"Failed to configure load balancer {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + def failover(self, session): + """Failover load balancer. + + :param session: The session to use for making this request. 
+ :returns: None + """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'failover') + + response = session.put( + request.url, + headers=request.headers, + microversion=version, + ) + + msg = f"Failed to failover load balancer {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + +# TODO(stephenfin): Delete this: it's useless +class AmphoraConfig(resource.Resource): + base_path = '/octavia/amphorae/%(amphora_id)s/config' + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = True + allow_delete = False + allow_list = False + allow_empty_commit = True + + requires_id = False + + # Properties + #: The ID of the amphora. + amphora_id = resource.URI('amphora_id') + + # The default _update code path also has no way to pass has_body into this + # function, so overriding the method here. + def commit( + self, session, prepend_key=True, has_body=False, *args, **kwargs + ): + return super().commit(session, prepend_key, has_body, *args, *kwargs) + + +# TODO(stephenfin): Delete this: it's useless +class AmphoraFailover(resource.Resource): + base_path = '/octavia/amphorae/%(amphora_id)s/failover' + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = True + allow_delete = False + allow_list = False + allow_empty_commit = True + + requires_id = False + + # Properties + #: The ID of the amphora. + amphora_id = resource.URI('amphora_id') + + # The default _update code path also has no way to pass has_body into this + # function, so overriding the method here. 
+ def commit( + self, session, prepend_key=True, has_body=False, *args, **kwargs + ): + return super().commit(session, prepend_key, has_body, *args, *kwargs) diff --git a/openstack/load_balancer/v2/availability_zone.py b/openstack/load_balancer/v2/availability_zone.py new file mode 100644 index 0000000000..4037fbf520 --- /dev/null +++ b/openstack/load_balancer/v2/availability_zone.py @@ -0,0 +1,46 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class AvailabilityZone(resource.Resource): + resource_key = 'availability_zone' + resources_key = 'availability_zones' + base_path = '/lbaas/availabilityzones' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'name', + 'description', + 'availability_zone_profile_id', + is_enabled='enabled', + ) + + # Properties + #: The name of the availability zone. + name = resource.Body('name') + #: The availability zone description. + description = resource.Body('description') + #: The associated availability zone profile ID + availability_zone_profile_id = resource.Body( + 'availability_zone_profile_id' + ) + #: Whether the availability zone is enabled for use or not. 
+ is_enabled = resource.Body('enabled') diff --git a/openstack/load_balancer/v2/availability_zone_profile.py b/openstack/load_balancer/v2/availability_zone_profile.py new file mode 100644 index 0000000000..dbe72e6161 --- /dev/null +++ b/openstack/load_balancer/v2/availability_zone_profile.py @@ -0,0 +1,41 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class AvailabilityZoneProfile(resource.Resource): + resource_key = 'availability_zone_profile' + resources_key = 'availability_zone_profiles' + base_path = '/lbaas/availabilityzoneprofiles' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'id', 'name', 'provider_name', 'availability_zone_data' + ) + + # Properties + #: The ID of the availability zone profile. + id = resource.Body('id') + #: The name of the availability zone profile. + name = resource.Body('name') + #: The provider this availability zone profile is for. + provider_name = resource.Body('provider_name') + #: The JSON string containing the availability zone metadata. + availability_zone_data = resource.Body('availability_zone_data') diff --git a/openstack/load_balancer/v2/flavor.py b/openstack/load_balancer/v2/flavor.py new file mode 100644 index 0000000000..799d975784 --- /dev/null +++ b/openstack/load_balancer/v2/flavor.py @@ -0,0 +1,44 @@ +# Copyright 2019 Rackspace, US Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Flavor(resource.Resource): + resource_key = 'flavor' + resources_key = 'flavors' + base_path = '/lbaas/flavors' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'id', 'name', 'description', 'flavor_profile_id', is_enabled='enabled' + ) + + # Properties + #: The ID of the flavor. + id = resource.Body('id') + #: The name of the flavor. + name = resource.Body('name') + #: The flavor description. + description = resource.Body('description') + #: The associated flavor profile ID + flavor_profile_id = resource.Body('flavor_profile_id') + #: Whether the flavor is enabled for use or not. + is_enabled = resource.Body('enabled') diff --git a/openstack/load_balancer/v2/flavor_profile.py b/openstack/load_balancer/v2/flavor_profile.py new file mode 100644 index 0000000000..bdb5031701 --- /dev/null +++ b/openstack/load_balancer/v2/flavor_profile.py @@ -0,0 +1,42 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class FlavorProfile(resource.Resource): + resource_key = 'flavorprofile' + resources_key = 'flavorprofiles' + base_path = '/lbaas/flavorprofiles' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'id', 'name', 'provider_name', 'flavor_data' + ) + + # Properties + #: The ID of the flavor profile. + id = resource.Body('id') + #: The name of the flavor profile. + name = resource.Body('name') + #: The provider this flavor profile is for. + provider_name = resource.Body('provider_name') + #: The JSON string containing the flavor metadata. + flavor_data = resource.Body('flavor_data') diff --git a/openstack/load_balancer/v2/health_monitor.py b/openstack/load_balancer/v2/health_monitor.py new file mode 100644 index 0000000000..9741b285b6 --- /dev/null +++ b/openstack/load_balancer/v2/health_monitor.py @@ -0,0 +1,85 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.common import tag +from openstack import resource + + +class HealthMonitor(resource.Resource, tag.TagMixin): + resource_key = 'healthmonitor' + resources_key = 'healthmonitors' + base_path = '/lbaas/healthmonitors' + + # capabilities + allow_create = True + allow_list = True + allow_fetch = True + allow_delete = True + allow_commit = True + + _query_mapping = resource.QueryParameters( + 'name', + 'created_at', + 'updated_at', + 'delay', + 'expected_codes', + 'http_method', + 'max_retries', + 'max_retries_down', + 'pool_id', + 'provisioning_status', + 'operating_status', + 'timeout', + 'project_id', + 'type', + 'url_path', + is_admin_state_up='admin_state_up', + **tag.TagMixin._tag_query_parameters, + ) + + #: Properties + #: Timestamp when the health monitor was created. + created_at = resource.Body('created_at') + #: The time, in seconds, between sending probes to members. + delay = resource.Body('delay', type=int) + #: The expected http status codes to get from a successful health check + expected_codes = resource.Body('expected_codes') + #: The HTTP method that the monitor uses for requests + http_method = resource.Body('http_method') + #: The administrative state of the health monitor *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: The number of successful checks before changing the operating status + #: of the member to ONLINE. + max_retries = resource.Body('max_retries', type=int) + #: The number of allowed check failures before changing the operating + #: status of the member to ERROR. + max_retries_down = resource.Body('max_retries_down', type=int) + #: The health monitor name + name = resource.Body('name') + #: Operating status of the member. + operating_status = resource.Body('operating_status') + #: List of associated pools. 
+ #: *Type: list of dicts which contain the pool IDs* + pools = resource.Body('pools', type=list) + #: The ID of the associated Pool + pool_id = resource.Body('pool_id') + #: The ID of the project + project_id = resource.Body('project_id') + #: The provisioning status of this member. + provisioning_status = resource.Body('provisioning_status') + #: The time, in seconds, after which a health check times out + timeout = resource.Body('timeout', type=int) + #: The type of health monitor + type = resource.Body('type') + #: Timestamp when the member was last updated. + updated_at = resource.Body('updated_at') + #: The HTTP path of the request to test the health of a member + url_path = resource.Body('url_path') diff --git a/openstack/load_balancer/v2/l7_policy.py b/openstack/load_balancer/v2/l7_policy.py new file mode 100644 index 0000000000..130ada8394 --- /dev/null +++ b/openstack/load_balancer/v2/l7_policy.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.common import tag +from openstack import resource + + +class L7Policy(resource.Resource, tag.TagMixin): + resource_key = 'l7policy' + resources_key = 'l7policies' + base_path = '/lbaas/l7policies' + + # capabilities + allow_create = True + allow_list = True + allow_fetch = True + allow_commit = True + allow_delete = True + + _query_mapping = resource.QueryParameters( + 'action', + 'description', + 'listener_id', + 'name', + 'position', + 'redirect_pool_id', + 'redirect_url', + 'provisioning_status', + 'operating_status', + 'redirect_prefix', + 'project_id', + is_admin_state_up='admin_state_up', + **tag.TagMixin._tag_query_parameters, + ) + + #: Properties + #: The action to be taken l7policy is matched + action = resource.Body('action') + #: Timestamp when the L7 policy was created. + created_at = resource.Body('created_at') + #: The l7policy description + description = resource.Body('description') + #: The administrative state of the l7policy *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: The ID of the listener associated with this l7policy + listener_id = resource.Body('listener_id') + #: The l7policy name + name = resource.Body('name') + #: Operating status of the member. + operating_status = resource.Body('operating_status') + #: Sequence number of this l7policy + position = resource.Body('position', type=int) + #: The ID of the project this l7policy is associated with. 
+ project_id = resource.Body('project_id') + #: The provisioning status of this l7policy + provisioning_status = resource.Body('provisioning_status') + #: The ID of the pool to which the requests will be redirected + redirect_pool_id = resource.Body('redirect_pool_id') + #: The URL prefix to which the requests should be redirected + redirect_prefix = resource.Body('redirect_prefix') + #: The URL to which the requests should be redirected + redirect_url = resource.Body('redirect_url') + #: The list of L7Rules associated with the l7policy + rules = resource.Body('rules', type=list) + #: Timestamp when the member was last updated. + updated_at = resource.Body('updated_at') diff --git a/openstack/load_balancer/v2/l7_rule.py b/openstack/load_balancer/v2/l7_rule.py new file mode 100644 index 0000000000..458c8000e7 --- /dev/null +++ b/openstack/load_balancer/v2/l7_rule.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.common import tag +from openstack import resource + + +class L7Rule(resource.Resource, tag.TagMixin): + resource_key = 'rule' + resources_key = 'rules' + base_path = '/lbaas/l7policies/%(l7policy_id)s/rules' + + # capabilities + allow_create = True + allow_list = True + allow_fetch = True + allow_commit = True + allow_delete = True + + _query_mapping = resource.QueryParameters( + 'compare_type', + 'created_at', + 'invert', + 'key', + 'project_id', + 'provisioning_status', + 'type', + 'updated_at', + 'rule_value', + 'operating_status', + is_admin_state_up='admin_state_up', + l7_policy_id='l7policy_id', + **tag.TagMixin._tag_query_parameters, + ) + + #: Properties + #: The administrative state of the l7policy *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: comparison type to be used with the value in this L7 rule. + compare_type = resource.Body('compare_type') + #: Timestamp when the L7 rule was created. + created_at = resource.Body('created_at') + #: inverts the logic of the rule if True + # (ie. perform a logical NOT on the rule) + invert = resource.Body('invert', type=bool) + #: The key to use for the comparison. + key = resource.Body('key') + #: The ID of the associated l7 policy + l7_policy_id = resource.URI('l7policy_id') + #: The operating status of this l7rule + operating_status = resource.Body('operating_status') + #: The ID of the project this l7policy is associated with. + project_id = resource.Body('project_id') + #: The provisioning status of this l7policy + provisioning_status = resource.Body('provisioning_status') + #: The type of L7 rule + type = resource.Body('type') + #: Timestamp when the L7 rule was updated. 
+ updated_at = resource.Body('updated_at') + #: value to be compared with + rule_value = resource.Body('value') diff --git a/openstack/load_balancer/v2/listener.py b/openstack/load_balancer/v2/listener.py new file mode 100644 index 0000000000..29e50f80e0 --- /dev/null +++ b/openstack/load_balancer/v2/listener.py @@ -0,0 +1,155 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack.common import tag +from openstack import resource + + +class Listener(resource.Resource, tag.TagMixin): + resource_key = 'listener' + resources_key = 'listeners' + base_path = '/lbaas/listeners' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'connection_limit', + 'default_pool_id', + 'default_tls_container_ref', + 'description', + 'name', + 'project_id', + 'protocol', + 'protocol_port', + 'created_at', + 'updated_at', + 'provisioning_status', + 'operating_status', + 'sni_container_refs', + 'insert_headers', + 'load_balancer_id', + 'timeout_client_data', + 'timeout_member_connect', + 'timeout_member_data', + 'timeout_tcp_inspect', + 'allowed_cidrs', + 'tls_ciphers', + 'tls_versions', + 'alpn_protocols', + 'hsts_max_age', + is_hsts_include_subdomains='hsts_include_subdomains', + is_hsts_preload='hsts_preload', + is_admin_state_up='admin_state_up', + **tag.TagMixin._tag_query_parameters, + ) + + # Properties + #: List of IPv4 or IPv6 CIDRs. 
+ allowed_cidrs = resource.Body('allowed_cidrs', type=list) + #: List of ALPN protocols. + alpn_protocols = resource.Body('alpn_protocols', type=list) + #: The maximum number of connections permitted for this load balancer. + #: Default is infinite. + connection_limit = resource.Body('connection_limit') + #: Timestamp when the listener was created. + created_at = resource.Body('created_at') + #: Default pool to which the requests will be routed. + default_pool = resource.Body('default_pool') + #: ID of default pool. Must have compatible protocol with listener. + default_pool_id = resource.Body('default_pool_id') + #: A reference to a container of TLS secrets. + default_tls_container_ref = resource.Body('default_tls_container_ref') + #: Description for the listener. + description = resource.Body('description') + #: Defines whether the `include_subdomains` directive is used for HSTS or + #: not + is_hsts_include_subdomains = resource.Body( + 'hsts_include_subdomains', type=bool + ) + #: Enables HTTP Strict Transport Security (HSTS) and sets the `max_age` + #: directive to given value + hsts_max_age = resource.Body('hsts_max_age', type=int) + #: Defines whether the `hsts_preload` directive is used for HSTS or not + is_hsts_preload = resource.Body('hsts_preload', type=bool) + #: Dictionary of additional headers insertion into HTTP header. + insert_headers = resource.Body('insert_headers', type=dict) + #: The administrative state of the listener, which is up + #: ``True`` or down ``False``. *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: List of l7policies associated with this listener. + l7_policies = resource.Body('l7policies', type=list) + #: The ID of the parent load balancer. + load_balancer_id = resource.Body('loadbalancer_id') + #: List of load balancers associated with this listener. 
+ #: *Type: list of dicts which contain the load balancer IDs* + load_balancers = resource.Body('loadbalancers', type=list) + #: Name of the listener + name = resource.Body('name') + #: Operating status of the listener. + operating_status = resource.Body('operating_status') + #: The ID of the project this listener is associated with. + project_id = resource.Body('project_id') + #: The protocol of the listener, which is TCP, HTTP, HTTPS + #: or TERMINATED_HTTPS. + protocol = resource.Body('protocol') + #: Port the listener will listen to, e.g. 80. + protocol_port = resource.Body('protocol_port', type=int) + #: The provisioning status of this listener. + provisioning_status = resource.Body('provisioning_status') + #: A list of references to TLS secrets. + #: *Type: list* + sni_container_refs = resource.Body('sni_container_refs') + #: Timestamp when the listener was last updated. + updated_at = resource.Body('updated_at') + #: Frontend client inactivity timeout in milliseconds. + timeout_client_data = resource.Body('timeout_client_data', type=int) + #: Backend member connection timeout in milliseconds. + timeout_member_connect = resource.Body('timeout_member_connect', type=int) + #: Backend member inactivity timeout in milliseconds. + timeout_member_data = resource.Body('timeout_member_data', type=int) + #: Time, in milliseconds, to wait for additional TCP packets for content + #: inspection. + timeout_tcp_inspect = resource.Body('timeout_tcp_inspect', type=int) + #: Stores a cipher string in OpenSSL format. + tls_ciphers = resource.Body('tls_ciphers') + #: A lsit of TLS protocols to be used by the listener + tls_versions = resource.Body('tls_versions', type=list) + + +class ListenerStats(resource.Resource): + resource_key = 'stats' + base_path = '/lbaas/listeners/%(listener_id)s/stats' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = False + + # Properties + #: The ID of the listener. 
+ listener_id = resource.URI('listener_id') + #: The currently active connections. + active_connections = resource.Body('active_connections', type=int) + #: The total bytes received. + bytes_in = resource.Body('bytes_in', type=int) + #: The total bytes sent. + bytes_out = resource.Body('bytes_out', type=int) + #: The total requests that were unable to be fulfilled. + request_errors = resource.Body('request_errors', type=int) + #: The total connections handled. + total_connections = resource.Body('total_connections', type=int) diff --git a/openstack/load_balancer/v2/load_balancer.py b/openstack/load_balancer/v2/load_balancer.py new file mode 100644 index 0000000000..1668e484ec --- /dev/null +++ b/openstack/load_balancer/v2/load_balancer.py @@ -0,0 +1,175 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.common import tag +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class LoadBalancer(resource.Resource, tag.TagMixin): + resource_key = 'loadbalancer' + resources_key = 'loadbalancers' + base_path = '/lbaas/loadbalancers' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'flavor_id', + 'name', + 'project_id', + 'provider', + 'vip_address', + 'vip_network_id', + 'vip_port_id', + 'vip_subnet_id', + 'vip_qos_policy_id', + 'provisioning_status', + 'operating_status', + 'availability_zone', + is_admin_state_up='admin_state_up', + **tag.TagMixin._tag_query_parameters, + ) + + # Properties + #: The administrative state of the load balancer *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: Name of the target Octavia availability zone + availability_zone = resource.Body('availability_zone') + #: Timestamp when the load balancer was created + created_at = resource.Body('created_at') + #: The load balancer description + description = resource.Body('description') + #: The load balancer flavor ID + flavor_id = resource.Body('flavor_id') + #: List of listeners associated with this load balancer + listeners = resource.Body('listeners', type=list) + #: The load balancer name + name = resource.Body('name') + #: Operating status of the load balancer + operating_status = resource.Body('operating_status') + #: List of pools associated with this load balancer + pools = resource.Body('pools', type=list) + #: The ID of the project this load balancer is associated with. + project_id = resource.Body('project_id') + #: Provider name for the load balancer. 
+ provider = resource.Body('provider') + #: The provisioning status of this load balancer + provisioning_status = resource.Body('provisioning_status') + #: Timestamp when the load balancer was last updated + updated_at = resource.Body('updated_at') + #: VIP address of load balancer + vip_address = resource.Body('vip_address') + #: VIP netowrk ID + vip_network_id = resource.Body('vip_network_id') + #: VIP port ID + vip_port_id = resource.Body('vip_port_id') + #: VIP subnet ID + vip_subnet_id = resource.Body('vip_subnet_id') + # VIP qos policy id + vip_qos_policy_id = resource.Body('vip_qos_policy_id') + #: Additional VIPs + additional_vips = resource.Body('additional_vips', type=list) + + def delete(self, session, error_message=None, **kwargs): + request = self._prepare_request() + params = {} + if ( + hasattr(self, 'cascade') + and isinstance(self.cascade, bool) + and self.cascade + ): + params['cascade'] = True + response = session.delete(request.url, params=params) + + self._translate_response( + response, has_body=False, error_message=error_message + ) + return self + + def failover(self, session): + """Failover load balancer. + + :param session: The session to use for making this request. + :returns: None + """ + session = self._get_session(session) + version = self._get_microversion(session) + request = self._prepare_request(requires_id=True) + request.url = utils.urljoin(request.url, 'failover') + + response = session.put( + request.url, + headers=request.headers, + microversion=version, + ) + + msg = f"Failed to failover load balancer {self.id}" + exceptions.raise_from_response(response, error_message=msg) + + +class LoadBalancerStats(resource.Resource): + resource_key = 'stats' + base_path = '/lbaas/loadbalancers/%(lb_id)s/stats' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = False + + # Properties + #: The ID of the load balancer. 
+ lb_id = resource.URI('lb_id') + #: The currently active connections. + active_connections = resource.Body('active_connections', type=int) + #: The total bytes received. + bytes_in = resource.Body('bytes_in', type=int) + #: The total bytes sent. + bytes_out = resource.Body('bytes_out', type=int) + #: The total requests that were unable to be fulfilled. + request_errors = resource.Body('request_errors', type=int) + #: The total connections handled. + total_connections = resource.Body('total_connections', type=int) + + +# TODO(stephenfin): Delete this: it's useless +class LoadBalancerFailover(resource.Resource): + base_path = '/lbaas/loadbalancers/%(lb_id)s/failover' + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = True + allow_delete = False + allow_list = False + allow_empty_commit = True + + requires_id = False + + # Properties + #: The ID of the load balancer. + lb_id = resource.URI('lb_id') + + # The default _update code path also has no + # way to pass has_body into this function, so overriding the method here. + def commit( + self, session, prepend_key=True, has_body=False, *args, **kwargs + ): + return super().commit(session, prepend_key, has_body, *args, **kwargs) diff --git a/openstack/load_balancer/v2/member.py b/openstack/load_balancer/v2/member.py new file mode 100644 index 0000000000..a17c931617 --- /dev/null +++ b/openstack/load_balancer/v2/member.py @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.common import tag +from openstack import resource + + +class Member(resource.Resource, tag.TagMixin): + resource_key = 'member' + resources_key = 'members' + base_path = '/lbaas/pools/%(pool_id)s/members' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'address', + 'name', + 'protocol_port', + 'subnet_id', + 'weight', + 'created_at', + 'updated_at', + 'provisioning_status', + 'operating_status', + 'project_id', + 'monitor_address', + 'monitor_port', + 'backup', + is_admin_state_up='admin_state_up', + **tag.TagMixin._tag_query_parameters, + ) + + # Properties + #: The IP address of the member. + address = resource.Body('address') + #: Timestamp when the member was created. + created_at = resource.Body('created_at') + #: The administrative state of the member, which is up ``True`` or + #: down ``False``. *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: IP address used to monitor this member + monitor_address = resource.Body('monitor_address') + #: Port used to monitor this member + monitor_port = resource.Body('monitor_port', type=int) + #: Name of the member. + name = resource.Body('name') + #: Operating status of the member. + operating_status = resource.Body('operating_status') + #: The ID of the owning pool. + pool_id = resource.URI('pool_id') + #: The provisioning status of this member. + provisioning_status = resource.Body('provisioning_status') + #: The ID of the project this member is associated with. + project_id = resource.Body('project_id') + #: The port on which the application is hosted. + protocol_port = resource.Body('protocol_port', type=int) + #: Subnet ID in which to access this member. + subnet_id = resource.Body('subnet_id') + #: Timestamp when the member was last updated. 
+ updated_at = resource.Body('updated_at') + #: A positive integer value that indicates the relative portion of traffic + #: that this member should receive from the pool. For example, a member + #: with a weight of 10 receives five times as much traffic as a member + #: with weight of 2. + weight = resource.Body('weight', type=int) + #: A bool value that indicates whether the member is a backup or not. + #: Backup members only receive traffic when all non-backup members + #: are down. + backup = resource.Body('backup', type=bool) diff --git a/openstack/load_balancer/v2/pool.py b/openstack/load_balancer/v2/pool.py new file mode 100644 index 0000000000..09becab86c --- /dev/null +++ b/openstack/load_balancer/v2/pool.py @@ -0,0 +1,97 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.common import tag +from openstack import resource + + +class Pool(resource.Resource, tag.TagMixin): + resource_key = 'pool' + resources_key = 'pools' + base_path = '/lbaas/pools' + + # capabilities + allow_create = True + allow_list = True + allow_fetch = True + allow_delete = True + allow_commit = True + + _query_mapping = resource.QueryParameters( + 'health_monitor_id', + 'lb_algorithm', + 'listener_id', + 'loadbalancer_id', + 'description', + 'name', + 'project_id', + 'protocol', + 'created_at', + 'updated_at', + 'provisioning_status', + 'operating_status', + 'tls_enabled', + 'tls_ciphers', + 'tls_versions', + 'alpn_protocols', + 'ca_tls_container_ref', + 'crl_container_ref', + is_admin_state_up='admin_state_up', + **tag.TagMixin._tag_query_parameters, + ) + + #: Properties + #: List of ALPN protocols. + alpn_protocols = resource.Body('alpn_protocols', type=list) + #: Timestamp when the pool was created + created_at = resource.Body('created_at') + #: Description for the pool. 
+ description = resource.Body('description') + #: Health Monitor ID + health_monitor_id = resource.Body('healthmonitor_id') + #: The administrative state of the pool *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: The loadbalancing algorithm used in the pool + lb_algorithm = resource.Body('lb_algorithm') + #: ID of listener associated with this pool + listener_id = resource.Body('listener_id') + #: List of listeners associated with this pool + listeners = resource.Body('listeners', type=list) + #: ID of load balancer associated with this pool + loadbalancer_id = resource.Body('loadbalancer_id') + #: List of loadbalancers associated with this pool + loadbalancers = resource.Body('loadbalancers', type=list) + #: Members associated with this pool + members = resource.Body('members', type=list) + #: The pool name + name = resource.Body('name') + #: Operating status of the pool + operating_status = resource.Body('operating_status') + #: The ID of the project + project_id = resource.Body('project_id') + #: The protocol of the pool + protocol = resource.Body('protocol') + #: Provisioning status of the pool + provisioning_status = resource.Body('provisioning_status') + #: Stores a string of cipher strings in OpenSSL format. + tls_ciphers = resource.Body('tls_ciphers') + #: A JSON object specifying the session persistence for the pool. 
+ session_persistence = resource.Body('session_persistence', type=dict) + #: A list of TLS protocol versions to be used in by the pool + tls_versions = resource.Body('tls_versions', type=list) + #: Timestamp when the pool was updated + updated_at = resource.Body('updated_at') + #: Use TLS for connections to backend member servers *Type: bool* + tls_enabled = resource.Body('tls_enabled', type=bool) + #: Stores the ca certificate used by backend servers + ca_tls_container_ref = resource.Body('ca_tls_container_ref') + #: Stores the revocation list file + crl_container_ref = resource.Body('crl_container_ref') diff --git a/openstack/load_balancer/v2/provider.py b/openstack/load_balancer/v2/provider.py new file mode 100644 index 0000000000..5ff6e0c6eb --- /dev/null +++ b/openstack/load_balancer/v2/provider.py @@ -0,0 +1,57 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Provider(resource.Resource): + resources_key = 'providers' + base_path = '/lbaas/providers' + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True + + _query_mapping = resource.QueryParameters('description', 'name') + + # Properties + #: The provider name. + name = resource.Body('name') + #: The provider description. 
+ description = resource.Body('description') + + +class ProviderFlavorCapabilities(resource.Resource): + resources_key = 'flavor_capabilities' + base_path = '/lbaas/providers/%(provider)s/flavor_capabilities' + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True + + _query_mapping = resource.QueryParameters('description', 'name') + + # Properties + #: The provider name to query. + provider = resource.URI('provider') + #: The provider name. + name = resource.Body('name') + #: The provider description. + description = resource.Body('description') diff --git a/openstack/load_balancer/v2/quota.py b/openstack/load_balancer/v2/quota.py new file mode 100644 index 0000000000..2151418a47 --- /dev/null +++ b/openstack/load_balancer/v2/quota.py @@ -0,0 +1,71 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Quota(resource.Resource): + resource_key = 'quota' + resources_key = 'quotas' + base_path = '/lbaas/quotas' + + # capabilities + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: The maximum amount of load balancers you can have. *Type: int* + load_balancers = resource.Body('load_balancer', type=int) + #: The maximum amount of listeners you can create. 
*Type: int* + listeners = resource.Body('listener', type=int) + #: The maximum amount of pools you can create. *Type: int* + pools = resource.Body('pool', type=int) + #: The maximum amount of health monitors you can create. *Type: int* + health_monitors = resource.Body('health_monitor', type=int) + #: The maximum amount of members you can create. *Type: int* + members = resource.Body('member', type=int) + #: The ID of the project this quota is associated with. + project_id = resource.Body('project_id', alternate_id=True) + + def _prepare_request( + self, + requires_id=True, + prepend_key=False, + patch=False, + base_path=None, + *args, + **kwargs, + ): + _request = super()._prepare_request( + requires_id, prepend_key, base_path=base_path + ) + if self.resource_key in _request.body: + _body = _request.body[self.resource_key] + else: + _body = _request.body + if 'id' in _body: + del _body['id'] + return _request + + +class QuotaDefault(Quota): + base_path = '/lbaas/quotas/defaults' + + allow_retrieve = True + allow_commit = False + allow_delete = False + allow_list = False diff --git a/openstack/load_balancer/version.py b/openstack/load_balancer/version.py new file mode 100644 index 0000000000..bb0891768d --- /dev/null +++ b/openstack/load_balancer/version.py @@ -0,0 +1,26 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class Version(resource.Resource): + resource_key = 'version' + resources_key = 'versions' + base_path = '/' + + # capabilities + allow_list = True + + # Properties + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/message/message_service.py b/openstack/message/message_service.py index e74bf80bbc..0cf7d19eba 100644 --- a/openstack/message/message_service.py +++ b/openstack/message/message_service.py @@ -10,18 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack import service_filter +from openstack.message.v2 import _proxy +from openstack import service_description -class MessageService(service_filter.ServiceFilter): +class MessageService(service_description.ServiceDescription[_proxy.Proxy]): """The message service.""" - valid_versions = [service_filter.ValidVersion('v1'), - service_filter.ValidVersion('v2')] - - def __init__(self, version=None): - """Create a message service.""" - super(MessageService, self).__init__( - service_type='messaging', - version=version - ) + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/openstack/message/v1/_proxy.py b/openstack/message/v1/_proxy.py deleted file mode 100644 index ea6c205e6f..0000000000 --- a/openstack/message/v1/_proxy.py +++ /dev/null @@ -1,80 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.message.v1 import claim -from openstack.message.v1 import message -from openstack.message.v1 import queue -from openstack import proxy - - -class Proxy(proxy.BaseProxy): - - def create_queue(self, **attrs): - """Create a new queue from attributes - - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.message.v1.queue.Queue`, - comprised of the properties on the Queue class. - - :returns: The results of queue creation - :rtype: :class:`~openstack.message.v1.queue.Queue` - """ - return self._create(queue.Queue, **attrs) - - def delete_queue(self, value, ignore_missing=True): - """Delete a queue - - :param value: The value can be either the name of a queue or a - :class:`~openstack.message.v1.queue.Queue` instance. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the queue does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent queue. - - :returns: ``None`` - """ - return self._delete(queue.Queue, value, ignore_missing=ignore_missing) - - def create_messages(self, values): - """Create new messages - - :param list values: The list of - :class:`~openstack.message.v1.message.Message`s to create. - - :returns: The results of message creation - :rtype: list messages: The list of - :class:`~openstack.message.v1.message.Message`s created. - """ - return message.Message.create_messages(self.session, values) - - def claim_messages(self, value): - """Claims a set of messages. - - :param value: The value must be a - :class:`~openstack.message.v1.claim.Claim` instance. - - :returns: The results of a claim - :rtype: list messages: The list of - :class:`~openstack.message.v1.message.Message`s claimed. 
- """ - return claim.Claim.claim_messages(self.session, value) - - def delete_message(self, value): - """Delete a message - - :param value: The value must be a - :class:`~openstack.message.v1.message.Message` instance. - - :returns: ``None`` - """ - message.Message.delete_by_id(self.session, value) diff --git a/openstack/message/v1/claim.py b/openstack/message/v1/claim.py deleted file mode 100644 index 5b8c25f613..0000000000 --- a/openstack/message/v1/claim.py +++ /dev/null @@ -1,87 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from openstack import exceptions -from openstack.message import message_service -from openstack.message.v1 import message -from openstack import resource - - -class Claim(resource.Resource): - resources_key = 'claims' - base_path = "/queues/%(queue_name)s/claims" - service = message_service.MessageService() - - # capabilities - allow_create = True - allow_list = False - allow_retrieve = False - allow_delete = False - - #: A ID for each client instance. The ID must be submitted in its - #: canonical form (for example, 3381af92-2b9e-11e3-b191-71861300734c). - #: The client generates this ID once. The client ID persists between - #: restarts of the client so the client should reuse that same ID. - #: All message-related operations require the use of the client ID in - #: the headers to ensure that messages are not echoed back to the client - #: that posted them, unless the client explicitly requests this. 
- client_id = None - - #: The name of the queue this Claim belongs to. - queue_name = None - - #: Specifies the number of Messages to return. - limit = None - - #: Specifies how long the server waits before releasing the claim, - #: in seconds. - ttl = resource.prop("ttl") - - #: Specifies the message grace period, in seconds. - grace = resource.prop("grace") - - @classmethod - def claim_messages(cls, session, claim): - """Create a remote resource from this instance.""" - url = cls._get_url({'queue_name': claim.queue_name}) - headers = {'Client-ID': claim.client_id} - params = {'limit': claim.limit} if claim.limit else None - body = [] - - try: - resp = session.post(url, endpoint_filter=cls.service, - headers=headers, - data=json.dumps(claim, cls=ClaimEncoder), - params=params) - body = resp.json() - except exceptions.InvalidResponse as e: - # The Message Service will respond with a 204 and no content in - # the body when there are no messages to claim. The transport - # layer doesn't like that and we have to correct for it here. - # Ultimately it's a bug in the v1.0 Message Service API. - # TODO(etoews): API is fixed in v1.1 so fix this for message.v1_1 - # https://wiki.openstack.org/wiki/Zaqar/specs/api/v1.1 - if e.response.status_code != 204: - raise e - - for message_attrs in body: - yield message.Message.new( - client_id=claim.client_id, - queue_name=claim.queue_name, - **message_attrs) - - -class ClaimEncoder(json.JSONEncoder): - def default(self, claim): - return {'ttl': claim.ttl, 'grace': claim.grace} diff --git a/openstack/message/v1/message.py b/openstack/message/v1/message.py deleted file mode 100644 index 4650032eed..0000000000 --- a/openstack/message/v1/message.py +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import json - -from six.moves.urllib import parse - -from openstack.message import message_service -from openstack import resource - - -class Message(resource.Resource): - resources_key = 'messages' - base_path = "/queues/%(queue_name)s/messages" - service = message_service.MessageService() - - # capabilities - allow_create = True - allow_list = False - allow_retrieve = False - allow_delete = False - - #: A ID for each client instance. The ID must be submitted in its - #: canonical form (for example, 3381af92-2b9e-11e3-b191-71861300734c). - #: The client generates this ID once. The client ID persists between - #: restarts of the client so the client should reuse that same ID. - #: All message-related operations require the use of the client ID in - #: the headers to ensure that messages are not echoed back to the client - #: that posted them, unless the client explicitly requests this. - client_id = None - - #: The name of the queue this Message belongs to. - queue_name = None - - #: A relative href that references this Message. - href = resource.prop("href") - - #: An arbitrary JSON document that constitutes the body of the message - #: being sent. - body = resource.prop("body") - - #: Specifies how long the server waits, in seconds, before marking the - #: message as expired and removing it from the queue. - ttl = resource.prop("ttl") - - #: Specifies how long the message has been in the queue, in seconds. 
- age = resource.prop("age") - - @classmethod - def create_messages(cls, session, messages): - if len(messages) == 0: - raise ValueError('messages cannot be empty') - - for i, message in enumerate(messages, -1): - if message.queue_name != messages[i].queue_name: - raise ValueError('All queues in messages must be equal') - if message.client_id != messages[i].client_id: - raise ValueError('All clients in messages must be equal') - - url = cls._get_url({'queue_name': messages[0].queue_name}) - headers = {'Client-ID': messages[0].client_id} - - resp = session.post(url, endpoint_filter=cls.service, headers=headers, - data=json.dumps(messages, cls=MessageEncoder)) - resp = resp.json() - - messages_created = [] - hrefs = resp['resources'] - - for i, href in enumerate(hrefs): - message = Message.existing(**messages[i]) - message.href = href - messages_created.append(message) - - return messages_created - - @classmethod - def _strip_version(cls, href): - path = parse.urlparse(href).path - - if path.startswith('/v'): - return href[href.find('/', 1):] - else: - return href - - @classmethod - def delete_by_id(cls, session, message, path_args=None): - url = cls._strip_version(message.href) - headers = { - 'Client-ID': message.client_id, - 'Accept': '', - } - session.delete(url, endpoint_filter=cls.service, headers=headers) - - -class MessageEncoder(json.JSONEncoder): - def default(self, message): - return {'body': message.body, 'ttl': message.ttl} diff --git a/openstack/message/v1/queue.py b/openstack/message/v1/queue.py deleted file mode 100644 index dd10420fae..0000000000 --- a/openstack/message/v1/queue.py +++ /dev/null @@ -1,34 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.message import message_service -from openstack import resource - - -class Queue(resource.Resource): - id_attribute = 'name' - resources_key = 'queues' - base_path = '/queues' - service = message_service.MessageService() - - # capabilities - allow_create = True - allow_list = False - allow_retrieve = False - allow_delete = True - - @classmethod - def create_by_id(cls, session, attrs, resource_id=None, path_args=None): - url = cls._get_url(path_args, resource_id) - headers = {'Accept': ''} - session.put(url, endpoint_filter=cls.service, headers=headers) - return {cls.id_attribute: resource_id} diff --git a/openstack/message/v2/_base.py b/openstack/message/v2/_base.py new file mode 100644 index 0000000000..018bcbfe93 --- /dev/null +++ b/openstack/message/v2/_base.py @@ -0,0 +1,129 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty +import uuid + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack import resource + + +class MessageResource(resource.Resource): + # FIXME(anyone): The name string of `location` field of Zaqar API response + # is lower case. That is inconsistent with the guide from API-WG. This is + # a workaround for this issue. + location = resource.Header("location") + + #: The ID to identify the client accessing Zaqar API. Must be specified + #: in header for each API request. + client_id = resource.Header("Client-ID") + #: The ID to identify the project. Must be provided when keystone + #: authentication is not enabled in Zaqar service. + project_id = resource.Header("X-PROJECT-ID") + + @classmethod + def list( + cls, + session: adapter.Adapter, + paginated: bool = True, + base_path: str | None = None, + allow_unknown_params: bool = False, + *, + microversion: str | None = None, + headers: dict[str, str] | None = None, + max_items: int | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: + """This method is a generator which yields resource objects. + + This is almost the copy of list method of resource.Resource class. + The only difference is the request header now includes `Client-ID` + and `X-PROJECT-ID` fields which are required by Zaqar v2 API. 
+ """ + more_data = True + + if base_path is None: + base_path = cls.base_path + + uri = base_path % params + + project_id = params.get('project_id', None) or session.get_project_id() + assert project_id is not None + + headers = { + "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), + "X-PROJECT-ID": project_id, + } + + query_params = cls._query_mapping._transpose(params, cls) + while more_data: + resp = session.get( + uri, headers=headers, params=query_params + ).json()[cls.resources_key] + + if not resp: + more_data = False + + yielded = 0 + new_marker = None + for data in resp: + value = cls.existing(**data) + new_marker = value.id + yielded += 1 + yield value + + if not paginated: + return + if "limit" in query_params and yielded < query_params["limit"]: + return + query_params["limit"] = yielded + query_params["marker"] = new_marker + + def fetch( + self, + session, + requires_id=True, + base_path=None, + error_message=None, + skip_cache=False, + **kwargs, + ): + request = self._prepare_request( + requires_id=requires_id, base_path=base_path + ) + headers = { + "Client-ID": self.client_id or str(uuid.uuid4()), + "X-PROJECT-ID": self.project_id or session.get_project_id(), + } + request.headers.update(headers) + response = session.get( + request.url, headers=headers, skip_cache=skip_cache + ) + self._translate_response(response) + + return self + + def delete( + self, session, error_message=None, *, microversion=None, **kwargs + ): + request = self._prepare_request() + headers = { + "Client-ID": self.client_id or str(uuid.uuid4()), + "X-PROJECT-ID": self.project_id or session.get_project_id(), + } + request.headers.update(headers) + response = session.delete(request.url, headers=headers) + + self._translate_response(response, has_body=False) + return self diff --git a/openstack/message/v2/_proxy.py b/openstack/message/v2/_proxy.py index 66aaee1268..b089112e6a 100644 --- a/openstack/message/v2/_proxy.py +++ b/openstack/message/v2/_proxy.py @@ 
-10,22 +10,32 @@ # License for the specific language governing permissions and limitations # under the License. +import typing as ty + from openstack.message.v2 import claim as _claim from openstack.message.v2 import message as _message from openstack.message.v2 import queue as _queue from openstack.message.v2 import subscription as _subscription -from openstack import proxy2 -from openstack import resource2 +from openstack import proxy +from openstack import resource + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' -class Proxy(proxy2.BaseProxy): + _resource_registry = { + "claim": _claim.Claim, + "message": _message.Message, + "queue": _queue.Queue, + "subscription": _subscription.Subscription, + } def create_queue(self, **attrs): """Create a new queue from attributes :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.message.v2.queue.Queue`, - comprised of the properties on the Queue class. + a :class:`~openstack.message.v2.queue.Queue`, + comprised of the properties on the Queue class. :returns: The results of queue creation :rtype: :class:`~openstack.message.v2.queue.Queue` @@ -39,7 +49,7 @@ def get_queue(self, queue): :class:`~openstack.message.v2.queue.Queue` instance. :returns: One :class:`~openstack.message.v2.queue.Queue` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no + :raises: :class:`~openstack.exceptions.NotFoundException` when no queue matching the name could be found. """ return self._get(_queue.Queue, queue) @@ -47,7 +57,7 @@ def get_queue(self, queue): def queues(self, **query): """Retrieve a generator of queues - :param kwargs \*\*query: Optional query parameters to be sent to + :param kwargs query: Optional query parameters to be sent to restrict the queues to be returned. Available parameters include: * limit: Requests at most the specified number of items be @@ -59,18 +69,18 @@ def queues(self, **query): :returns: A generator of queue instances. 
""" - return self._list(_queue.Queue, paginated=True, **query) + return self._list(_queue.Queue, **query) def delete_queue(self, value, ignore_missing=True): """Delete a queue :param value: The value can be either the name of a queue or a - :class:`~openstack.message.v2.queue.Queue` instance. + :class:`~openstack.message.v2.queue.Queue` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the queue does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent queue. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the queue does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent queue. :returns: ``None`` """ @@ -80,19 +90,21 @@ def post_message(self, queue_name, messages): """Post messages to given queue :param queue_name: The name of target queue to post message to. - :param list messages: List of messages body and TTL to post. + :param messages: List of messages body and TTL to post. + :type messages: :py:class:`list` :returns: A string includes location of messages successfully posted. """ - message = self._get_resource(_message.Message, None, - queue_name=queue_name) - return message.post(self.session, messages) + message = self._get_resource( + _message.Message, None, queue_name=queue_name + ) + return message.post(self, messages) def messages(self, queue_name, **query): """Retrieve a generator of messages :param queue_name: The name of target queue to query messages from. - :param kwargs \*\*query: Optional query parameters to be sent to + :param kwargs query: Optional query parameters to be sent to restrict the messages to be returned. Available parameters include: * limit: Requests at most the specified number of items be @@ -109,7 +121,7 @@ def messages(self, queue_name, **query): :returns: A generator of message instances. 
""" query["queue_name"] = queue_name - return self._list(_message.Message, paginated=True, **query) + return self._list(_message.Message, **query) def get_message(self, queue_name, message): """Get a message @@ -119,37 +131,41 @@ def get_message(self, queue_name, message): :class:`~openstack.message.v2.message.Message` instance. :returns: One :class:`~openstack.message.v2.message.Message` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no + :raises: :class:`~openstack.exceptions.NotFoundException` when no message matching the criteria could be found. """ - message = self._get_resource(_message.Message, message, - queue_name=queue_name) + message = self._get_resource( + _message.Message, message, queue_name=queue_name + ) return self._get(_message.Message, message) - def delete_message(self, queue_name, value, claim=None, - ignore_missing=True): + def delete_message( + self, queue_name, value, claim=None, ignore_missing=True + ): """Delete a message :param queue_name: The name of target queue to delete message from. :param value: The value can be either the name of a message or a - :class:`~openstack.message.v2.message.Message` instance. + :class:`~openstack.message.v2.message.Message` instance. :param claim: The value can be the ID or a - :class:`~openstack.message.v2.claim.Claim` instance of - the claim seizing the message. If None, the message has - not been claimed. + :class:`~openstack.message.v2.claim.Claim` instance of + the claim seizing the message. If None, the message has + not been claimed. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the message does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent message. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the message does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent message. 
:returns: ``None`` """ - message = self._get_resource(_message.Message, value, - queue_name=queue_name) - message.claim_id = resource2.Resource._get_id(claim) - return self._delete(_message.Message, message, - ignore_missing=ignore_missing) + message = self._get_resource( + _message.Message, value, queue_name=queue_name + ) + message.claim_id = resource.Resource._get_id(claim) + return self._delete( + _message.Message, message, ignore_missing=ignore_missing + ) def create_subscription(self, queue_name, **attrs): """Create a new subscription from attributes @@ -162,14 +178,15 @@ def create_subscription(self, queue_name, **attrs): :returns: The results of subscription creation :rtype: :class:`~openstack.message.v2.subscription.Subscription` """ - return self._create(_subscription.Subscription, queue_name=queue_name, - **attrs) + return self._create( + _subscription.Subscription, queue_name=queue_name, **attrs + ) def subscriptions(self, queue_name, **query): """Retrieve a generator of subscriptions :param queue_name: The name of target queue to subscribe on. - :param kwargs \*\*query: Optional query parameters to be sent to + :param kwargs query: Optional query parameters to be sent to restrict the subscriptions to be returned. Available parameters include: @@ -183,7 +200,7 @@ def subscriptions(self, queue_name, **query): :returns: A generator of subscription instances. """ query["queue_name"] = queue_name - return self._list(_subscription.Subscription, paginated=True, **query) + return self._list(_subscription.Subscription, **query) def get_subscription(self, queue_name, subscription): """Get a subscription @@ -193,34 +210,38 @@ def get_subscription(self, queue_name, subscription): :class:`~openstack.message.v2.subscription.Subscription` instance. 
:returns: One :class:`~openstack.message.v2.subscription.Subscription` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no + :raises: :class:`~openstack.exceptions.NotFoundException` when no subscription matching the criteria could be found. """ - subscription = self._get_resource(_subscription.Subscription, - subscription, - queue_name=queue_name) + subscription = self._get_resource( + _subscription.Subscription, subscription, queue_name=queue_name + ) return self._get(_subscription.Subscription, subscription) def delete_subscription(self, queue_name, value, ignore_missing=True): """Delete a subscription :param queue_name: The name of target queue to delete subscription - from. + from. :param value: The value can be either the name of a subscription or a - :class:`~openstack.message.v2.subscription.Subscription` - instance. + :class:`~openstack.message.v2.subscription.Subscription` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the subscription does not exist. - When set to ``True``, no exception will be thrown when - attempting to delete a nonexistent subscription. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the subscription does not exist. + When set to ``True``, no exception will be thrown when + attempting to delete a nonexistent subscription. 
:returns: ``None`` """ - subscription = self._get_resource(_subscription.Subscription, value, - queue_name=queue_name) - return self._delete(_subscription.Subscription, subscription, - ignore_missing=ignore_missing) + subscription = self._get_resource( + _subscription.Subscription, value, queue_name=queue_name + ) + return self._delete( + _subscription.Subscription, + subscription, + ignore_missing=ignore_missing, + ) def create_claim(self, queue_name, **attrs): """Create a new claim from attributes @@ -243,7 +264,7 @@ def get_claim(self, queue_name, claim): :class:`~openstack.message.v2.claim.Claim` instance. :returns: One :class:`~openstack.message.v2.claim.Claim` - :raises: :class:`~openstack.exceptions.ResourceNotFound` when no + :raises: :class:`~openstack.exceptions.NotFoundException` when no claim matching the criteria could be found. """ return self._get(_claim.Claim, claim, queue_name=queue_name) @@ -261,22 +282,90 @@ def update_claim(self, queue_name, claim, **attrs): :returns: The results of claim update :rtype: :class:`~openstack.message.v2.claim.Claim` """ - return self._update(_claim.Claim, claim, queue_name=queue_name, - **attrs) + return self._update( + _claim.Claim, claim, queue_name=queue_name, **attrs + ) def delete_claim(self, queue_name, claim, ignore_missing=True): """Delete a claim :param queue_name: The name of target queue to claim messages from. :param claim: The value can be either the ID of a claim or a - :class:`~openstack.message.v2.claim.Claim` instance. + :class:`~openstack.message.v2.claim.Claim` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the claim does not exist. - When set to ``True``, no exception will be thrown when - attempting to delete a nonexistent claim. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the claim does not exist. 
+ When set to ``True``, no exception will be thrown when + attempting to delete a nonexistent claim. :returns: ``None`` """ - return self._delete(_claim.Claim, claim, queue_name=queue_name, - ignore_missing=ignore_missing) + return self._delete( + _claim.Claim, + claim, + queue_name=queue_name, + ignore_missing=ignore_missing, + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/message/v2/claim.py b/openstack/message/v2/claim.py index 1358587264..0ee2fbf72d 100644 --- a/openstack/message/v2/claim.py +++ b/openstack/message/v2/claim.py @@ -12,67 +12,75 @@ import uuid -from openstack.message import message_service -from openstack import resource2 +from openstack.message.v2 import _base +from openstack import resource -class Claim(resource2.Resource): - # FIXME(anyone): The name string of `location` field of Zaqar API response - # is lower case. That is inconsistent with the guide from API-WG. This is - # a workaround for this issue. 
- location = resource2.Header("location") - +class Claim(_base.MessageResource): resources_key = 'claims' base_path = '/queues/%(queue_name)s/claims' - service = message_service.MessageService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True - patch_update = True + commit_method = 'PATCH' # Properties #: The value in seconds indicating how long the claim has existed. - age = resource2.Body("age") + age = resource.Body("age") #: In case worker stops responding for a long time, the server will #: extend the lifetime of claimed messages to be at least as long as #: the lifetime of the claim itself, plus the specified grace period. #: Must between 60 and 43200 seconds(12 hours). - grace = resource2.Body("grace") + grace = resource.Body("grace") #: The number of messages to claim. Default 10, up to 20. - limit = resource2.Body("limit") + limit = resource.Body("limit") #: Messages have been successfully claimed. - messages = resource2.Body("messages") + messages = resource.Body("messages") #: Number of seconds the server wait before releasing the claim. Must #: between 60 and 43200 seconds(12 hours). - ttl = resource2.Body("ttl") + ttl = resource.Body("ttl") #: The name of queue to claim message from. - queue_name = resource2.URI("queue_name") - #: The ID to identify the client accessing Zaqar API. Must be specified - #: in header for each API request. - client_id = resource2.Header("Client-ID") - #: The ID to identify the project. Must be provided when keystone - #: authentication is not enabled in Zaqar service. 
- project_id = resource2.Header("X-PROJECT-ID") - - def _translate_response(self, response, has_body=True): - super(Claim, self)._translate_response(response, has_body=has_body) + queue_name = resource.URI("queue_name") + + def _translate_response( + self, + response, + has_body=None, + error_message=None, + *, + resource_response_key=None, + ): + # For case no message was claimed successfully, 204 No Content + # message will be returned. In other cases, we translate response + # body which has `messages` field(list) included. + if response.status_code == 204: + return + + super()._translate_response( + response, + has_body, + error_message, + resource_response_key=resource_response_key, + ) if has_body and self.location: # Extract claim ID from location self.id = self.location.split("claims/")[1] - def create(self, session, prepend_key=False): - request = self._prepare_request(requires_id=False, - prepend_key=prepend_key) + def create(self, session, prepend_key=False, base_path=None, **kwargs): + request = self._prepare_request( + requires_id=False, prepend_key=prepend_key, base_path=base_path + ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() + "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) - response = session.post(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) + response = session.post( + request.url, json=request.body, headers=request.headers + ) # For case no message was claimed successfully, 204 No Content # message will be returned. 
In other cases, we translate response @@ -82,43 +90,24 @@ def create(self, session, prepend_key=False): return self - def get(self, session, requires_id=True): - request = self._prepare_request(requires_id=requires_id) - headers = { - "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() - } - - request.headers.update(headers) - response = session.get(request.uri, endpoint_filter=self.service, - headers=request.headers) - self._translate_response(response) - - return self - - def update(self, session, prepend_key=False, has_body=False): - request = self._prepare_request(prepend_key=prepend_key) - headers = { - "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() - } - - request.headers.update(headers) - session.patch(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) - - return self - - def delete(self, session): - request = self._prepare_request() + def commit( + self, + session, + prepend_key=True, + has_body=True, + retry_on_conflict=None, + base_path=None, + **kwargs, + ): + request = self._prepare_request( + prepend_key=prepend_key, base_path=base_path + ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() + "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) - response = session.delete(request.uri, endpoint_filter=self.service, - headers=request.headers) + session.patch(request.url, json=request.body, headers=request.headers) - self._translate_response(response, has_body=False) return self diff --git a/openstack/message/v2/message.py b/openstack/message/v2/message.py index 668f69c06b..091aefdc75 100644 --- a/openstack/message/v2/message.py +++ b/openstack/message/v2/message.py @@ -12,122 +12,83 @@ import uuid -from openstack.message import message_service -from openstack import resource2 +from 
openstack.message.v2 import _base +from openstack import resource -class Message(resource2.Resource): - # FIXME(anyone): The name string of `location` field of Zaqar API response - # is lower case. That is inconsistent with the guide from API-WG. This is - # a workaround for this issue. - location = resource2.Header("location") - +class Message(_base.MessageResource): resources_key = 'messages' base_path = '/queues/%(queue_name)s/messages' - service = message_service.MessageService() # capabilities allow_create = True allow_list = True - allow_get = True + allow_fetch = True allow_delete = True - _query_mapping = resource2.QueryParameters("echo", "include_claimed") + _query_mapping = resource.QueryParameters("echo", "include_claimed") # Properties #: The value in second to specify how long the message has been #: posted to the queue. - age = resource2.Body("age") + age = resource.Body("age") #: A dictionary specifies an arbitrary document that constitutes the #: body of the message being sent. - body = resource2.Body("body") + body = resource.Body("body") #: An uri string describe the location of the message resource. - href = resource2.Body("href") + href = resource.Body("href") #: The value in seconds to specify how long the server waits before #: marking the message as expired and removing it from the queue. - ttl = resource2.Body("ttl") + ttl = resource.Body("ttl") #: The name of target queue message is post to or got from. - queue_name = resource2.URI("queue_name") - #: The ID to identify the client accessing Zaqar API. Must be specified - #: in header for each API request. - client_id = resource2.Header("Client-ID") - #: The ID to identify the project accessing Zaqar API. Must be specified - #: in case keystone auth is not enabled in Zaqar service. 
- project_id = resource2.Header("X-PROJECT-ID") + queue_name = resource.URI("queue_name") + + # FIXME(stephenfin): This is actually a query arg but we need it for + # deletions and resource.delete doesn't respect these currently + claim_id: str | None = None def post(self, session, messages): request = self._prepare_request(requires_id=False, prepend_key=True) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() + "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) request.body = {'messages': messages} - response = session.post(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) + response = session.post( + request.url, json=request.body, headers=request.headers + ) return response.json()['resources'] - @classmethod - def list(cls, session, paginated=True, **params): - """This method is a generator which yields message objects. - - This is almost the copy of list method of resource2.Resource class. - The only difference is the request header now includes `Client-ID` - and `X-PROJECT-ID` fields which are required by Zaqar v2 API. 
- """ - more_data = True - uri = cls.base_path % params - headers = { - "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), - "X-PROJECT-ID": params.get('project_id', None - ) or session.get_project_id() - } - - query_params = cls._query_mapping._transpose(params) - while more_data: - resp = session.get(uri, endpoint_filter=cls.service, - headers=headers, params=query_params) - resp = resp.json() - resp = resp[cls.resources_key] - - if not resp: - more_data = False - - yielded = 0 - new_marker = None - for data in resp: - value = cls.existing(**data) - new_marker = value.id - yielded += 1 - yield value - - if not paginated: - return - if "limit" in query_params and yielded < query_params["limit"]: - return - query_params["limit"] = yielded - query_params["marker"] = new_marker - - def get(self, session, requires_id=True): - request = self._prepare_request(requires_id=requires_id) + def create(self, session, prepend_key=False, base_path=None, **kwargs): + request = self._prepare_request( + requires_id=False, prepend_key=prepend_key, base_path=base_path + ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() + "X-PROJECT-ID": self.project_id or session.get_project_id(), } - request.headers.update(headers) - response = session.get(request.uri, endpoint_filter=self.service, - headers=headers) - self._translate_response(response) + response = session.post( + request.url, json=request.body, headers=request.headers + ) + + # For case no message was claimed successfully, 204 No Content + # message will be returned. In other cases, we translate response + # body which has `messages` field(list) included. 
+ if response.status_code != 204: + self._translate_response(response) return self - def delete(self, session): + def delete( + self, session, error_message=None, *, microversion=None, **kwargs + ): request = self._prepare_request() headers = { "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() + "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) @@ -135,9 +96,8 @@ def delete(self, session): # parameter when deleting a message that has been claimed, we # rebuild the request URI if claim_id is not None. if self.claim_id: - request.uri += '?claim_id=%s' % self.claim_id - response = session.delete(request.uri, endpoint_filter=self.service, - headers=headers) + request.url += f'?claim_id={self.claim_id}' + response = session.delete(request.url, headers=headers) self._translate_response(response, has_body=False) return self diff --git a/openstack/message/v2/queue.py b/openstack/message/v2/queue.py index 49f79f6e10..9e23e33cc6 100644 --- a/openstack/message/v2/queue.py +++ b/openstack/message/v2/queue.py @@ -12,123 +12,46 @@ import uuid -from openstack.message import message_service -from openstack import resource2 +from openstack.message.v2 import _base +from openstack import resource -class Queue(resource2.Resource): - # FIXME(anyone): The name string of `location` field of Zaqar API response - # is lower case. That is inconsistent with the guide from API-WG. This is - # a workaround for this issue. - location = resource2.Header("location") - +class Queue(_base.MessageResource): resources_key = "queues" base_path = "/queues" - service = message_service.MessageService() # capabilities allow_create = True allow_list = True - allow_get = True + allow_fetch = True allow_delete = True # Properties #: The default TTL of messages defined for a queue, which will effect for #: any messages posted to the queue. 
- default_message_ttl = resource2.Body("_default_message_ttl") + default_message_ttl = resource.Body("_default_message_ttl") #: Description of the queue. - description = resource2.Body("description") + description = resource.Body("description") #: The max post size of messages defined for a queue, which will effect #: for any messages posted to the queue. - max_messages_post_size = resource2.Body("_max_messages_post_size") + max_messages_post_size = resource.Body("_max_messages_post_size") #: Name of the queue. The name is the unique identity of a queue. It #: must not exceed 64 bytes in length, and it is limited to US-ASCII #: letters, digits, underscores, and hyphens. - name = resource2.Body("name", alternate_id=True) - #: The ID to identify the client accessing Zaqar API. Must be specified - #: in header for each API request. - client_id = resource2.Header("Client-ID") - #: The ID to identify the project accessing Zaqar API. Must be specified - #: in case keystone auth is not enabled in Zaqar service. - project_id = resource2.Header("X-PROJECT-ID") - - def create(self, session, prepend_key=True): - request = self._prepare_request(requires_id=True, - prepend_key=prepend_key) - headers = { - "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() - } - request.headers.update(headers) - response = session.put(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) - - self._translate_response(response, has_body=False) - return self - - @classmethod - def list(cls, session, paginated=False, **params): - """This method is a generator which yields queue objects. - - This is almost the copy of list method of resource2.Resource class. - The only difference is the request header now includes `Client-ID` - and `X-PROJECT-ID` fields which are required by Zaqar v2 API. 
- """ - more_data = True - query_params = cls._query_mapping._transpose(params) - uri = cls.base_path % params - headers = { - "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), - "X-PROJECT-ID": params.get('project_id', None - ) or session.get_project_id() - } - - while more_data: - resp = session.get(uri, endpoint_filter=cls.service, - headers=headers, params=query_params) - resp = resp.json() - resp = resp[cls.resources_key] - - if not resp: - more_data = False - - yielded = 0 - new_marker = None - for data in resp: - value = cls.existing(**data) - new_marker = value.id - yielded += 1 - yield value - - if not paginated: - return - if "limit" in query_params and yielded < query_params["limit"]: - return - query_params["limit"] = yielded - query_params["marker"] = new_marker - - def get(self, session, requires_id=True): - request = self._prepare_request(requires_id=requires_id) - headers = { - "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() - } - request.headers.update(headers) - response = session.get(request.uri, endpoint_filter=self.service, - headers=headers) - self._translate_response(response) - - return self + name = resource.Body("name", alternate_id=True) - def delete(self, session): - request = self._prepare_request() + def create(self, session, prepend_key=False, base_path=None, **kwargs): + request = self._prepare_request( + requires_id=True, prepend_key=prepend_key, base_path=None + ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() + "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) - response = session.delete(request.uri, endpoint_filter=self.service, - headers=headers) + response = session.put( + request.url, json=request.body, headers=request.headers + ) self._translate_response(response, has_body=False) return self diff --git 
a/openstack/message/v2/subscription.py b/openstack/message/v2/subscription.py index d5acdb0595..5da4962466 100644 --- a/openstack/message/v2/subscription.py +++ b/openstack/message/v2/subscription.py @@ -12,133 +12,54 @@ import uuid -from openstack.message import message_service -from openstack import resource2 +from openstack.message.v2 import _base +from openstack import resource -class Subscription(resource2.Resource): - # FIXME(anyone): The name string of `location` field of Zaqar API response - # is lower case. That is inconsistent with the guide from API-WG. This is - # a workaround for this issue. - location = resource2.Header("location") - +class Subscription(_base.MessageResource): resources_key = 'subscriptions' base_path = '/queues/%(queue_name)s/subscriptions' - service = message_service.MessageService() # capabilities allow_create = True allow_list = True - allow_get = True + allow_fetch = True allow_delete = True # Properties #: The value in seconds indicating how long the subscription has existed. - age = resource2.Body("age") + age = resource.Body("age") #: Alternate id of the subscription. This key is used in response of #: subscription create API to return id of subscription created. - subscription_id = resource2.Body("subscription_id", alternate_id=True) + subscription_id = resource.Body("subscription_id", alternate_id=True) #: The extra metadata for the subscription. The value must be a dict. #: If the subscriber is `mailto`. The options can contain `from` and #: `subject` to indicate the email's author and title. - options = resource2.Body("options", type=dict) + options = resource.Body("options", type=dict) #: The queue name which the subscription is registered on. - source = resource2.Body("source") + source = resource.Body("source") #: The destination of the message. Two kinds of subscribers are supported: #: http/https and email. The http/https subscriber should start with #: `http/https`. The email subscriber should start with `mailto`. 
- subscriber = resource2.Body("subscriber") + subscriber = resource.Body("subscriber") #: Number of seconds the subscription remains alive? The ttl value must #: be great than 60 seconds. The default value is 3600 seconds. - ttl = resource2.Body("ttl") + ttl = resource.Body("ttl") #: The queue name which the subscription is registered on. - queue_name = resource2.URI("queue_name") - #: The ID to identify the client accessing Zaqar API. Must be specified - #: in header for each API request. - client_id = resource2.Header("Client-ID") - #: The ID to identify the project. Must be provided when keystone - #: authentication is not enabled in Zaqar service. - project_id = resource2.Header("X-PROJECT-ID") + queue_name = resource.URI("queue_name") - def create(self, session, prepend_key=True): - request = self._prepare_request(requires_id=False, - prepend_key=prepend_key) + def create(self, session, prepend_key=False, base_path=None, **kwargs): + request = self._prepare_request( + requires_id=False, prepend_key=prepend_key, base_path=base_path + ) headers = { "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() + "X-PROJECT-ID": self.project_id or session.get_project_id(), } request.headers.update(headers) - response = session.post(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) - - self._translate_response(response) - return self - - @classmethod - def list(cls, session, paginated=True, **params): - """This method is a generator which yields subscription objects. - - This is almost the copy of list method of resource2.Resource class. - The only difference is the request header now includes `Client-ID` - and `X-PROJECT-ID` fields which are required by Zaqar v2 API. 
- """ - more_data = True - uri = cls.base_path % params - headers = { - "Client-ID": params.get('client_id', None) or str(uuid.uuid4()), - "X-PROJECT-ID": params.get('project_id', None - ) or session.get_project_id() - } - - query_params = cls._query_mapping._transpose(params) - while more_data: - resp = session.get(uri, endpoint_filter=cls.service, - headers=headers, params=query_params) - resp = resp.json() - resp = resp[cls.resources_key] - - if not resp: - more_data = False - - yielded = 0 - new_marker = None - for data in resp: - value = cls.existing(**data) - new_marker = value.id - yielded += 1 - yield value - - if not paginated: - return - if "limit" in query_params and yielded < query_params["limit"]: - return - query_params["limit"] = yielded - query_params["marker"] = new_marker - - def get(self, session, requires_id=True): - request = self._prepare_request(requires_id=requires_id) - headers = { - "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() - } + response = session.post( + request.url, json=request.body, headers=request.headers + ) - request.headers.update(headers) - response = session.get(request.uri, endpoint_filter=self.service, - headers=request.headers) self._translate_response(response) - - return self - - def delete(self, session): - request = self._prepare_request() - headers = { - "Client-ID": self.client_id or str(uuid.uuid4()), - "X-PROJECT-ID": self.project_id or session.get_project_id() - } - - request.headers.update(headers) - response = session.delete(request.uri, endpoint_filter=self.service, - headers=request.headers) - - self._translate_response(response, has_body=False) return self diff --git a/openstack/message/version.py b/openstack/message/version.py index 431fd239d6..805ce6a345 100644 --- a/openstack/message/version.py +++ b/openstack/message/version.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.message import message_service from openstack import resource @@ -18,13 +17,10 @@ class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' - service = message_service.MessageService( - version=message_service.MessageService.UNVERSIONED - ) # capabilities allow_list = True # Properties - links = resource.prop('links') - status = resource.prop('status') + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/metric/metric_service.py b/openstack/metric/metric_service.py deleted file mode 100644 index b18153fb5a..0000000000 --- a/openstack/metric/metric_service.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import service_filter - - -class MetricService(service_filter.ServiceFilter): - """The metric service.""" - - valid_versions = [service_filter.ValidVersion('v1')] - - def __init__(self, version=None): - """Create a metric service.""" - super(MetricService, self).__init__(service_type='metric', - version=version) diff --git a/openstack/metric/v1/_proxy.py b/openstack/metric/v1/_proxy.py deleted file mode 100644 index e25e9303ee..0000000000 --- a/openstack/metric/v1/_proxy.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.metric.v1 import capabilities -from openstack import proxy - - -class Proxy(proxy.BaseProxy): - - def capabilities(self, **query): - """Return a generator of capabilities - - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. - - :returns: A generator of capability objects - :rtype: :class:`~openstack.metric.v1.capabilities.Capabilities` - """ - return self._list(capabilities.Capabilities, paginated=False, **query) diff --git a/openstack/metric/v1/archive_policy.py b/openstack/metric/v1/archive_policy.py deleted file mode 100644 index df25246037..0000000000 --- a/openstack/metric/v1/archive_policy.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.metric import metric_service -from openstack import resource - - -class ArchivePolicy(resource.Resource): - base_path = '/archive_policy' - service = metric_service.MetricService() - - # Supported Operations - allow_create = True - allow_retrieve = True - allow_delete = True - allow_list = True - - id_attribute = "name" - - # Properties - #: The name of this policy - name = resource.prop('name') - #: The definition of this policy - definition = resource.prop('definition', type=list) - #: The window of time older than the period that archives can be requested - back_window = resource.prop('back_window') - #: A list of the aggregation methods supported - aggregation_methods = resource.prop("aggregation_methods", type=list) diff --git a/openstack/metric/v1/capabilities.py b/openstack/metric/v1/capabilities.py deleted file mode 100644 index e085492d08..0000000000 --- a/openstack/metric/v1/capabilities.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.metric import metric_service -from openstack import resource - - -class Capabilities(resource.Resource): - base_path = '/capabilities' - service = metric_service.MetricService() - - # Supported Operations - allow_retrieve = True - - #: The supported methods of aggregation. 
- aggregation_methods = resource.prop('aggregation_methods', type=list) diff --git a/openstack/metric/v1/metric.py b/openstack/metric/v1/metric.py deleted file mode 100644 index 3fb095ee9e..0000000000 --- a/openstack/metric/v1/metric.py +++ /dev/null @@ -1,39 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.metric import metric_service -from openstack import resource - - -class Metric(resource.Resource): - base_path = '/metric' - service = metric_service.MetricService() - - # Supported Operations - allow_create = True - allow_retrieve = True - allow_delete = True - allow_list = True - - # Properties - #: The name of the archive policy - archive_policy_name = resource.prop('archive_policy_name') - #: The archive policy - archive_policy = resource.prop('archive_policy') - #: The ID of the user who created this metric - created_by_user_id = resource.prop('created_by_user_id') - #: The ID of the project this metric was created under - created_by_project_id = resource.prop('created_by_project_id') - #: The identifier of this metric - resource_id = resource.prop('resource_id') - #: The name of this metric - name = resource.prop('name') diff --git a/openstack/metric/v1/resource.py b/openstack/metric/v1/resource.py deleted file mode 100644 index f6f8c84204..0000000000 --- a/openstack/metric/v1/resource.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with 
the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.metric import metric_service -from openstack import resource - - -class Generic(resource.Resource): - base_path = '/resource/generic' - service = metric_service.MetricService() - - # Supported Operations - allow_create = True - allow_retrieve = True - allow_delete = True - allow_list = True - allow_update = True - - # Properties - #: The identifier of this resource - id = resource.prop('id', alias="resource_id") - #: The ID of the user who created this resource - created_by_user_id = resource.prop('created_by_user_id') - #: The ID of the project this resource was created under - created_by_project_id = resource.prop('created_by_project_id') - #: The ID of the user - user_id = resource.prop('user_id') - #: The ID of the project - project_id = resource.prop('project_id') - #: Timestamp when this resource was started - started_at = resource.prop('started_at') - #: Timestamp when this resource was ended - ended_at = resource.prop('ended_at') - #: A dictionary of metrics collected on this resource - metrics = resource.prop('metrics', type=dict) - - def create(self, session): - resp = self.create_by_id(session, self._attrs) - self._attrs[self.id_attribute] = resp[self.id_attribute] - self._reset_dirty() - return self diff --git a/openstack/module_loader.py b/openstack/module_loader.py deleted file mode 100644 index faf271acde..0000000000 --- a/openstack/module_loader.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -Load various modules for authorization and eventually services. -""" -from stevedore import extension - - -def load_service_plugins(namespace): - service_plugins = extension.ExtensionManager( - namespace=namespace, - invoke_on_load=True, - ) - services = {} - for service in service_plugins: - service = service.obj - service.interface = None - services[service.service_type] = service - return services diff --git a/openstack/network/network_service.py b/openstack/network/network_service.py index 28c20e4b85..b6f10f0758 100644 --- a/openstack/network/network_service.py +++ b/openstack/network/network_service.py @@ -10,15 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack import service_filter +from openstack.network.v2 import _proxy +from openstack import service_description -class NetworkService(service_filter.ServiceFilter): +class NetworkService(service_description.ServiceDescription[_proxy.Proxy]): """The network service.""" - valid_versions = [service_filter.ValidVersion('v2', 'v2.0')] - - def __init__(self, version=None): - """Create a network service.""" - super(NetworkService, self).__init__(service_type='network', - version=version) + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/openstack/network/v2/_base.py b/openstack/network/v2/_base.py new file mode 100644 index 0000000000..7acc05f8d5 --- /dev/null +++ b/openstack/network/v2/_base.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.common import tag +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class NetworkResource(resource.Resource): + #: Revision number of the resource. *Type: int* + revision_number = resource.Body('revision_number', type=int) + + # Headers for HEAD and GET requests + #: See http://www.ietf.org/rfc/rfc2616.txt. 
+ if_match = resource.Header("if-match", type=list) + + _allow_unknown_attrs_in_body = True + + def _prepare_request( + self, + requires_id=None, + prepend_key=False, + patch=False, + base_path=None, + params=None, + **kwargs, + ): + req = super()._prepare_request( + requires_id=requires_id, + prepend_key=prepend_key, + patch=patch, + base_path=base_path, + params=params, + **kwargs, + ) + return req + + +class TagMixinNetwork(tag.TagMixin): + def add_tags(self, session, tags): + """Create the tags on the resource + + :param session: The session to use for making this request. + :param tags: List with tags to be set on the resource + """ + tags = tags or [] + url = utils.urljoin(self.base_path, self.id, 'tags') + session = self._get_session(session) + response = session.post(url, json={'tags': tags}) + exceptions.raise_from_response(response) + self._body.attributes.update({'tags': tags}) + return self diff --git a/openstack/network/v2/_proxy.py b/openstack/network/v2/_proxy.py index d4ab2393b4..d4ed2c08e9 100644 --- a/openstack/network/v2/_proxy.py +++ b/openstack/network/v2/_proxy.py @@ -10,30 +10,72 @@ # License for the specific language governing permissions and limitations # under the License. 
+import typing as ty + +from openstack import exceptions +from openstack.network.v2 import _base +from openstack.network.v2 import address_group as _address_group from openstack.network.v2 import address_scope as _address_scope from openstack.network.v2 import agent as _agent -from openstack.network.v2 import auto_allocated_topology as \ - _auto_allocated_topology +from openstack.network.v2 import ( + auto_allocated_topology as _auto_allocated_topology, +) from openstack.network.v2 import availability_zone +from openstack.network.v2 import bgp_peer as _bgp_peer +from openstack.network.v2 import bgp_speaker as _bgp_speaker +from openstack.network.v2 import bgpvpn as _bgpvpn +from openstack.network.v2 import ( + bgpvpn_network_association as _bgpvpn_network_association, +) +from openstack.network.v2 import ( + bgpvpn_port_association as _bgpvpn_port_association, +) +from openstack.network.v2 import ( + bgpvpn_router_association as _bgpvpn_router_association, +) +from openstack.network.v2 import ( + default_security_group_rule as _default_security_group_rule, +) from openstack.network.v2 import extension +from openstack.network.v2 import firewall_group as _firewall_group +from openstack.network.v2 import firewall_policy as _firewall_policy +from openstack.network.v2 import firewall_rule as _firewall_rule from openstack.network.v2 import flavor as _flavor from openstack.network.v2 import floating_ip as _floating_ip from openstack.network.v2 import health_monitor as _health_monitor +from openstack.network.v2 import l3_conntrack_helper as _l3_conntrack_helper from openstack.network.v2 import listener as _listener from openstack.network.v2 import load_balancer as _load_balancer +from openstack.network.v2 import local_ip as _local_ip +from openstack.network.v2 import local_ip_association as _local_ip_association from openstack.network.v2 import metering_label as _metering_label from openstack.network.v2 import metering_label_rule as _metering_label_rule +from 
openstack.network.v2 import ndp_proxy as _ndp_proxy from openstack.network.v2 import network as _network from openstack.network.v2 import network_ip_availability +from openstack.network.v2 import ( + network_segment_range as _network_segment_range, +) from openstack.network.v2 import pool as _pool from openstack.network.v2 import pool_member as _pool_member from openstack.network.v2 import port as _port -from openstack.network.v2 import qos_bandwidth_limit_rule as \ - _qos_bandwidth_limit_rule -from openstack.network.v2 import qos_dscp_marking_rule as \ - _qos_dscp_marking_rule -from openstack.network.v2 import qos_minimum_bandwidth_rule as \ - _qos_minimum_bandwidth_rule +from openstack.network.v2 import port_binding as _port_binding +from openstack.network.v2 import port_forwarding as _port_forwarding +from openstack.network.v2 import ( + qos_bandwidth_limit_rule as _qos_bandwidth_limit_rule, +) +from openstack.network.v2 import ( + qos_dscp_marking_rule as _qos_dscp_marking_rule, +) +from openstack.network.v2 import ( + qos_minimum_bandwidth_rule as _qos_minimum_bandwidth_rule, +) +from openstack.network.v2 import ( + qos_minimum_packet_rate_rule as _qos_minimum_packet_rate_rule, +) +from openstack.network.v2 import ( + qos_packet_rate_limit_rule as _qos_packet_rate_limit_rule, +) from openstack.network.v2 import qos_policy as _qos_policy from openstack.network.v2 import qos_rule_type as _qos_rule_type from openstack.network.v2 import quota as _quota @@ -44,18 +86,289 @@ from openstack.network.v2 import segment as _segment from openstack.network.v2 import service_profile as _service_profile from openstack.network.v2 import service_provider as _service_provider +from openstack.network.v2 import sfc_flow_classifier as _sfc_flow_classifier +from openstack.network.v2 import sfc_port_chain as _sfc_port_chain +from openstack.network.v2 import sfc_port_pair as _sfc_port_pair +from openstack.network.v2 import sfc_port_pair_group as _sfc_port_pair_group +from 
openstack.network.v2 import sfc_service_graph as _sfc_sservice_graph from openstack.network.v2 import subnet as _subnet from openstack.network.v2 import subnet_pool as _subnet_pool +from openstack.network.v2 import tap_flow as _tap_flow +from openstack.network.v2 import tap_mirror as _tap_mirror +from openstack.network.v2 import tap_service as _tap_service +from openstack.network.v2 import trunk as _trunk +from openstack.network.v2 import vpn_endpoint_group as _vpn_endpoint_group +from openstack.network.v2 import vpn_ike_policy as _ike_policy +from openstack.network.v2 import vpn_ipsec_policy as _ipsec_policy +from openstack.network.v2 import ( + vpn_ipsec_site_connection as _ipsec_site_connection, +) from openstack.network.v2 import vpn_service as _vpn_service -from openstack import proxy2 +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + _resource_registry = { + "address_group": _address_group.AddressGroup, + "address_scope": _address_scope.AddressScope, + "agent": _agent.Agent, + "auto_allocated_topology": ( + _auto_allocated_topology.AutoAllocatedTopology + ), + "availability_zone": availability_zone.AvailabilityZone, + "bgp_peer": _bgp_peer.BgpPeer, + "bgp_speaker": _bgp_speaker.BgpSpeaker, + "bgpvpn": _bgpvpn.BgpVpn, + "bgpvpn_network_association": ( + _bgpvpn_network_association.BgpVpnNetworkAssociation + ), + "bgpvpn_port_association": ( + _bgpvpn_port_association.BgpVpnPortAssociation + ), + "bgpvpn_router_association": ( + _bgpvpn_router_association.BgpVpnRouterAssociation + ), + "default_security_group_rule": ( + _default_security_group_rule.DefaultSecurityGroupRule + ), + "extension": extension.Extension, + "firewall_group": _firewall_group.FirewallGroup, + "firewall_policy": _firewall_policy.FirewallPolicy, + "firewall_rule": _firewall_rule.FirewallRule, + "flavor": _flavor.Flavor, + "floating_ip": _floating_ip.FloatingIP, + "health_monitor": 
_health_monitor.HealthMonitor, + "l3_conntrack_helper": _l3_conntrack_helper.ConntrackHelper, + "listener": _listener.Listener, + "load_balancer": _load_balancer.LoadBalancer, + "local_ip": _local_ip.LocalIP, + "local_ip_association": _local_ip_association.LocalIPAssociation, + "metering_label": _metering_label.MeteringLabel, + "metering_label_rule": _metering_label_rule.MeteringLabelRule, + "ndp_proxy": _ndp_proxy.NDPProxy, + "network": _network.Network, + "network_ip_availability": ( + network_ip_availability.NetworkIPAvailability + ), + "network_segment_range": _network_segment_range.NetworkSegmentRange, + "pool": _pool.Pool, + "pool_member": _pool_member.PoolMember, + "port": _port.Port, + "port_forwarding": _port_forwarding.PortForwarding, + "qos_bandwidth_limit_rule": ( + _qos_bandwidth_limit_rule.QoSBandwidthLimitRule + ), + "qos_dscp_marking_rule": _qos_dscp_marking_rule.QoSDSCPMarkingRule, + "qos_minimum_bandwidth_rule": ( + _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule + ), + "qos_minimum_packet_rate_rule": ( + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule + ), + "qos_policy": _qos_policy.QoSPolicy, + "qos_rule_type": _qos_rule_type.QoSRuleType, + "quota": _quota.Quota, + "rbac_policy": _rbac_policy.RBACPolicy, + "router": _router.Router, + "security_group": _security_group.SecurityGroup, + "security_group_rule": _security_group_rule.SecurityGroupRule, + "segment": _segment.Segment, + "service_profile": _service_profile.ServiceProfile, + "service_provider": _service_provider.ServiceProvider, + "sfc_flow_classifier": _sfc_flow_classifier.SfcFlowClassifier, + "sfc_port_chain": _sfc_port_chain.SfcPortChain, + "sfc_port_pair": _sfc_port_pair.SfcPortPair, + "sfc_port_pair_group": _sfc_port_pair_group.SfcPortPairGroup, + "sfc_service_graph": _sfc_sservice_graph.SfcServiceGraph, + "subnet": _subnet.Subnet, + "subnet_pool": _subnet_pool.SubnetPool, + "tap_flow": _tap_flow.TapFlow, + "tap_mirror": _tap_mirror.TapMirror, + "tap_service": 
_tap_service.TapService, + "trunk": _trunk.Trunk, + "vpn_endpoint_group": _vpn_endpoint_group.VpnEndpointGroup, + "vpn_ike_policy": _ike_policy.VpnIkePolicy, + "vpn_ipsec_policy": _ipsec_policy.VpnIpsecPolicy, + "vpn_ipsec_site_connection": ( + _ipsec_site_connection.VpnIPSecSiteConnection + ), + "vpn_service": _vpn_service.VpnService, + } + + def _update( + self, + resource_type: type[resource.ResourceT], + value: str | resource.ResourceT | None, + base_path: str | None = None, + if_revision: int | None = None, + **attrs: ty.Any, + ) -> resource.ResourceT: + if ( + issubclass(resource_type, _base.NetworkResource) + and if_revision is not None + ): + attrs.update({'if_match': f'revision_number={if_revision}'}) + + res = self._get_resource(resource_type, value, **attrs) + + return res.commit(self, base_path=base_path) + + def _delete( + self, + resource_type: type[resource.ResourceT], + value: str | resource.ResourceT | None, + ignore_missing: bool = True, + if_revision: int | None = None, + **attrs: ty.Any, + ) -> resource.ResourceT | None: + if ( + issubclass(resource_type, _base.NetworkResource) + and if_revision is not None + ): + attrs.update({'if_match': f'revision_number={if_revision}'}) + + res = self._get_resource(resource_type, value, **attrs) + + try: + rv = res.delete(self) + except exceptions.NotFoundException: + if ignore_missing: + return None + raise + + return rv + + def create_address_group(self, **attrs): + """Create a new address group from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.address_group.AddressGroup`, + comprised of the properties on the AddressGroup class. 
+ + :returns: The results of address group creation + :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` + """ + return self._create(_address_group.AddressGroup, **attrs) + + def delete_address_group(self, address_group, ignore_missing=True): + """Delete an address group + + :param address_group: The value can be either the ID of an + address group or + a :class:`~openstack.network.v2.address_group.AddressGroup` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will + be raised when the address group does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent address group. + + :returns: ``None`` + """ + self._delete( + _address_group.AddressGroup, + address_group, + ignore_missing=ignore_missing, + ) + + def find_address_group(self, name_or_id, ignore_missing=True, **query): + """Find a single address group + + :param name_or_id: The name or ID of an address group. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.address_group.AddressGroup` + or None + """ + return self._find( + _address_group.AddressGroup, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_address_group(self, address_group): + """Get a single address group + + :param address_group: The value can be the ID of an address group or a + :class:`~openstack.network.v2.address_group.AddressGroup` instance. + + :returns: One :class:`~openstack.network.v2.address_group.AddressGroup` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_address_group.AddressGroup, address_group) + + def address_groups(self, **query): + """Return a generator of address groups + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + * ``name``: Address group name + * ``description``: Address group description + * ``project_id``: Owner project ID + + :returns: A generator of address group objects + :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` + """ + return self._list(_address_group.AddressGroup, **query) + + def update_address_group( + self, + address_group: str | _address_group.AddressGroup, + **attrs: ty.Any, + ) -> _address_group.AddressGroup: + """Update an address group + + :param address_group: Either the ID of an address group or a + :class:`~openstack.network.v2.address_group.AddressGroup` instance. + :param attrs: The attributes to update on the address group + represented by ``value``. + + :returns: The updated address group + :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` + """ + return self._update( + _address_group.AddressGroup, address_group, **attrs + ) -class Proxy(proxy2.BaseProxy): + def add_addresses_to_address_group(self, address_group, addresses): + """Add addresses to a address group + + :param address_group: Either the ID of an address group or a + :class:`~openstack.network.v2.address_group.AddressGroup` instance. + :param list addresses: List of address strings. + :returns: AddressGroup with updated addresses + :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` + """ + ag = self._get_resource(_address_group.AddressGroup, address_group) + return ag.add_addresses(self, addresses) + + def remove_addresses_from_address_group(self, address_group, addresses): + """Remove addresses from a address group + + :param address_group: Either the ID of an address group or a + :class:`~openstack.network.v2.address_group.AddressGroup` instance. 
+ :param list addresses: List of address strings. + :returns: AddressGroup with updated addresses + :rtype: :class:`~openstack.network.v2.address_group.AddressGroup` + """ + ag = self._get_resource(_address_group.AddressGroup, address_group) + return ag.remove_addresses(self, addresses) def create_address_scope(self, **attrs): """Create a new address scope from attributes - :param dict attrs: Keyword arguments which will be used to create + :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.address_scope.AddressScope`, comprised of the properties on the AddressScope class. @@ -72,30 +385,39 @@ def delete_address_scope(self, address_scope, ignore_missing=True): a :class:`~openstack.network.v2.address_scope.AddressScope` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the address scope does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent address scope. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the address scope does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent address scope. :returns: ``None`` """ - self._delete(_address_scope.AddressScope, address_scope, - ignore_missing=ignore_missing) + self._delete( + _address_scope.AddressScope, + address_scope, + ignore_missing=ignore_missing, + ) - def find_address_scope(self, name_or_id, ignore_missing=True): + def find_address_scope(self, name_or_id, ignore_missing=True, **query): """Find a single address scope :param name_or_id: The name or ID of an address scope. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. 
+ :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.address_scope.AddressScope` - or None + or None """ - return self._find(_address_scope.AddressScope, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _address_scope.AddressScope, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_address_scope(self, address_scope): """Get a single address scope @@ -104,8 +426,8 @@ def get_address_scope(self, address_scope): :class:`~openstack.network.v2.address_scope.AddressScope` instance. :returns: One :class:`~openstack.network.v2.address_scope.AddressScope` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_address_scope.AddressScope, address_scope) @@ -113,7 +435,7 @@ def address_scopes(self, **query): """Return a generator of address scopes :param dict query: Optional query parameters to be sent to limit - the resources being returned. + the resources being returned. * ``name``: Address scope name * ``ip_version``: Address scope IP address version @@ -123,54 +445,54 @@ def address_scopes(self, **query): :returns: A generator of address scope objects :rtype: :class:`~openstack.network.v2.address_scope.AddressScope` """ - return self._list(_address_scope.AddressScope, paginated=False, - **query) + return self._list(_address_scope.AddressScope, **query) def update_address_scope(self, address_scope, **attrs): """Update an address scope :param address_scope: Either the ID of an address scope or a :class:`~openstack.network.v2.address_scope.AddressScope` instance. 
- :param dict attrs: The attributes to update on the address scope - represented by ``value``. + :param attrs: The attributes to update on the address scope + represented by ``value``. :returns: The updated address scope :rtype: :class:`~openstack.network.v2.address_scope.AddressScope` """ - return self._update(_address_scope.AddressScope, address_scope, - **attrs) + return self._update( + _address_scope.AddressScope, address_scope, **attrs + ) def agents(self, **query): """Return a generator of network agents :param dict query: Optional query parameters to be sent to limit the - resources being returned. + resources being returned. * ``agent_type``: Agent type. * ``availability_zone``: The availability zone for an agent. * ``binary``: The name of the agent's application binary. * ``description``: The description of the agent. * ``host``: The host (host name or host address) the agent is - running on. + running on. * ``topic``: The message queue topic used. * ``is_admin_state_up``: The administrative state of the agent. - : ``is_alive``: Whether the agent is alive. + * ``is_alive``: Whether the agent is alive. :returns: A generator of agents :rtype: :class:`~openstack.network.v2.agent.Agent` """ - return self._list(_agent.Agent, paginated=False, **query) + return self._list(_agent.Agent, **query) def delete_agent(self, agent, ignore_missing=True): """Delete a network agent :param agent: The value can be the ID of a agent or a - :class:`~openstack.network.v2.agent.Agent` instance. + :class:`~openstack.network.v2.agent.Agent` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the agent does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent agent. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the agent does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent agent. :returns: ``None`` """ @@ -180,12 +502,12 @@ def get_agent(self, agent): """Get a single network agent :param agent: The value can be the ID of a agent or a - :class:`~openstack.network.v2.agent.Agent` instance. + :class:`~openstack.network.v2.agent.Agent` instance. :returns: One :class:`~openstack.network.v2.agent.Agent` :rtype: :class:`~openstack.network.v2.agent.Agent` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_agent.Agent, agent) @@ -193,9 +515,9 @@ def update_agent(self, agent, **attrs): """Update a network agent :param agent: The value can be the ID of a agent or a - :class:`~openstack.network.v2.agent.Agent` instance. - :param dict attrs: The attributes to update on the agent represented - by ``value``. + :class:`~openstack.network.v2.agent.Agent` instance. + :param attrs: The attributes to update on the agent represented + by ``value``. :returns: One :class:`~openstack.network.v2.agent.Agent` :rtype: :class:`~openstack.network.v2.agent.Agent` @@ -206,115 +528,122 @@ def dhcp_agent_hosting_networks(self, agent, **query): """A generator of networks hosted by a DHCP agent. :param agent: Either the agent id of an instance of - :class:`~openstack.network.v2.network_agent.Agent` - :param query: kwargs \*\*query: Optional query parameters to be sent - to limit the resources being returned. + :class:`~openstack.network.v2.network_agent.Agent` + :param query: kwargs query: Optional query parameters to be sent + to limit the resources being returned. 
:return: A generator of networks """ agent_obj = self._get_resource(_agent.Agent, agent) - return self._list(_agent.DHCPAgentHostingNetwork, paginated=False, - agent_id=agent_obj.id, **query) + return self._list( + _network.DHCPAgentHostingNetwork, agent_id=agent_obj.id, **query + ) def add_dhcp_agent_to_network(self, agent, network): """Add a DHCP Agent to a network :param agent: Either the agent id of an instance of - :class:`~openstack.network.v2.network_agent.Agent` + :class:`~openstack.network.v2.network_agent.Agent` :param network: Network instance :return: """ network = self._get_resource(_network.Network, network) agent = self._get_resource(_agent.Agent, agent) - body = {'network_id': network.id} - return agent.add_agent_to_network(self.session, **body) + return agent.add_agent_to_network(self, network.id) def remove_dhcp_agent_from_network(self, agent, network): """Remove a DHCP Agent from a network :param agent: Either the agent id of an instance of - :class:`~openstack.network.v2.network_agent.Agent` + :class:`~openstack.network.v2.network_agent.Agent` :param network: Network instance :return: """ - # network_id = resource.Resource.get_id(network) network = self._get_resource(_network.Network, network) agent = self._get_resource(_agent.Agent, agent) - body = {'network_id': network.id} - return agent.remove_agent_from_network(self.session, **body) + return agent.remove_agent_from_network(self, network.id) def network_hosting_dhcp_agents(self, network, **query): """A generator of DHCP agents hosted on a network. :param network: The instance of - :class:`~openstack.network.v2.network.Network` + :class:`~openstack.network.v2.network.Network` :param dict query: Optional query parameters to be sent to limit the - resources returned. + resources returned. 
:return: A generator of hosted DHCP agents """ net = self._get_resource(_network.Network, network) - return self._list(_network.NetworkHostingDHCPAgent, paginated=False, - network_id=net.id, **query) + return self._list( + _agent.NetworkHostingDHCPAgent, network_id=net.id, **query + ) def get_auto_allocated_topology(self, project=None): """Get the auto-allocated topology of a given tenant :param project: - The value is the ID or name of a project + The value is the ID or name of a project :returns: The auto-allocated topology - :rtype: :class:`~openstack.network.v2.\ - auto_allocated_topology.AutoAllocatedTopology` + :rtype: + :class:`~openstack.network.v2.auto_allocated_topology.AutoAllocatedTopology` """ # If project option is not given, grab project id from session if project is None: - project = self.session.get_project_id() - return self._get(_auto_allocated_topology.AutoAllocatedTopology, - project) - - def delete_auto_allocated_topology(self, project=None, - ignore_missing=False): + project = self.get_project_id() + return self._get( + _auto_allocated_topology.AutoAllocatedTopology, project + ) + + def delete_auto_allocated_topology( + self, project=None, ignore_missing=False + ): """Delete auto-allocated topology :param project: The value is the ID or name of a project :param ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the topology does not exist. - When set to ``True``, no exception will be raised when - attempting to delete nonexistant topology + :class:`~openstack.exceptions.NotFoundException` will be + raised when the topology does not exist. 
+ When set to ``True``, no exception will be raised when + attempting to delete nonexistant topology :returns: ``None`` """ # If project option is not given, grab project id from session if project is None: - project = self.session.get_project_id() - self._delete(_auto_allocated_topology.AutoAllocatedTopology, - project, ignore_missing=ignore_missing) + project = self.get_project_id() + self._delete( + _auto_allocated_topology.AutoAllocatedTopology, + project, + ignore_missing=ignore_missing, + ) def validate_auto_allocated_topology(self, project=None): """Validate the resources for auto allocation :param project: - The value is the ID or name of a project + The value is the ID or name of a project :returns: Whether all resources are correctly configured or not - :rtype: :class:`~openstack.network.v2.\ - auto_allocated_topology.ValidateTopology` + :rtype: + :class:`~openstack.network.v2.auto_allocated_topology.ValidateTopology` """ # If project option is not given, grab project id from session if project is None: - project = self.session.get_project_id() - return self._get(_auto_allocated_topology.ValidateTopology, - project=project) + project = self.get_project_id() + return self._get( + _auto_allocated_topology.ValidateTopology, + project=project, + requires_id=False, + ) def availability_zones(self, **query): """Return a generator of availability zones :param dict query: optional query parameters to be set to limit the - returned resources. Valid parameters include: + returned resources. Valid parameters include: * ``name``: The name of an availability zone. * ``resource``: The type of resource for the availability zone. 
@@ -323,41 +652,574 @@ def availability_zones(self, **query): :rtype: :class:`~openstack.network.v2.availability_zone.AvailabilityZone` """ - return self._list(availability_zone.AvailabilityZone, paginated=False) + return self._list(availability_zone.AvailabilityZone) + + def create_bgp_peer(self, **attrs): + """Create a new BGP Peer from attributes""" + return self._create(_bgp_peer.BgpPeer, **attrs) + + def delete_bgp_peer(self, peer, ignore_missing=True): + """Delete a BGP Peer""" + self._delete(_bgp_peer.BgpPeer, peer, ignore_missing=ignore_missing) + + def find_bgp_peer(self, name_or_id, ignore_missing=True, **query): + """Find a single BGP Peer""" + return self._find( + _bgp_peer.BgpPeer, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_bgp_peer(self, peer): + """Get a signle BGP Peer""" + return self._get(_bgp_peer.BgpPeer, peer) + + def update_bgp_peer(self, peer, **attrs): + """Update a BGP Peer""" + return self._update(_bgp_peer.BgpPeer, peer, **attrs) + + def bgp_peers(self, **query): + """Return a generator of BGP Peers""" + return self._list(_bgp_peer.BgpPeer, **query) + + def create_bgp_speaker(self, **attrs): + """Create a new BGP Speaker""" + return self._create(_bgp_speaker.BgpSpeaker, **attrs) + + def delete_bgp_speaker(self, speaker, ignore_missing=True): + """Delete a BGP Speaker""" + self._delete( + _bgp_speaker.BgpSpeaker, speaker, ignore_missing=ignore_missing + ) + + def find_bgp_speaker(self, name_or_id, ignore_missing=True, **query): + """Find a single BGP Peer""" + return self._find( + _bgp_speaker.BgpSpeaker, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_bgp_speaker(self, speaker): + """Get a signle BGP Speaker""" + return self._get(_bgp_speaker.BgpSpeaker, speaker) + + def update_bgp_speaker(self, speaker, **attrs): + """Update a BGP Speaker""" + return self._update(_bgp_speaker.BgpSpeaker, speaker, **attrs) + + def bgp_speakers(self, **query): + """Return a generator of BGP Peers""" 
+ return self._list(_bgp_speaker.BgpSpeaker, **query) + + def add_bgp_peer_to_speaker(self, speaker, peer_id): + """Bind the BGP peer to the specified BGP Speaker.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) + return speaker.add_bgp_peer(self, peer_id) + + def remove_bgp_peer_from_speaker(self, speaker, peer_id): + """Unbind the BGP peer from a BGP Speaker.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) + return speaker.remove_bgp_peer(self, peer_id) + + def add_gateway_network_to_speaker(self, speaker, network_id): + """Add a network to the specified BGP speaker.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) + return speaker.add_gateway_network(self, network_id) + + def remove_gateway_network_from_speaker(self, speaker, network_id): + """Remove a network from the specified BGP speaker.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) + return speaker.remove_gateway_network(self, network_id) + + def get_advertised_routes_of_speaker(self, speaker): + """List all routes advertised by the specified BGP Speaker.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) + return speaker.get_advertised_routes(self) + + def get_bgp_dragents_hosting_speaker(self, speaker): + """List all BGP dynamic agents which are hosting the + specified BGP Speaker.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, speaker) + return speaker.get_bgp_dragents(self) + + def add_bgp_speaker_to_dragent(self, bgp_agent, bgp_speaker_id): + """Add a BGP Speaker to the specified dynamic routing agent.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, bgp_speaker_id) + speaker.add_bgp_speaker_to_dragent(self, bgp_agent) + + def get_bgp_speakers_hosted_by_dragent(self, bgp_agent): + """List all BGP Seakers hosted on the specified dynamic routing + agent.""" + agent = self._get_resource(_agent.Agent, bgp_agent) + return agent.get_bgp_speakers_hosted_by_dragent(self) + + def 
remove_bgp_speaker_from_dragent(self, bgp_agent, bgp_speaker_id): + """Delete the BGP Speaker hosted by the specified dynamic + routing agent.""" + speaker = self._get_resource(_bgp_speaker.BgpSpeaker, bgp_speaker_id) + speaker.remove_bgp_speaker_from_dragent(self, bgp_agent) + + def create_bgpvpn(self, **attrs): + """Create a new BGPVPN + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.bgpvpn.BgpVpn`, comprised of the + properties on the BGPVPN class, for details see the Neutron + api-ref. + + :returns: The result of BGPVPN creation + :rtype: :class:`~openstack.network.v2.bgpvpn.BgpVpn` + """ + return self._create(_bgpvpn.BgpVpn, **attrs) + + def delete_bgpvpn(self, bgpvpn, ignore_missing=True): + """Delete a BGPVPN + + :param bgpvpn: The value can be either the ID of a bgpvpn or + a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the BGPVPN does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent BGPVPN. + + :returns: ``None`` + """ + self._delete(_bgpvpn.BgpVpn, bgpvpn, ignore_missing=ignore_missing) + + def find_bgpvpn(self, name_or_id, ignore_missing=True, **query): + """Find a single BGPVPN + + :param name_or_id: The name or ID of a BGPVPN. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One :class:`~openstack.network.v2.bgpvpn.BGPVPN` + or None + """ + return self._find( + _bgpvpn.BgpVpn, name_or_id, ignore_missing=ignore_missing, **query + ) + + def get_bgpvpn(self, bgpvpn): + """Get a signle BGPVPN + + :param bgpvpn: The value can be the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + + :returns: One :class:`~openstack.network.v2.bgpvpn.BgpVpn` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_bgpvpn.BgpVpn, bgpvpn) + + def update_bgpvpn(self, bgppvpn, **attrs): + """Update a BGPVPN + + :param bgpvpn: Either the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param attrs: The attributes to update on the BGPVPN represented + by ``value``. + + :returns: The updated BGPVPN + :rtype: :class:`~openstack.network.v2.bgpvpn.BgpVpn` + """ + return self._update(_bgpvpn.BgpVpn, bgppvpn, **attrs) + + def bgpvpns(self, **query): + """Return a generator of BGP VPNs + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of BgpVPN objects + :rtype: :class:`~openstack.network.v2.bgpvpn.BgpVpn` + """ + return self._list(_bgpvpn.BgpVpn, **query) + + def create_bgpvpn_network_association(self, bgpvpn, **attrs): + """Create a new BGPVPN Network Association + + :param bgpvpn: The value can be either the ID of a bgpvpn or + a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.bgpvpn_network_association. + BgpVpnNetworkAssociation`, + comprised of the properties on the BgpVpnNetworkAssociation class. + + :returns: The results of BgpVpnNetworkAssociation creation + :rtype: :class:`~openstack.network.v2.bgpvpn_network_association. 
+ BgpVpnNetworkAssociation` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._create( + _bgpvpn_network_association.BgpVpnNetworkAssociation, + bgpvpn_id=bgpvpn_res.id, + **attrs, + ) + + def delete_bgpvpn_network_association( + self, bgpvpn, net_association, ignore_missing=True + ): + """Delete a BGPVPN Network Association + + :param bgpvpn: The value can be either the ID of a bgpvpn or + a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param net_association: The value can be either the ID of a + bgpvpn_network_association or + a :class:`~openstack.network.v2.bgpvpn_network_association. + BgpVpnNetworkAssociation` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the BgpVpnNetworkAssociation does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent BgpVpnNetworkAssociation. + + :returns: ``None`` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + self._delete( + _bgpvpn_network_association.BgpVpnNetworkAssociation, + net_association, + ignore_missing=ignore_missing, + bgpvpn_id=bgpvpn_res.id, + ) + + def get_bgpvpn_network_association(self, bgpvpn, net_association): + """Get a signle BGPVPN Network Association + + :param bgpvpn: The value can be the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param net_association: The value can be the ID of a + BgpVpnNetworkAssociation or a + :class:`~openstack.network.v2.bgpvpn_network_association. + BgpVpnNetworkAssociation` instance. + + :returns: One :class:`~openstack.network.v2. + bgpvpn_network_associaition.BgpVpnNetworkAssociation` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._get( + _bgpvpn_network_association.BgpVpnNetworkAssociation, + net_association, + bgpvpn_id=bgpvpn_res.id, + ) + + def bgpvpn_network_associations(self, bgpvpn, **query): + """Return a generator of BGP VPN Network Associations + + :param: bgpvpn: The value can be the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of BgpVpnNetworkAssociation objects + :rtype: :class:`~openstack.network.v2.bgpvpn_network_association. + BgpVpnNetworkAssociation` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._list( + _bgpvpn_network_association.BgpVpnNetworkAssociation, + bgpvpn_id=bgpvpn_res.id, + **query, + ) + + def create_bgpvpn_port_association(self, bgpvpn, **attrs): + """Create a new BGPVPN Port Association + + :param bgpvpn: The value can be either the ID of a bgpvpn or + a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.bgpvpn_port_association. + BgpVpnPortAssociation`, + comprised of the properties on the BgpVpnPortAssociation class. + + :returns: The results of BgpVpnPortAssociation creation + :rtype: :class:`~openstack.network.v2.bgpvpn_port_association. + BgpVpnPortAssociation` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._create( + _bgpvpn_port_association.BgpVpnPortAssociation, + bgpvpn_id=bgpvpn_res.id, + **attrs, + ) + + def delete_bgpvpn_port_association( + self, bgpvpn, port_association, ignore_missing=True + ): + """Delete a BGPVPN Port Association + + :param bgpvpn: The value can be either the ID of a bgpvpn or + a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. 
+ :param port_association: The value can be either the ID of a + bgpvpn_port_association or + a :class:`~openstack.network.v2.bgpvpn_port_association. + BgpVpnPortAssociation` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the BgpVpnPortAssociation does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent BgpVpnPortAssociation. + + :returns: ``None`` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + self._delete( + _bgpvpn_port_association.BgpVpnPortAssociation, + port_association, + ignore_missing=ignore_missing, + bgpvpn_id=bgpvpn_res.id, + ) + + def find_bgpvpn_port_association( + self, name_or_id, bgpvpn_id, ignore_missing=True, **query + ): + """Find a single BGPVPN Port Association + + :param name_or_id: The name or ID of a BgpVpnPortAssociation. + :param bgpvpn_id: The value can be the ID of a BGPVPN. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.bgpvpn_port_association. + BgpVpnPortAssociation` or None + """ + return self._find( + _bgpvpn_port_association.BgpVpnPortAssociation, + name_or_id, + ignore_missing=ignore_missing, + bgpvpn_id=bgpvpn_id, + **query, + ) + + def get_bgpvpn_port_association(self, bgpvpn, port_association): + """Get a single BGPVPN Port Association + + :param bgpvpn: The value can be the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param port_association: The value can be the ID of a + BgpVpnPortAssociation or a + :class:`~openstack.network.v2.bgpvpn_port_association. + BgpVpnPortAssociation` instance. 
+ + :returns: One :class:`~openstack.network.v2. + bgpvpn_port_association.BgpVpnPortAssociation` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._get( + _bgpvpn_port_association.BgpVpnPortAssociation, + port_association, + bgpvpn_id=bgpvpn_res.id, + ) + + def update_bgpvpn_port_association( + self, bgpvpn, port_association, **attrs + ): + """Update a BGPVPN Port Association + + :param bgpvpn: Either the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param port_association: The value can be the ID of a + BgpVpnPortAssociation or a + :class:`~openstack.network.v2.bgpvpn_port_association. + BgpVpnPortAssociation` instance. + :param attrs: The attributes to update on the BGPVPN represented + by ``value``. + + :returns: The updated BgpVpnPortAssociation. + :rtype: :class:`~openstack.network.v2.bgpvpn_port_association.BgpVpnPortAssociation` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._update( + _bgpvpn_port_association.BgpVpnPortAssociation, + port_association, + bgpvpn_id=bgpvpn_res.id, + **attrs, + ) + + def bgpvpn_port_associations(self, bgpvpn, **query): + """Return a generator of BGP VPN Port Associations + + :param bgpvpn: The value can be the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of BgpVpnPortAssociation objects + :rtype: :class:`~openstack.network.v2.bgpvpn_port_association. 
+ BgpVpnPortAssociation` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._list( + _bgpvpn_port_association.BgpVpnPortAssociation, + bgpvpn_id=bgpvpn_res.id, + **query, + ) + + def create_bgpvpn_router_association(self, bgpvpn, **attrs): + """Create a new BGPVPN Router Association + + :param bgpvpn: The value can be either the ID of a bgpvpn or + a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.bgpvpn_router_association. + BgpVpnRouterAssociation`, + comprised of the properties on the BgpVpnRouterAssociation class. + + :returns: The results of BgpVpnRouterAssociation creation + :rtype: :class:`~openstack.network.v2.bgpvpn_router_association. + BgpVpnRouterAssociation` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._create( + _bgpvpn_router_association.BgpVpnRouterAssociation, + bgpvpn_id=bgpvpn_res.id, + **attrs, + ) + + def delete_bgpvpn_router_association( + self, bgpvpn, router_association, ignore_missing=True + ): + """Delete a BGPVPN Router Association + + :param bgpvpn: The value can be either the ID of a bgpvpn or + a :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param router_association: The value can be either the ID of a + bgpvpn_router_association or + a :class:`~openstack.network.v2.bgpvpn_router_association. + BgpVpnRouterAssociation` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the BgpVpnRouterAssociation does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent BgpVpnRouterAssociation. 
+ + :returns: ``None`` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + self._delete( + _bgpvpn_router_association.BgpVpnRouterAssociation, + router_association, + ignore_missing=ignore_missing, + bgpvpn_id=bgpvpn_res.id, + ) + + def get_bgpvpn_router_association(self, bgpvpn, router_association): + """Get a single BGPVPN Router Association + + :param bgpvpn: The value can be the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param router_association: The value can be the ID of a + BgpVpnRouterAssociation or a + :class:`~openstack.network.v2.bgpvpn_router_association. + BgpVpnRouterAssociation` instance. + + :returns: One :class:`~openstack.network.v2. + bgpvpn_router_association.BgpVpnRouterAssociation` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._get( + _bgpvpn_router_association.BgpVpnRouterAssociation, + router_association, + bgpvpn_id=bgpvpn_res.id, + ) + + def update_bgpvpn_router_association( + self, bgpvpn, router_association, **attrs + ): + """Update a BGPVPN Router Association + + :param bgpvpn: Either the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param router_association: The ID of a BgpVpnRouterAssociation. + :param attrs: The attributes to update on the router association. + + :returns: The updated BgpVpnRouterAssociation. + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._update( + _bgpvpn_router_association.BgpVpnRouterAssociation, + router_association, + bgpvpn_id=bgpvpn_res.id, + **attrs, + ) + + def bgpvpn_router_associations(self, bgpvpn, **query): + """Return a generator of BGP VPN router Associations + + :param bgpvpn: The value can be the ID of a BGPVPN or a + :class:`~openstack.network.v2.bgpvpn.BgpVpn` instance. + :param dict query: Optional query parameters to be sent to limit + the resources being returned. 
+ + :returns: A generator of BgpVpnRouterAssociation objects + :rtype: :class:`~openstack.network.v2.bgpvpn_router_association. + BgpVpnRouterAssociation` + """ + bgpvpn_res = self._get_resource(_bgpvpn.BgpVpn, bgpvpn) + return self._list( + _bgpvpn_router_association.BgpVpnRouterAssociation, + bgpvpn_id=bgpvpn_res.id, + **query, + ) - def find_extension(self, name_or_id, ignore_missing=True): + def find_extension(self, name_or_id, ignore_missing=True, **query): """Find a single extension :param name_or_id: The name or ID of a extension. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.extension.Extension` - or None + or None """ - return self._find(extension.Extension, name_or_id, - ignore_missing=ignore_missing) + return self._find( + extension.Extension, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def extensions(self, **query): """Return a generator of extensions :param dict query: Optional query parameters to be sent to limit - the resources being returned. Currently no - parameter is supported. + the resources being returned. Currently no + parameter is supported. 
:returns: A generator of extension objects :rtype: :class:`~openstack.network.v2.extension.Extension` """ - return self._list(extension.Extension, paginated=False, **query) + return self._list(extension.Extension, **query) def create_flavor(self, **attrs): """Create a new network service flavor from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.flavor.Flavor`, - comprised of the properties on the Flavor class. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.flavor.Flavor`, + comprised of the properties on the Flavor class. :returns: The results of flavor creation :rtype: :class:`~openstack.network.v2.flavor.Flavor` @@ -371,28 +1233,31 @@ def delete_flavor(self, flavor, ignore_missing=True): The value can be either the ID of a flavor or a :class:`~openstack.network.v2.flavor.Flavor` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the flavor does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent flavor. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the flavor does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent flavor. :returns: ``None`` """ self._delete(_flavor.Flavor, flavor, ignore_missing=ignore_missing) - def find_flavor(self, name_or_id, ignore_missing=True): + def find_flavor(self, name_or_id, ignore_missing=True, **query): """Find a single network service flavor :param name_or_id: The name or ID of a flavor. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. 
+ :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.flavor.Flavor` or None """ - return self._find(_flavor.Flavor, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _flavor.Flavor, name_or_id, ignore_missing=ignore_missing, **query + ) def get_flavor(self, flavor): """Get a single network service flavor @@ -402,19 +1267,18 @@ def get_flavor(self, flavor): :class:`~openstack.network.v2.flavor.Flavor` instance. :returns: One :class:`~openstack.network.v2.flavor.Flavor` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_flavor.Flavor, flavor) def update_flavor(self, flavor, **attrs): """Update a network service flavor - :param flavor: - Either the id of a flavor or a + :param flavor: Either the id of a flavor or a :class:`~openstack.network.v2.flavor.Flavor` instance. - :attrs kwargs: The attributes to update on the flavor represented - by ``value``. + :param attrs: The attributes to update on the flavor represented + by ``flavor``. :returns: The updated flavor :rtype: :class:`~openstack.network.v2.flavor.Flavor` @@ -425,8 +1289,8 @@ def flavors(self, **query): """Return a generator of network service flavors :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters - include: + the resources being returned. Valid parameters + include: * ``description``: The description of a flavor. * ``is_enabled``: Whether a flavor is enabled. 
@@ -436,12 +1300,290 @@ def flavors(self, **query): :returns: A generator of flavor objects :rtype: :class:`~openstack.network.v2.flavor.Flavor` """ - return self._list(_flavor.Flavor, paginated=True, **query) + return self._list(_flavor.Flavor, **query) + + def associate_flavor_with_service_profile(self, flavor, service_profile): + """Associate network flavor with service profile. + + :param flavor: + Either the id of a flavor or a + :class:`~openstack.network.v2.flavor.Flavor` instance. + :param service_profile: + The value can be either the ID of a service profile or a + :class:`~openstack.network.v2.service_profile.ServiceProfile` + instance. + :return: + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + service_profile = self._get_resource( + _service_profile.ServiceProfile, service_profile + ) + return flavor.associate_flavor_with_service_profile( + self, service_profile.id + ) + + def disassociate_flavor_from_service_profile( + self, flavor, service_profile + ): + """Disassociate network flavor from service profile. + + :param flavor: + Either the id of a flavor or a + :class:`~openstack.network.v2.flavor.Flavor` instance. + :param service_profile: + The value can be either the ID of a service profile or a + :class:`~openstack.network.v2.service_profile.ServiceProfile` + instance. + :return: + """ + flavor = self._get_resource(_flavor.Flavor, flavor) + service_profile = self._get_resource( + _service_profile.ServiceProfile, service_profile + ) + return flavor.disassociate_flavor_from_service_profile( + self, service_profile.id + ) + + def create_local_ip(self, **attrs): + """Create a new local ip from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.local_ip.LocalIP`, + comprised of the properties on the LocalIP class. 
+ + :returns: The results of local ip creation + :rtype: :class:`~openstack.network.v2.local_ip.LocalIP` + """ + return self._create(_local_ip.LocalIP, **attrs) + + def delete_local_ip(self, local_ip, ignore_missing=True, if_revision=None): + """Delete a local ip + + :param local_ip: The value can be either the ID of a local ip or a + :class:`~openstack.network.v2.local_ip.LocalIP` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the local ip does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent ip. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + + :returns: ``None`` + """ + self._delete( + _local_ip.LocalIP, + local_ip, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) + + def find_local_ip(self, name_or_id, ignore_missing=True, **query): + """Find a local IP + + :param name_or_id: The name or ID of an local IP. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.local_ip.LocalIP` + or None + """ + return self._find( + _local_ip.LocalIP, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_local_ip(self, local_ip): + """Get a single local ip + + :param local_ip: The value can be the ID of a local ip or a + :class:`~openstack.network.v2.local_ip.LocalIP` + instance. + + :returns: One :class:`~openstack.network.v2.local_ip.LocalIP` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_local_ip.LocalIP, local_ip) + + def local_ips(self, **query): + """Return a generator of local ips + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + * ``name``: Local IP name + * ``description``: Local IP description + * ``project_id``: Owner project ID + * ``network_id``: Local IP network + * ``local_port_id``: Local port ID + * ``local_ip_address``: The IP address of a Local IP + * ``ip_mode``: The Local IP mode + + :returns: A generator of local ip objects + :rtype: :class:`~openstack.network.v2.local_ip.LocalIP` + """ + return self._list(_local_ip.LocalIP, **query) + + def update_local_ip(self, local_ip, if_revision=None, **attrs): + """Update a local ip + + :param local_ip: Either the id of a local ip or a + :class:`~openstack.network.v2.local_ip.LocalIP` + instance. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + :param attrs: The attributes to update on the ip represented + by ``value``. + + :returns: The updated ip + :rtype: :class:`~openstack.network.v2.local_ip.LocalIP` + """ + return self._update( + _local_ip.LocalIP, local_ip, if_revision=if_revision, **attrs + ) + + def create_local_ip_association(self, local_ip, **attrs): + """Create a new local ip association from attributes + + :param local_ip: The value can be the ID of a Local IP or a + :class:`~openstack.network.v2.local_ip.LocalIP` + instance. + :param attrs: Keyword arguments which will be used to create + a + :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation`, + comprised of the properties on the LocalIP class. 
+ + :returns: The results of local ip association creation + :rtype: + :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` + """ + local_ip = self._get_resource(_local_ip.LocalIP, local_ip) + return self._create( + _local_ip_association.LocalIPAssociation, + local_ip_id=local_ip.id, + **attrs, + ) + + def delete_local_ip_association( + self, local_ip, fixed_port_id, ignore_missing=True, if_revision=None + ): + """Delete a local ip association + + :param local_ip: The value can be the ID of a Local IP or a + :class:`~openstack.network.v2.local_ip.LocalIP` + instance. + :param fixed_port_id: The value can be either the fixed port ID + or a :class: + `~openstack.network.v2.local_ip_association.LocalIPAssociation` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the local ip association does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent ip. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + + :returns: ``None`` + """ + local_ip = self._get_resource(_local_ip.LocalIP, local_ip) + self._delete( + _local_ip_association.LocalIPAssociation, + fixed_port_id, + local_ip_id=local_ip.id, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) + + def find_local_ip_association( + self, name_or_id, local_ip, ignore_missing=True, **query + ): + """Find a local ip association + + :param name_or_id: The name or ID of local ip association. + :param local_ip: The value can be the ID of a Local IP or a + :class:`~openstack.network.v2.local_ip.LocalIP` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
+ :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` + or None + """ + local_ip = self._get_resource(_local_ip.LocalIP, local_ip) + return self._find( + _local_ip_association.LocalIPAssociation, + name_or_id, + local_ip_id=local_ip.id, + ignore_missing=ignore_missing, + **query, + ) + + def get_local_ip_association(self, local_ip_association, local_ip): + """Get a single local ip association + + :param local_ip: The value can be the ID of a Local IP or a + :class:`~openstack.network.v2.local_ip.LocalIP` + instance. + :param local_ip_association: The value can be the ID + of a local ip association or a + :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` + instance. + + :returns: One + :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + local_ip = self._get_resource(_local_ip.LocalIP, local_ip) + return self._get( + _local_ip_association.LocalIPAssociation, + local_ip_association, + local_ip_id=local_ip.id, + ) + + def local_ip_associations(self, local_ip, **query): + """Return a generator of local ip associations + + :param local_ip: The value can be the ID of a Local IP or a + :class:`~openstack.network.v2.local_ip.LocalIP` instance. + :param dict query: Optional query parameters to be sent to limit + the resources being returned. 
+ + * ``fixed_port_id``: The ID of the port to which a local IP + is associated + * ``fixed_ip``: The fixed ip address associated with a + a Local IP + * ``host``: Host where local ip is associated + + :returns: A generator of local ip association objects + :rtype: + :class:`~openstack.network.v2.local_ip_association.LocalIPAssociation` + """ + local_ip = self._get_resource(_local_ip.LocalIP, local_ip) + return self._list( + _local_ip_association.LocalIPAssociation, + local_ip_id=local_ip.id, + **query, + ) def create_ip(self, **attrs): """Create a new floating ip from attributes - :param dict attrs: Keyword arguments which will be used to create + :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.floating_ip.FloatingIP`, comprised of the properties on the FloatingIP class. @@ -450,56 +1592,68 @@ def create_ip(self, **attrs): """ return self._create(_floating_ip.FloatingIP, **attrs) - def delete_ip(self, floating_ip, ignore_missing=True): + def delete_ip(self, floating_ip, ignore_missing=True, if_revision=None): """Delete a floating ip :param floating_ip: The value can be either the ID of a floating ip - or a :class:`~openstack.network.v2.floating_ip.FloatingIP` - instance. + or a :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the floating ip does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent ip. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the floating ip does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent ip. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. 
:returns: ``None`` """ - self._delete(_floating_ip.FloatingIP, floating_ip, - ignore_missing=ignore_missing) + self._delete( + _floating_ip.FloatingIP, + floating_ip, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) def find_available_ip(self): """Find an available IP :returns: One :class:`~openstack.network.v2.floating_ip.FloatingIP` - or None + or None """ - return _floating_ip.FloatingIP.find_available(self.session) + return _floating_ip.FloatingIP.find_available(self) - def find_ip(self, name_or_id, ignore_missing=True): + def find_ip(self, name_or_id, ignore_missing=True, **query): """Find a single IP :param name_or_id: The name or ID of an IP. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.floating_ip.FloatingIP` - or None + or None """ - return self._find(_floating_ip.FloatingIP, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _floating_ip.FloatingIP, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_ip(self, floating_ip): """Get a single floating ip :param floating_ip: The value can be the ID of a floating ip or a - :class:`~openstack.network.v2.floating_ip.FloatingIP` - instance. + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. :returns: One :class:`~openstack.network.v2.floating_ip.FloatingIP` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. 
+ :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_floating_ip.FloatingIP, floating_ip) @@ -507,45 +1661,186 @@ def ips(self, **query): """Return a generator of ips :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``description``: The description of a floating IP. * ``fixed_ip_address``: The fixed IP address associated with a - floating IP address. + floating IP address. * ``floating_ip_address``: The IP address of a floating IP. * ``floating_network_id``: The ID of the network associated with - a floating IP. + a floating IP. * ``port_id``: The ID of the port to which a floating IP is - associated. + associated. * ``project_id``: The ID of the project a floating IP is - associated with. + associated with. * ``router_id``: The ID of an associated router. * ``status``: The status of a floating IP, which can be ``ACTIVE`` - or ``DOWN``. + or ``DOWN``. :returns: A generator of floating IP objects :rtype: :class:`~openstack.network.v2.floating_ip.FloatingIP` """ - return self._list(_floating_ip.FloatingIP, paginated=False, **query) + return self._list(_floating_ip.FloatingIP, **query) - def update_ip(self, floating_ip, **attrs): + def update_ip(self, floating_ip, if_revision=None, **attrs): """Update a ip :param floating_ip: Either the id of a ip or a - :class:`~openstack.network.v2.floating_ip.FloatingIP` - instance. - :param dict attrs: The attributes to update on the ip represented - by ``value``. + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + :param attrs: The attributes to update on the ip represented + by ``value``. 
:returns: The updated ip :rtype: :class:`~openstack.network.v2.floating_ip.FloatingIP` """ - return self._update(_floating_ip.FloatingIP, floating_ip, **attrs) + return self._update( + _floating_ip.FloatingIP, + floating_ip, + if_revision=if_revision, + **attrs, + ) + + def create_port_forwarding(self, **attrs): + """Create a new floating ip port forwarding from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.port_forwarding.PortForwarding`, + comprised of the properties on the PortForwarding class. + + :returns: The results of port forwarding creation + :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` + """ + return self._create(_port_forwarding.PortForwarding, **attrs) + + def get_port_forwarding(self, port_forwarding, floating_ip): + """Get a single port forwarding + + :param port_forwarding: The value can be the ID of a port forwarding + or a :class:`~openstack.network.v2.port_forwarding.PortForwarding` + instance. + :param floating_ip: The value can be the ID of a Floating IP or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + + :returns: One + :class:`~openstack.network.v2.port_forwarding.PortForwarding` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + floating_ip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._get( + _port_forwarding.PortForwarding, + port_forwarding, + floatingip_id=floating_ip.id, + ) + + def find_port_forwarding( + self, pf_id, floating_ip, ignore_missing=True, **query + ): + """Find a single port forwarding + + :param pf_id: The ID of a port forwarding. + :param floating_ip: The value can be the ID of a Floating IP or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. 
+ When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: + One :class:`~openstack.network.v2.port_forwarding.PortForwarding` + or None + """ + floating_ip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._find( + _port_forwarding.PortForwarding, + pf_id, + floatingip_id=floating_ip.id, + ignore_missing=ignore_missing, + **query, + ) + + def delete_port_forwarding( + self, port_forwarding, floating_ip, ignore_missing=True + ): + """Delete a port forwarding + + :param port_forwarding: The value can be the ID of a port forwarding + or a :class:`~openstack.network.v2.port_forwarding.PortForwarding` + instance. + :param floating_ip: The value can be the ID of a Floating IP or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the floating ip does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent ip. + + :returns: ``None`` + """ + fip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + self._delete( + _port_forwarding.PortForwarding, + port_forwarding, + floatingip_id=fip.id, + ignore_missing=ignore_missing, + ) + + def port_forwardings(self, floating_ip, **query): + """Return a generator of port forwardings + + :param floating_ip: The value can be the ID of a Floating IP or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + + * ``internal_port_id``: The ID of internal port. 
+ * ``external_port``: The external TCP/UDP/other port number + * ``protocol``: TCP/UDP/other protocol + + :returns: A generator of port forwarding objects + :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` + """ + fip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._list( + _port_forwarding.PortForwarding, floatingip_id=fip.id, **query + ) + + def update_port_forwarding(self, port_forwarding, floating_ip, **attrs): + """Update a port forwarding + + :param port_forwarding: The value can be the ID of a port forwarding + or a :class:`~openstack.network.v2.port_forwarding.PortForwarding` + instance. + :param floating_ip: The value can be the ID of a Floating IP or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param attrs: The attributes to update on the ip represented + by ``value``. + + :returns: The updated port_forwarding + :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` + """ + fip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._update( + _port_forwarding.PortForwarding, + port_forwarding, + floatingip_id=fip.id, + **attrs, + ) def create_health_monitor(self, **attrs): """Create a new health monitor from attributes - :param dict attrs: Keyword arguments which will be used to create + :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.health_monitor.HealthMonitor`, comprised of the properties on the HealthMonitor class. @@ -562,42 +1857,52 @@ def delete_health_monitor(self, health_monitor, ignore_missing=True): :class:`~openstack.network.v2.health_monitor.HealthMonitor` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the health monitor does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent health monitor. 
+ :class:`~openstack.exceptions.NotFoundException` will be + raised when the health monitor does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent health monitor. :returns: ``None`` """ - self._delete(_health_monitor.HealthMonitor, health_monitor, - ignore_missing=ignore_missing) + self._delete( + _health_monitor.HealthMonitor, + health_monitor, + ignore_missing=ignore_missing, + ) - def find_health_monitor(self, name_or_id, ignore_missing=True): + def find_health_monitor(self, name_or_id, ignore_missing=True, **query): """Find a single health monitor :param name_or_id: The name or ID of a health monitor. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.health_monitor. - HealthMonitor` or None - """ - return self._find(_health_monitor.HealthMonitor, - name_or_id, ignore_missing=ignore_missing) - - def get_health_monitor(self, health_monitor): - """Get a single health monitor + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.health_monitor.HealthMonitor` + or None + """ + return self._find( + _health_monitor.HealthMonitor, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_health_monitor(self, health_monitor): + """Get a single health monitor :param health_monitor: The value can be the ID of a health monitor or a - :class:`~openstack.network.v2.health_monitor.HealthMonitor` - instance. 
+ :class:`~openstack.network.v2.health_monitor.HealthMonitor` + instance. :returns: One - :class:`~openstack.network.v2.health_monitor.HealthMonitor` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :class:`~openstack.network.v2.health_monitor.HealthMonitor` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_health_monitor.HealthMonitor, health_monitor) @@ -605,52 +1910,52 @@ def health_monitors(self, **query): """Return a generator of health monitors :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``delay``: the time in milliseconds between sending probes. * ``expected_codes``: The expected HTTP codes for a pssing HTTP(S) - monitor. + monitor. * ``http_method``: The HTTP method a monitor uses for requests. * ``is_admin_state_up``: The administrative state of a health - monitor. + monitor. * ``max_retries``: The maximum consecutive health probe attempts. * ``project_id``: The ID of the project this health monitor is - associated with. + associated with. * ``timeout``: The maximum number of milliseconds for a monitor to - wait for a connection to be established before it - times out. + wait for a connection to be established before it + times out. * ``type``: The type of probe sent by the load balancer for health - check, which can be ``PING``, ``TCP``, ``HTTP`` or - ``HTTPS``. + check, which can be ``PING``, ``TCP``, ``HTTP`` or + ``HTTPS``. * ``url_path``: The path portion of a URI that will be probed. 
:returns: A generator of health monitor objects :rtype: :class:`~openstack.network.v2.health_monitor.HealthMonitor` """ - return self._list(_health_monitor.HealthMonitor, paginated=False, - **query) + return self._list(_health_monitor.HealthMonitor, **query) def update_health_monitor(self, health_monitor, **attrs): """Update a health monitor :param health_monitor: Either the id of a health monitor or a - :class:`~openstack.network.v2.health_monitor. - HealthMonitor` instance. - :param dict attrs: The attributes to update on the health monitor - represented by ``value``. + :class:`~openstack.network.v2.health_monitor.HealthMonitor` + instance. + :param attrs: The attributes to update on the health monitor + represented by ``value``. :returns: The updated health monitor :rtype: :class:`~openstack.network.v2.health_monitor.HealthMonitor` """ - return self._update(_health_monitor.HealthMonitor, health_monitor, - **attrs) + return self._update( + _health_monitor.HealthMonitor, health_monitor, **attrs + ) def create_listener(self, **attrs): """Create a new listener from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.listener.Listener`, - comprised of the properties on the Listener class. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.listener.Listener`, + comprised of the properties on the Listener class. :returns: The results of listener creation :rtype: :class:`~openstack.network.v2.listener.Listener` @@ -661,42 +1966,49 @@ def delete_listener(self, listener, ignore_missing=True): """Delete a listener :param listener: The value can be either the ID of a listner or a - :class:`~openstack.network.v2.listener.Listener` instance. + :class:`~openstack.network.v2.listener.Listener` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the listner does not exist. 
- When set to ``True``, no exception will be set when - attempting to delete a nonexistent listener. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the listner does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent listener. :returns: ``None`` """ - self._delete(_listener.Listener, listener, - ignore_missing=ignore_missing) + self._delete( + _listener.Listener, listener, ignore_missing=ignore_missing + ) - def find_listener(self, name_or_id, ignore_missing=True): + def find_listener(self, name_or_id, ignore_missing=True, **query): """Find a single listener :param name_or_id: The name or ID of a listener. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.listener.Listener` or None """ - return self._find(_listener.Listener, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _listener.Listener, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_listener(self, listener): """Get a single listener :param listener: The value can be the ID of a listener or a - :class:`~openstack.network.v2.listener.Listener` - instance. + :class:`~openstack.network.v2.listener.Listener` + instance. :returns: One :class:`~openstack.network.v2.listener.Listener` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. 
+ :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_listener.Listener, listener) @@ -704,13 +2016,13 @@ def listeners(self, **query): """Return a generator of listeners :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``connection_limit``: The maximum number of connections - permitted for the load-balancer. + permitted for the load-balancer. * ``default_pool_id``: The ID of the default pool. * ``default_tls_container_ref``: A reference to a container of TLS - secret. + secret. * ``description``: The description of a listener. * ``is_admin_state_up``: The administrative state of the listener. * ``name``: The name of a listener. @@ -721,16 +2033,16 @@ def listeners(self, **query): :returns: A generator of listener objects :rtype: :class:`~openstack.network.v2.listener.Listener` """ - return self._list(_listener.Listener, paginated=False, **query) + return self._list(_listener.Listener, **query) def update_listener(self, listener, **attrs): """Update a listener :param listener: Either the id of a listener or a - :class:`~openstack.network.v2.listener.Listener` - instance. - :param dict attrs: The attributes to update on the listener - represented by ``listener``. + :class:`~openstack.network.v2.listener.Listener` + instance. + :param attrs: The attributes to update on the listener + represented by ``listener``. 
:returns: The updated listener :rtype: :class:`~openstack.network.v2.listener.Listener` @@ -740,7 +2052,7 @@ def update_listener(self, listener, **attrs): def create_load_balancer(self, **attrs): """Create a new load balancer from attributes - :param dict attrs: Keyword arguments which will be used to create + :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.load_balancer.LoadBalancer`, comprised of the properties on the LoadBalancer class. @@ -753,44 +2065,53 @@ def delete_load_balancer(self, load_balancer, ignore_missing=True): """Delete a load balancer :param load_balancer: The value can be the ID of a load balancer or a - :class:`~openstack.network.v2.load_balancer.LoadBalancer` - instance. + :class:`~openstack.network.v2.load_balancer.LoadBalancer` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the load balancer does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent load balancer. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the load balancer does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent load balancer. :returns: ``None`` """ - self._delete(_load_balancer.LoadBalancer, load_balancer, - ignore_missing=ignore_missing) + self._delete( + _load_balancer.LoadBalancer, + load_balancer, + ignore_missing=ignore_missing, + ) - def find_load_balancer(self, name_or_id, ignore_missing=True): + def find_load_balancer(self, name_or_id, ignore_missing=True, **query): """Find a single load balancer :param name_or_id: The name or ID of a load balancer. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. 
+ :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.load_balancer.LoadBalancer` - or None + or None """ - return self._find(_load_balancer.LoadBalancer, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _load_balancer.LoadBalancer, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_load_balancer(self, load_balancer): """Get a single load balancer :param load_balancer: The value can be the ID of a load balancer or a - :class:`~openstack.network.v2.load_balancer.LoadBalancer` - instance. + :class:`~openstack.network.v2.load_balancer.LoadBalancer` + instance. :returns: One :class:`~openstack.network.v2.load_balancer.LoadBalancer` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_load_balancer.LoadBalancer, load_balancer) @@ -798,33 +2119,33 @@ def load_balancers(self, **query): """Return a generator of load balancers :param dict query: Optional query parameters to be sent to limit - the resources being returned. + the resources being returned. :returns: A generator of load balancer objects :rtype: :class:`~openstack.network.v2.load_balancer.LoadBalancer` """ - return self._list(_load_balancer.LoadBalancer, paginated=False, - **query) + return self._list(_load_balancer.LoadBalancer, **query) def update_load_balancer(self, load_balancer, **attrs): """Update a load balancer :param load_balancer: Either the id of a load balancer or a - :class:`~openstack.network.v2.load_balancer.LoadBalancer` - instance. 
- :param dict attrs: The attributes to update on the load balancer - represented by ``load_balancer``. + :class:`~openstack.network.v2.load_balancer.LoadBalancer` + instance. + :param attrs: The attributes to update on the load balancer + represented by ``load_balancer``. :returns: The updated load balancer :rtype: :class:`~openstack.network.v2.load_balancer.LoadBalancer` """ - return self._update(_load_balancer.LoadBalancer, load_balancer, - **attrs) + return self._update( + _load_balancer.LoadBalancer, load_balancer, **attrs + ) def create_metering_label(self, **attrs): """Create a new metering label from attributes - :param dict attrs: Keyword arguments which will be used to create + :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.metering_label.MeteringLabel`, comprised of the properties on the MeteringLabel class. @@ -837,46 +2158,56 @@ def delete_metering_label(self, metering_label, ignore_missing=True): """Delete a metering label :param metering_label: - The value can be either the ID of a metering label or a - :class:`~openstack.network.v2.metering_label.MeteringLabel` - instance. + The value can be either the ID of a metering label or a + :class:`~openstack.network.v2.metering_label.MeteringLabel` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the metering label does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent metering label. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the metering label does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent metering label. 
:returns: ``None`` """ - self._delete(_metering_label.MeteringLabel, metering_label, - ignore_missing=ignore_missing) + self._delete( + _metering_label.MeteringLabel, + metering_label, + ignore_missing=ignore_missing, + ) - def find_metering_label(self, name_or_id, ignore_missing=True): + def find_metering_label(self, name_or_id, ignore_missing=True, **query): """Find a single metering label :param name_or_id: The name or ID of a metering label. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.metering_label. - MeteringLabel` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.metering_label.MeteringLabel` + or None """ - return self._find(_metering_label.MeteringLabel, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _metering_label.MeteringLabel, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_metering_label(self, metering_label): """Get a single metering label :param metering_label: The value can be the ID of a metering label or a - :class:`~openstack.network.v2.metering_label.MeteringLabel` - instance. + :class:`~openstack.network.v2.metering_label.MeteringLabel` + instance. :returns: One - :class:`~openstack.network.v2.metering_label.MeteringLabel` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. 
+ :class:`~openstack.network.v2.metering_label.MeteringLabel` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_metering_label.MeteringLabel, metering_label) @@ -884,181 +2215,209 @@ def metering_labels(self, **query): """Return a generator of metering labels :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``description``: Description of a metering label. * ``name``: Name of a metering label. * ``is_shared``: Boolean indicating whether a metering label is - shared. + shared. * ``project_id``: The ID of the project a metering label is - associated with. + associated with. :returns: A generator of metering label objects :rtype: :class:`~openstack.network.v2.metering_label.MeteringLabel` """ - return self._list(_metering_label.MeteringLabel, paginated=False, - **query) + return self._list(_metering_label.MeteringLabel, **query) def update_metering_label(self, metering_label, **attrs): """Update a metering label :param metering_label: Either the id of a metering label or a - :class:`~openstack.network.v2.metering_label. - MeteringLabel` instance. - :param dict attrs: The attributes to update on the metering label - represented by ``metering_label``. + :class:`~openstack.network.v2.metering_label.MeteringLabel` + instance. + :param attrs: The attributes to update on the metering label + represented by ``metering_label``. 
:returns: The updated metering label :rtype: :class:`~openstack.network.v2.metering_label.MeteringLabel` """ - return self._update(_metering_label.MeteringLabel, metering_label, - **attrs) + return self._update( + _metering_label.MeteringLabel, metering_label, **attrs + ) def create_metering_label_rule(self, **attrs): """Create a new metering label rule from attributes - :param dict attrs: Keyword arguments which will be used to create a - :class:`~openstack.network.v2.metering_label_rule.\ - MeteringLabelRule`, comprised of the properties on - the MeteringLabelRule class. + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule`, + comprised of the properties on the MeteringLabelRule class. :returns: The results of metering label rule creation - :rtype: :class:`~openstack.network.v2.metering_label_rule.\ - MeteringLabelRule` + :rtype: + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` """ return self._create(_metering_label_rule.MeteringLabelRule, **attrs) - def delete_metering_label_rule(self, metering_label_rule, - ignore_missing=True): + def delete_metering_label_rule( + self, metering_label_rule, ignore_missing=True + ): """Delete a metering label rule :param metering_label_rule: The value can be either the ID of a metering label rule - or a :class:`~openstack.network.v2.metering_label_rule.\ - MeteringLabelRule` instance. + or a + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be raised + :class:`~openstack.exceptions.NotFoundException` will be raised when the metering label rule does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent metering label rule. 
:returns: ``None`` """ - self._delete(_metering_label_rule.MeteringLabelRule, - metering_label_rule, ignore_missing=ignore_missing) + self._delete( + _metering_label_rule.MeteringLabelRule, + metering_label_rule, + ignore_missing=ignore_missing, + ) - def find_metering_label_rule(self, name_or_id, ignore_missing=True): + def find_metering_label_rule( + self, name_or_id, ignore_missing=True, **query + ): """Find a single metering label rule :param name_or_id: The name or ID of a metering label rule. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.metering_label_rule. - MeteringLabelRule` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` + or None """ - return self._find(_metering_label_rule.MeteringLabelRule, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _metering_label_rule.MeteringLabelRule, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_metering_label_rule(self, metering_label_rule): """Get a single metering label rule :param metering_label_rule: The value can be the ID of a metering label rule or a - :class:`~openstack.network.v2.metering_label_rule.\ - MeteringLabelRule` instance. + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` + instance. 
:returns: One - :class:`~openstack.network.v2.metering_label_rule.\ - MeteringLabelRule` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_metering_label_rule.MeteringLabelRule, - metering_label_rule) + return self._get( + _metering_label_rule.MeteringLabelRule, metering_label_rule + ) def metering_label_rules(self, **query): """Return a generator of metering label rules :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``direction``: The direction in which metering label rule is - applied. + applied. * ``metering_label_id``: The ID of a metering label this rule is - associated with. + associated with. * ``project_id``: The ID of the project the metering label rule is - associated with. + associated with. * ``remote_ip_prefix``: The remote IP prefix to be associated with - this metering label rule. + this metering label rule. :returns: A generator of metering label rule objects - :rtype: :class:`~openstack.network.v2.metering_label_rule. - MeteringLabelRule` + :rtype: + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` """ - return self._list(_metering_label_rule.MeteringLabelRule, - paginated=False, **query) + return self._list(_metering_label_rule.MeteringLabelRule, **query) def update_metering_label_rule(self, metering_label_rule, **attrs): """Update a metering label rule :param metering_label_rule: - Either the id of a metering label rule or a - :class:`~openstack.network.v2.metering_label_rule. - MeteringLabelRule` instance. - :param dict attrs: The attributes to update on the metering label rule - represented by ``metering_label_rule``. 
+ Either the id of a metering label rule or a + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` + instance. + :param attrs: The attributes to update on the metering label rule + represented by ``metering_label_rule``. :returns: The updated metering label rule - :rtype: :class:`~openstack.network.v2.metering_label_rule. - MeteringLabelRule` + :rtype: + :class:`~openstack.network.v2.metering_label_rule.MeteringLabelRule` """ - return self._update(_metering_label_rule.MeteringLabelRule, - metering_label_rule, **attrs) + return self._update( + _metering_label_rule.MeteringLabelRule, + metering_label_rule, + **attrs, + ) def create_network(self, **attrs): """Create a new network from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.network.Network`, - comprised of the properties on the Network class. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.network.Network`, + comprised of the properties on the Network class. :returns: The results of network creation :rtype: :class:`~openstack.network.v2.network.Network` """ return self._create(_network.Network, **attrs) - def delete_network(self, network, ignore_missing=True): + def delete_network(self, network, ignore_missing=True, if_revision=None): """Delete a network :param network: The value can be either the ID of a network or a :class:`~openstack.network.v2.network.Network` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the network does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent network. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the network does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent network. 
+ :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. :returns: ``None`` """ - self._delete(_network.Network, network, ignore_missing=ignore_missing) + self._delete( + _network.Network, + network, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) - def find_network(self, name_or_id, ignore_missing=True): + def find_network(self, name_or_id, ignore_missing=True, **query): """Find a single network :param name_or_id: The name or ID of a network. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.network.Network` or None """ - return self._find(_network.Network, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _network.Network, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_network(self, network): """Get a single network @@ -1068,22 +2427,22 @@ def get_network(self, network): :class:`~openstack.network.v2.network.Network` instance. :returns: One :class:`~openstack.network.v2.network.Network` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" return self._get(_network.Network, network) def networks(self, **query): """Return a generator of networks - :param kwargs \*\*query: Optional query parameters to be sent to limit + :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``description``: The network description. * ``ipv4_address_scope_id``: The ID of the IPv4 address scope for - the network. + the network. * ``ipv6_address_scope_id``: The ID of the IPv6 address scope for - the network. + the network. * ``is_admin_state_up``: Network administrative state * ``is_port_security_enabled``: The port security status. * ``is_router_external``: Network is external or not. @@ -1094,40 +2453,53 @@ def networks(self, **query): * ``provider_network_type``: Network physical mechanism * ``provider_physical_network``: Physical network * ``provider_segmentation_id``: VLAN ID for VLAN networks or Tunnel - ID for GENEVE/GRE/VXLAN networks + ID for GENEVE/GRE/VXLAN networks :returns: A generator of network objects :rtype: :class:`~openstack.network.v2.network.Network` """ - return self._list(_network.Network, paginated=False, **query) + return self._list(_network.Network, **query) - def update_network(self, network, **attrs): + def update_network(self, network, if_revision=None, **attrs): """Update a network :param network: Either the id of a network or an instance of type - :class:`~openstack.network.v2.network.Network`. - :param dict attrs: The attributes to update on the network represented - by ``network``. + :class:`~openstack.network.v2.network.Network`. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + :param attrs: The attributes to update on the network represented + by ``network``. 
:returns: The updated network :rtype: :class:`~openstack.network.v2.network.Network` """ - return self._update(_network.Network, network, **attrs) + return self._update( + _network.Network, network, if_revision=if_revision, **attrs + ) - def find_network_ip_availability(self, name_or_id, ignore_missing=True): + def find_network_ip_availability( + self, name_or_id, ignore_missing=True, **query + ): """Find IP availability of a network :param name_or_id: The name or ID of a network. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.network_ip_availability. - NetworkIPAvailability` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.network_ip_availability.NetworkIPAvailability` + or None """ - return self._find(network_ip_availability.NetworkIPAvailability, - name_or_id, ignore_missing=ignore_missing) + return self._find( + network_ip_availability.NetworkIPAvailability, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) def get_network_ip_availability(self, network): """Get IP availability of a network @@ -1136,40 +2508,173 @@ def get_network_ip_availability(self, network): The value can be the ID of a network or a :class:`~openstack.network.v2.network.Network` instance. - :returns: One :class:`~openstack.network.v2.network_ip_availability. - NetworkIPAvailability` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. 
+ :returns: One + :class:`~openstack.network.v2.network_ip_availability.NetworkIPAvailability` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(network_ip_availability.NetworkIPAvailability, - network) + return self._get( + network_ip_availability.NetworkIPAvailability, network + ) def network_ip_availabilities(self, **query): """Return a generator of network ip availabilities - :param kwargs \*\*query: Optional query parameters to be sent to limit + :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``ip_version``: IP version of the network * ``network_id``: ID of network to use when listening network IP - availability. + availability. * ``network_name``: The name of the network for the particular - network IP availability. + network IP availability. * ``project_id``: Owner tenant ID :returns: A generator of network ip availability objects - :rtype: :class:`~openstack.network.v2.network_ip_availability. - NetworkIPAvailability` + :rtype: + :class:`~openstack.network.v2.network_ip_availability.NetworkIPAvailability` + """ + return self._list( + network_ip_availability.NetworkIPAvailability, **query + ) + + def create_network_segment_range(self, **attrs): + """Create a new network segment range from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange`, + comprised of the properties on the + NetworkSegmentRange class. 
+ + :returns: The results of network segment range creation + :rtype: + :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange` + """ + return self._create( + _network_segment_range.NetworkSegmentRange, **attrs + ) + + def delete_network_segment_range( + self, network_segment_range, ignore_missing=True + ): + """Delete a network segment range + + :param network_segment_range: The value can be either the ID of a + network segment range or a + :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the network segment range does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent network segment range. + + :returns: ``None`` + """ + self._delete( + _network_segment_range.NetworkSegmentRange, + network_segment_range, + ignore_missing=ignore_missing, + ) + + def find_network_segment_range( + self, name_or_id, ignore_missing=True, **query + ): + """Find a single network segment range + + :param name_or_id: The name or ID of a network segment range. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange` + or None + """ + return self._find( + _network_segment_range.NetworkSegmentRange, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_network_segment_range(self, network_segment_range): + """Get a single network segment range + + :param network_segment_range: The value can be the ID of a network + segment range or a + :class:`~openstack.network.v2.network_segment_range.NetworkSegmentRange` + instance. + + :returns: One + :class:`~openstack.network.v2._network_segment_range.NetworkSegmentRange` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _network_segment_range.NetworkSegmentRange, network_segment_range + ) + + def network_segment_ranges(self, **query): + """Return a generator of network segment ranges + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: + + * ``name``: Name of the segments + * ``default``: The network segment range is loaded from the host + configuration file. 
+ * ``shared``: The network segment range is shared with other + projects + * ``project_id``: ID of the project that owns the network + segment range + * ``network_type``: Network type for the network segment ranges + * ``physical_network``: Physical network name for the network + segment ranges + * ``minimum``: Minimum segmentation ID for the network segment + ranges + * ``maximum``: Maximum Segmentation ID for the network segment + ranges + * ``used``: Mapping of which segmentation ID in the range is + used by which tenant + * ``available``: List of available segmentation IDs in this + network segment range + + :returns: A generator of network segment range objects + :rtype: + :class:`~openstack.network.v2._network_segment_range.NetworkSegmentRange` + """ + return self._list(_network_segment_range.NetworkSegmentRange, **query) + + def update_network_segment_range(self, network_segment_range, **attrs): + """Update a network segment range + + :param network_segment_range: Either the ID of a network segment range + or a + :class:`~openstack.network.v2._network_segment_range.NetworkSegmentRange` + instance. + :param attrs: The attributes to update on the network segment range + represented by ``network_segment_range``. + + :returns: The updated network segment range + :rtype: + :class:`~openstack.network.v2._network_segment_range.NetworkSegmentRange` """ - return self._list(network_ip_availability.NetworkIPAvailability, - paginated=False, **query) + return self._update( + _network_segment_range.NetworkSegmentRange, + network_segment_range, + **attrs, + ) def create_pool(self, **attrs): """Create a new pool from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.pool.Pool`, - comprised of the properties on the Pool class. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.pool.Pool`, + comprised of the properties on the Pool class. 
:returns: The results of pool creation :rtype: :class:`~openstack.network.v2.pool.Pool` @@ -1180,40 +2685,43 @@ def delete_pool(self, pool, ignore_missing=True): """Delete a pool :param pool: The value can be either the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance. + :class:`~openstack.network.v2.pool.Pool` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the pool does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent pool. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the pool does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent pool. :returns: ``None`` """ self._delete(_pool.Pool, pool, ignore_missing=ignore_missing) - def find_pool(self, name_or_id, ignore_missing=True): + def find_pool(self, name_or_id, ignore_missing=True, **query): """Find a single pool :param name_or_id: The name or ID of a pool. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.pool.Pool` or None """ - return self._find(_pool.Pool, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _pool.Pool, name_or_id, ignore_missing=ignore_missing, **query + ) def get_pool(self, pool): """Get a single pool :param pool: The value can be the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance. 
+ :class:`~openstack.network.v2.pool.Pool` instance. :returns: One :class:`~openstack.network.v2.pool.Pool` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_pool.Pool, pool) @@ -1221,35 +2729,35 @@ def pools(self, **query): """Return a generator of pools :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``description``: The description for the pool. * ``is_admin_state_up``: The administrative state of the pool. * ``lb_algorithm``: The load-balancer algorithm used, which is one - of ``round-robin``, ``least-connections`` and so on. + of ``round-robin``, ``least-connections`` and so on. * ``name``: The name of the node pool. * ``project_id``: The ID of the project the pool is associated - with. + with. * ``protocol``: The protocol used by the pool, which is one of - ``TCP``, ``HTTP`` or ``HTTPS``. + ``TCP``, ``HTTP`` or ``HTTPS``. * ``provider``: The name of the provider of the load balancer - service. + service. * ``subnet_id``: The subnet on which the members of the pool are - located. + located. * ``virtual_ip_id``: The ID of the virtual IP used. :returns: A generator of pool objects :rtype: :class:`~openstack.network.v2.pool.Pool` """ - return self._list(_pool.Pool, paginated=False, **query) + return self._list(_pool.Pool, **query) def update_pool(self, pool, **attrs): """Update a pool :param pool: Either the id of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance. - :param dict attrs: The attributes to update on the pool represented - by ``pool``. + :class:`~openstack.network.v2.pool.Pool` instance. + :param attrs: The attributes to update on the pool represented + by ``pool``. 
:returns: The updated pool :rtype: :class:`~openstack.network.v2.pool.Pool` @@ -1260,9 +2768,9 @@ def create_pool_member(self, pool, **attrs): """Create a new pool member from attributes :param pool: The pool can be either the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance that - the member will be created in. - :param dict attrs: Keyword arguments which will be used to create + :class:`~openstack.network.v2.pool.Pool` instance that + the member will be created in. + :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.pool_member.PoolMember`, comprised of the properties on the PoolMember class. @@ -1270,8 +2778,9 @@ def create_pool_member(self, pool, **attrs): :rtype: :class:`~openstack.network.v2.pool_member.PoolMember` """ poolobj = self._get_resource(_pool.Pool, pool) - return self._create(_pool_member.PoolMember, pool_id=poolobj.id, - **attrs) + return self._create( + _pool_member.PoolMember, pool_id=poolobj.id, **attrs + ) def delete_pool_member(self, pool_member, pool, ignore_missing=True): """Delete a pool member @@ -1280,167 +2789,201 @@ def delete_pool_member(self, pool_member, pool, ignore_missing=True): The member can be either the ID of a pool member or a :class:`~openstack.network.v2.pool_member.PoolMember` instance. :param pool: The pool can be either the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance that - the member belongs to. + :class:`~openstack.network.v2.pool.Pool` instance that + the member belongs to. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the pool member does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent pool member. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the pool member does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent pool member. :returns: ``None`` """ poolobj = self._get_resource(_pool.Pool, pool) - self._delete(_pool_member.PoolMember, pool_member, - ignore_missing=ignore_missing, pool_id=poolobj.id) - - def find_pool_member(self, name_or_id, pool, ignore_missing=True): + self._delete( + _pool_member.PoolMember, + pool_member, + ignore_missing=ignore_missing, + pool_id=poolobj.id, + ) + + def find_pool_member(self, name_or_id, pool, ignore_missing=True, **query): """Find a single pool member :param str name_or_id: The name or ID of a pool member. :param pool: The pool can be either the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance that - the member belongs to. + :class:`~openstack.network.v2.pool.Pool` instance that + the member belongs to. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.pool_member.PoolMember` - or None + or None """ poolobj = self._get_resource(_pool.Pool, pool) - return self._find(_pool_member.PoolMember, name_or_id, - ignore_missing=ignore_missing, pool_id=poolobj.id) + return self._find( + _pool_member.PoolMember, + name_or_id, + ignore_missing=ignore_missing, + pool_id=poolobj.id, + **query, + ) def get_pool_member(self, pool_member, pool): """Get a single pool member :param pool_member: The member can be the ID of a pool member or a - :class:`~openstack.network.v2.pool_member.PoolMember` - instance. 
+ :class:`~openstack.network.v2.pool_member.PoolMember` + instance. :param pool: The pool can be either the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance that - the member belongs to. + :class:`~openstack.network.v2.pool.Pool` instance that + the member belongs to. :returns: One :class:`~openstack.network.v2.pool_member.PoolMember` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ poolobj = self._get_resource(_pool.Pool, pool) - return self._get(_pool_member.PoolMember, pool_member, - pool_id=poolobj.id) + return self._get( + _pool_member.PoolMember, pool_member, pool_id=poolobj.id + ) def pool_members(self, pool, **query): """Return a generator of pool members :param pool: The pool can be either the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance that - the member belongs to. + :class:`~openstack.network.v2.pool.Pool` instance that + the member belongs to. :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``address``: The IP address of the pool member. * ``is_admin_state_up``: The administrative state of the pool - member. + member. * ``name``: Name of the pool member. * ``project_id``: The ID of the project this pool member is - associated with. + associated with. * ``protocol_port``: The port on which the application is hosted. * ``subnet_id``: Subnet ID in which to access this pool member. * ``weight``: A positive integer value that indicates the relative - portion of traffic that this member should receive from the - pool. + portion of traffic that this member should receive from the + pool. 
:returns: A generator of pool member objects :rtype: :class:`~openstack.network.v2.pool_member.PoolMember` """ poolobj = self._get_resource(_pool.Pool, pool) - return self._list(_pool_member.PoolMember, paginated=False, - pool_id=poolobj.id, **query) + return self._list(_pool_member.PoolMember, pool_id=poolobj.id, **query) def update_pool_member(self, pool_member, pool, **attrs): """Update a pool member :param pool_member: Either the ID of a pool member or a - :class:`~openstack.network.v2.pool_member.PoolMember` - instance. + :class:`~openstack.network.v2.pool_member.PoolMember` + instance. :param pool: The pool can be either the ID of a pool or a - :class:`~openstack.network.v2.pool.Pool` instance that - the member belongs to. - :param dict attrs: The attributes to update on the pool member - represented by ``pool_member``. + :class:`~openstack.network.v2.pool.Pool` instance that + the member belongs to. + :param attrs: The attributes to update on the pool member + represented by ``pool_member``. :returns: The updated pool member :rtype: :class:`~openstack.network.v2.pool_member.PoolMember` """ poolobj = self._get_resource(_pool.Pool, pool) - return self._update(_pool_member.PoolMember, pool_member, - pool_id=poolobj.id, **attrs) + return self._update( + _pool_member.PoolMember, pool_member, pool_id=poolobj.id, **attrs + ) def create_port(self, **attrs): """Create a new port from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.port.Port`, - comprised of the properties on the Port class. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.port.Port`, + comprised of the properties on the Port class. 
:returns: The results of port creation :rtype: :class:`~openstack.network.v2.port.Port` """ return self._create(_port.Port, **attrs) - def delete_port(self, port, ignore_missing=True): + def create_ports(self, data): + """Create ports from the list of attributes + + :param list data: List of dicts of attributes which will be used to + create a :class:`~openstack.network.v2.port.Port`, + comprised of the properties on the Port class. + + :returns: A generator of port objects + :rtype: :class:`~openstack.network.v2.port.Port` + """ + return self._bulk_create(_port.Port, data) + + def delete_port(self, port, ignore_missing=True, if_revision=None): """Delete a port :param port: The value can be either the ID of a port or a - :class:`~openstack.network.v2.port.Port` instance. + :class:`~openstack.network.v2.port.Port` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the port does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent port. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the port does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent port. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. :returns: ``None`` """ - self._delete(_port.Port, port, ignore_missing=ignore_missing) + self._delete( + _port.Port, + port, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) - def find_port(self, name_or_id, ignore_missing=True): + def find_port(self, name_or_id, ignore_missing=True, **query): """Find a single port :param name_or_id: The name or ID of a port. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. 
- When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.port.Port` or None """ - return self._find(_port.Port, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _port.Port, name_or_id, ignore_missing=ignore_missing, **query + ) def get_port(self, port): """Get a single port :param port: The value can be the ID of a port or a - :class:`~openstack.network.v2.port.Port` instance. + :class:`~openstack.network.v2.port.Port` instance. :returns: One :class:`~openstack.network.v2.port.Port` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_port.Port, port) def ports(self, **query): """Return a generator of ports - :param kwargs \*\*query: Optional query parameters to be sent to limit + :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: * ``description``: The port description. * ``device_id``: Port device ID. * ``device_owner``: Port device owner (e.g. ``network:dhcp``). - * ``ip_adress``: IP addresses of an allowed address pair. + * ``ip_address``: IP addresses of an allowed address pair. * ``is_admin_state_up``: The administrative state of the port. * ``is_port_security_enabled``: The port security status. * ``mac_address``: Port MAC address. 
@@ -1453,28 +2996,35 @@ def ports(self, **query): :returns: A generator of port objects :rtype: :class:`~openstack.network.v2.port.Port` """ - return self._list(_port.Port, paginated=False, **query) + return self._list(_port.Port, **query) - def update_port(self, port, **attrs): + def update_port( + self, + port: str | _port.Port, + if_revision: int | None = None, + **attrs: ty.Any, + ) -> _port.Port: """Update a port :param port: Either the id of a port or a - :class:`~openstack.network.v2.port.Port` instance. - :param dict attrs: The attributes to update on the port represented - by ``port``. + :class:`~openstack.network.v2.port.Port` instance. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + :param attrs: The attributes to update on the port represented + by ``port``. :returns: The updated port :rtype: :class:`~openstack.network.v2.port.Port` """ - return self._update(_port.Port, port, **attrs) + return self._update(_port.Port, port, if_revision=if_revision, **attrs) def add_ip_to_port(self, port, ip): - ip['port_id'] = port.id - return ip.update(self.session) + ip.port_id = port.id + return ip.commit(self) def remove_ip_from_port(self, ip): - ip['port_id'] = None - return ip.update(self.session) + ip.port_id = None + return ip.commit(self) def get_subnet_ports(self, subnet_id): result = [] @@ -1485,434 +3035,890 @@ def get_subnet_ports(self, subnet_id): result.append(puerta) return result + def create_port_binding(self, port, **attrs): + """Create a port binding + + :param port: The value can be the ID of a port or a + :class:`~openstack.network.v2.port.Port` instance. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.port.Port`, + comprised of the properties on the Port class. 
+ + :returns: The results of port binding creation + :rtype: :class:`~openstack.network.v2.port_binding.PortBinding` + """ + port_id = self._get(_port.Port, port).id + return self._create( + _port_binding.PortBinding, + port_id=port_id, + **attrs, + ) + + def activate_port_binding( + self, + port, + **attrs, + ): + """Activate a port binding + + :param port: The value can be the ID of a port or a + :class:`~openstack.network.v2.port.Port` instance. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.port.Port`, + comprised of the properties on the Port class. + + :returns: The results of port binding creation + :rtype: :class:`~openstack.network.v2.port_binding.PortBinding` + """ + port_id = self._get(_port.Port, port).id + host = attrs['host'] + bindings_on_host = self.port_bindings(port=port_id, host=host) + # There can be only 1 binding on a host at a time + for binding in bindings_on_host: + return binding.activate_port_binding(self, **attrs) + + def port_bindings(self, port, **query): + """Get a single port binding + + :param port: The value can be the ID of a port or a + :class:`~openstack.network.v2.port.Port` instance. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: + + * ``host``: The host on which the port is bound. + * ``vif_type``: The mechanism used for the port like bridge or ovs. + * ``vnic_type``: The type of the vnic, like normal or baremetal. + * ``status``: The port status. Value is ``ACTIVE`` or ``DOWN``. 
+ + :returns: A generator of PortBinding objects + :rtype: :class:`~openstack.network.v2.port_binding.PortBinding` + """ + port_id = self._get(_port.Port, port).id + return self._list( + _port_binding.PortBinding, + port_id=port_id, + **query, + ) + + def delete_port_binding(self, port, host): + """Delete a Port Binding + + :param port: The value can be either the ID of a port or a + :class:`~openstack.network.v2.port.Port` instance. + :param host: The host on which the port is bound. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.ResourceNotFound` will be + raised when the port does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent port. + + :returns: ``None`` + """ + port_id = self._get(_port.Port, port).id + bindings_on_host = self.port_bindings(port=port_id, host=host) + # There can be only 1 binding on a host at a time + for binding in bindings_on_host: + return binding.delete_port_binding(self, host=host) + def create_qos_bandwidth_limit_rule(self, qos_policy, **attrs): """Create a new bandwidth limit rule - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2. - qos_bandwidth_limit_rule.QoSBandwidthLimitRule`, - comprised of the properties on the - QoSBandwidthLimitRule class. + :param attrs: Keyword arguments which will be used to create + a + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule`, + comprised of the properties on the + QoSBandwidthLimitRule class. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: The results of resource creation - :rtype: :class:`~openstack.network.v2.qos_bandwidth_limit_rule. 
- QoSBandwidthLimitRule` + :rtype: + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._create(_qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - qos_policy_id=policy.id, **attrs) - - def delete_qos_bandwidth_limit_rule(self, qos_rule, qos_policy, - ignore_missing=True): + return self._create( + _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + qos_policy_id=policy.id, + **attrs, + ) + + def delete_qos_bandwidth_limit_rule( + self, qos_rule, qos_policy, ignore_missing=True + ): """Delete a bandwidth limit rule :param qos_rule: The value can be either the ID of a bandwidth limit - rule or a :class:`~openstack.network.v2. - qos_bandwidth_limit_rule.QoSBandwidthLimitRule` - instance. + rule or a + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent bandwidth limit rule. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent bandwidth limit rule. 
:returns: ``None`` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - self._delete(_qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - qos_rule, ignore_missing=ignore_missing, - qos_policy_id=policy.id) - - def find_qos_bandwidth_limit_rule(self, qos_rule_id, qos_policy, - ignore_missing=True): + self._delete( + _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + qos_rule, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + ) + + def find_qos_bandwidth_limit_rule( + self, qos_rule_id, qos_policy, ignore_missing=True, **query + ): """Find a bandwidth limit rule :param qos_rule_id: The ID of a bandwidth limit rule. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.qos_bandwidth_limit_rule. - QoSBandwidthLimitRule` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` + or None """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._find(_qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - qos_rule_id, ignore_missing=ignore_missing, - qos_policy_id=policy.id) + return self._find( + _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + qos_rule_id, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + **query, + ) def get_qos_bandwidth_limit_rule(self, qos_rule, qos_policy): """Get a single bandwidth limit rule :param qos_rule: The value can be the ID of a minimum bandwidth rule or - a :class:`~openstack.network.v2. - qos_bandwidth_limit_rule.QoSBandwidthLimitRule` - instance. + a + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. - :returns: One :class:`~openstack.network.v2.qos_bandwidth_limit_rule. - QoSBandwidthLimitRule` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :returns: One + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._get(_qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - qos_rule, qos_policy_id=policy.id) + return self._get( + _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + qos_rule, + qos_policy_id=policy.id, + ) def qos_bandwidth_limit_rules(self, qos_policy, **query): """Return a generator of bandwidth limit rules :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. 
- qos_policy.QoSPolicy` instance. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of bandwidth limit rule objects - :rtype: :class:`~openstack.network.v2.qos_bandwidth_limit_rule. - QoSBandwidthLimitRule` + :rtype: + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._list(_qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - paginated=False, qos_policy_id=policy.id, **query) - - def update_qos_bandwidth_limit_rule(self, qos_rule, qos_policy, - **attrs): + return self._list( + _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + qos_policy_id=policy.id, + **query, + ) + + def update_qos_bandwidth_limit_rule( + self, + qos_rule, + qos_policy, + **attrs, + ): """Update a bandwidth limit rule :param qos_rule: Either the id of a bandwidth limit rule or a - :class:`~openstack.network.v2. - qos_bandwidth_limit_rule.QoSBandwidthLimitRule` - instance. + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. - :attrs kwargs: The attributes to update on the bandwidth limit rule - represented by ``value``. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param attrs: The attributes to update on the bandwidth limit rule + represented by ``qos_rule``. :returns: The updated minimum bandwidth rule - :rtype: :class:`~openstack.network.v2.qos_bandwidth_limit_rule. 
- QoSBandwidthLimitRule` + :rtype: + :class:`~openstack.network.v2.qos_bandwidth_limit_rule.QoSBandwidthLimitRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._update(_qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - qos_rule, qos_policy_id=policy.id, **attrs) + return self._update( + _qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + qos_rule, + qos_policy_id=policy.id, + **attrs, + ) def create_qos_dscp_marking_rule(self, qos_policy, **attrs): """Create a new QoS DSCP marking rule - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2. - qos_dscp_marking_rule.QoSDSCPMarkingRule`, - comprised of the properties on the - QosDscpMarkingRule class. + :param attrs: Keyword arguments which will be used to create + a + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule`, + comprised of the properties on the + QosDscpMarkingRule class. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: The results of router creation - :rtype: :class:`~openstack.network.v2.qos_dscp_marking_rule. 
- QoSDSCPMarkingRule` + :rtype: + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._create(_qos_dscp_marking_rule.QoSDSCPMarkingRule, - qos_policy_id=policy.id, **attrs) - - def delete_qos_dscp_marking_rule(self, qos_rule, qos_policy, - ignore_missing=True): + return self._create( + _qos_dscp_marking_rule.QoSDSCPMarkingRule, + qos_policy_id=policy.id, + **attrs, + ) + + def delete_qos_dscp_marking_rule( + self, qos_rule, qos_policy, ignore_missing=True + ): """Delete a QoS DSCP marking rule :param qos_rule: The value can be either the ID of a minimum bandwidth - rule or a :class:`~openstack.network.v2. - qos_dscp_marking_rule.QoSDSCPMarkingRule` - instance. + rule or a + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent minimum bandwidth rule. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent minimum bandwidth rule. 
:returns: ``None`` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - self._delete(_qos_dscp_marking_rule.QoSDSCPMarkingRule, - qos_rule, ignore_missing=ignore_missing, - qos_policy_id=policy.id) - - def find_qos_dscp_marking_rule(self, qos_rule_id, qos_policy, - ignore_missing=True): + self._delete( + _qos_dscp_marking_rule.QoSDSCPMarkingRule, + qos_rule, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + ) + + def find_qos_dscp_marking_rule( + self, qos_rule_id, qos_policy, ignore_missing=True, **query + ): """Find a QoS DSCP marking rule :param qos_rule_id: The ID of a QoS DSCP marking rule. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.qos_dscp_marking_rule. - QoSDSCPMarkingRule` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods, such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` + or None """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._find(_qos_dscp_marking_rule.QoSDSCPMarkingRule, - qos_rule_id, ignore_missing=ignore_missing, - qos_policy_id=policy.id) + return self._find( + _qos_dscp_marking_rule.QoSDSCPMarkingRule, + qos_rule_id, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + **query, + ) def get_qos_dscp_marking_rule(self, qos_rule, qos_policy): """Get a single QoS DSCP marking rule :param qos_rule: The value can be the ID of a minimum bandwidth rule or - a :class:`~openstack.network.v2.qos_dscp_marking_rule. - QoSDSCPMarkingRule` instance. + a + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. - :returns: One :class:`~openstack.network.v2.qos_dscp_marking_rule. - QoSDSCPMarkingRule` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :returns: One + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._get(_qos_dscp_marking_rule.QoSDSCPMarkingRule, - qos_rule, qos_policy_id=policy.id) + return self._get( + _qos_dscp_marking_rule.QoSDSCPMarkingRule, + qos_rule, + qos_policy_id=policy.id, + ) def qos_dscp_marking_rules(self, qos_policy, **query): """Return a generator of QoS DSCP marking rules :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. 
- :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of QoS DSCP marking rule objects - :rtype: :class:`~openstack.network.v2.qos_dscp_marking_rule. - QoSDSCPMarkingRule` + :rtype: + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._list(_qos_dscp_marking_rule.QoSDSCPMarkingRule, - paginated=False, qos_policy_id=policy.id, **query) + return self._list( + _qos_dscp_marking_rule.QoSDSCPMarkingRule, + qos_policy_id=policy.id, + **query, + ) def update_qos_dscp_marking_rule(self, qos_rule, qos_policy, **attrs): """Update a QoS DSCP marking rule :param qos_rule: Either the id of a minimum bandwidth rule or a - :class:`~openstack.network.v2.qos_dscp_marking_rule. - QoSDSCPMarkingRule` instance. + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. - :attrs kwargs: The attributes to update on the QoS DSCP marking rule - represented by ``value``. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param attrs: The attributes to update on the QoS DSCP marking rule + represented by ``qos_rule``. :returns: The updated QoS DSCP marking rule - :rtype: :class:`~openstack.network.v2.qos_dscp_marking_rule. 
- QoSDSCPMarkingRule` + :rtype: + :class:`~openstack.network.v2.qos_dscp_marking_rule.QoSDSCPMarkingRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._update(_qos_dscp_marking_rule.QoSDSCPMarkingRule, - qos_rule, qos_policy_id=policy.id, **attrs) + return self._update( + _qos_dscp_marking_rule.QoSDSCPMarkingRule, + qos_rule, + qos_policy_id=policy.id, + **attrs, + ) def create_qos_minimum_bandwidth_rule(self, qos_policy, **attrs): """Create a new minimum bandwidth rule - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2. - qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule`, - comprised of the properties on the - QoSMinimumBandwidthRule class. + :param attrs: Keyword arguments which will be used to create + a + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule`, + comprised of the properties on the + QoSMinimumBandwidthRule class. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :returns: The results of resource creation - :rtype: :class:`~openstack.network.v2.qos_minimum_bandwidth_rule. - QoSMinimumBandwidthRule` + :rtype: + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) return self._create( _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - qos_policy_id=policy.id, **attrs) + qos_policy_id=policy.id, + **attrs, + ) - def delete_qos_minimum_bandwidth_rule(self, qos_rule, qos_policy, - ignore_missing=True): + def delete_qos_minimum_bandwidth_rule( + self, qos_rule, qos_policy, ignore_missing=True + ): """Delete a minimum bandwidth rule :param qos_rule: The value can be either the ID of a minimum bandwidth - rule or a :class:`~openstack.network.v2. 
- qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` - instance. + rule or a + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent minimum bandwidth rule. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent minimum bandwidth rule. :returns: ``None`` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - self._delete(_qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - qos_rule, ignore_missing=ignore_missing, - qos_policy_id=policy.id) - - def find_qos_minimum_bandwidth_rule(self, qos_rule_id, qos_policy, - ignore_missing=True): + self._delete( + _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, + qos_rule, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + ) + + def find_qos_minimum_bandwidth_rule( + self, qos_rule_id, qos_policy, ignore_missing=True, **query + ): """Find a minimum bandwidth rule :param qos_rule_id: The ID of a minimum bandwidth rule. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. 
- When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.qos_minimum_bandwidth_rule. - QoSMinimumBandwidthRule` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods, such as query filters. + :returns: One + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` + or None """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._find(_qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - qos_rule_id, ignore_missing=ignore_missing, - qos_policy_id=policy.id) + return self._find( + _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, + qos_rule_id, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + **query, + ) def get_qos_minimum_bandwidth_rule(self, qos_rule, qos_policy): """Get a single minimum bandwidth rule :param qos_rule: The value can be the ID of a minimum bandwidth rule or - a :class:`~openstack.network.v2. - qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` - instance. + a + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. - :returns: One :class:`~openstack.network.v2.qos_minimum_bandwidth_rule. - QoSMinimumBandwidthRule` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` + instance. 
+ :returns: One + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` + :raises: + :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._get(_qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - qos_rule, qos_policy_id=policy.id) + return self._get( + _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, + qos_rule, + qos_policy_id=policy.id, + ) def qos_minimum_bandwidth_rules(self, qos_policy, **query): """Return a generator of minimum bandwidth rules :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of minimum bandwidth rule objects - :rtype: :class:`~openstack.network.v2.qos_minimum_bandwidth_rule. - QoSMinimumBandwidthRule` + :rtype: + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._list(_qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - paginated=False, qos_policy_id=policy.id, **query) + return self._list( + _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, + qos_policy_id=policy.id, + **query, + ) - def update_qos_minimum_bandwidth_rule(self, qos_rule, qos_policy, - **attrs): + def update_qos_minimum_bandwidth_rule(self, qos_rule, qos_policy, **attrs): """Update a minimum bandwidth rule :param qos_rule: Either the id of a minimum bandwidth rule or a - :class:`~openstack.network.v2. - qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` - instance. 
+ :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` + instance. :param qos_policy: The value can be the ID of the QoS policy that the - rule belongs or a :class:`~openstack.network.v2. - qos_policy.QoSPolicy` instance. - :attrs kwargs: The attributes to update on the minimum bandwidth rule - represented by ``value``. + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` + instance. + :param attrs: The attributes to update on the minimum bandwidth rule + represented by ``qos_rule``. :returns: The updated minimum bandwidth rule - :rtype: :class:`~openstack.network.v2.qos_minimum_bandwidth_rule. - QoSMinimumBandwidthRule` + :rtype: + :class:`~openstack.network.v2.qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule` """ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) - return self._update(_qos_minimum_bandwidth_rule. - QoSMinimumBandwidthRule, qos_rule, - qos_policy_id=policy.id, **attrs) + return self._update( + _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, + qos_rule, + qos_policy_id=policy.id, + **attrs, + ) - def create_qos_policy(self, **attrs): - """Create a new QoS policy from attributes + def create_qos_minimum_packet_rate_rule(self, qos_policy, **attrs): + """Create a new minimum packet rate rule - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.qos_policy. - QoSPolicy`, comprised of the properties on the - QoSPolicy class. + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule`, + comprised of the properties on the QoSMinimumPacketRateRule class. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. 
- :returns: The results of QoS policy creation - :rtype: :class:`~openstack.network.v2.qos_policy.QoSPolicy` + :returns: The results of resource creation + :rtype: + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` """ - return self._create(_qos_policy.QoSPolicy, **attrs) - - def delete_qos_policy(self, qos_policy, ignore_missing=True): - """Delete a QoS policy - - :param qos_policy: The value can be either the ID of a QoS policy or a - :class:`~openstack.network.v2.qos_policy.QoSPolicy` - instance. + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._create( + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + qos_policy_id=policy.id, + **attrs, + ) + + def delete_qos_minimum_packet_rate_rule( + self, qos_rule, qos_policy, ignore_missing=True + ): + """Delete a minimum packet rate rule + + :param qos_rule: The value can be either the ID of a minimum packet + rate rule or a + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` + instance. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the QoS policy does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent QoS policy. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + minimum packet rate rule. :returns: ``None`` """ - self._delete(_qos_policy.QoSPolicy, qos_policy, - ignore_missing=ignore_missing) - - def find_qos_policy(self, name_or_id, ignore_missing=True): - """Find a single QoS policy - - :param name_or_id: The name or ID of a QoS policy. 
+ policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + self._delete( + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + qos_rule, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + ) + + def find_qos_minimum_packet_rate_rule( + self, qos_rule_id, qos_policy, ignore_missing=True, **query + ): + """Find a minimum packet rate rule + + :param qos_rule_id: The ID of a minimum packet rate rule. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.qos_policy.QoSPolicy` or - None + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods, such as query filters. + :returns: One + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` + or None + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._find( + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + qos_rule_id, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + **query, + ) + + def get_qos_minimum_packet_rate_rule(self, qos_rule, qos_policy): + """Get a single minimum packet rate rule + + :param qos_rule: The value can be the ID of a minimum packet rate rule + or a + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` + instance. 
+ :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :returns: One + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. """ - return self._find(_qos_policy.QoSPolicy, name_or_id, - ignore_missing=ignore_missing) + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._get( + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + qos_rule, + qos_policy_id=policy.id, + ) - def get_qos_policy(self, qos_policy): - """Get a single QoS policy + def qos_minimum_packet_rate_rules(self, qos_policy, **query): + """Return a generator of minimum packet rate rules - :param qos_policy: The value can be the ID of a QoS policy or a - :class:`~openstack.network.v2.qos_policy.QoSPolicy` - instance. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param kwargs query: Optional query parameters to be sent to limit the + resources being returned. + :returns: A generator of minimum packet rate rule objects + :rtype: + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._list( + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + qos_policy_id=policy.id, + **query, + ) + + def update_qos_minimum_packet_rate_rule( + self, qos_rule, qos_policy, **attrs + ): + """Update a minimum packet rate rule + + :param qos_rule: Either the id of a minimum packet rate rule or a + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` + instance. 
+ :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param attrs: The attributes to update on the minimum packet rate rule + represented by ``qos_rule``. - :returns: One :class:`~openstack.network.v2.qos_policy.QoSPolicy` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :returns: The updated minimum packet rate rule + :rtype: + :class:`~openstack.network.v2.qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule` """ - return self._get(_qos_policy.QoSPolicy, qos_policy) + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._update( + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + qos_rule, + qos_policy_id=policy.id, + **attrs, + ) + + def create_qos_packet_rate_limit_rule(self, qos_policy, **attrs): + """Create a new packet rate limit rule + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule`, + comprised of the properties on the QoSPacketRateLimitRule class. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. - def qos_policies(self, **query): + :returns: The results of resource creation + :rtype: + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._create( + _qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + qos_policy_id=policy.id, + **attrs, + ) + + def delete_qos_packet_rate_limit_rule( + self, qos_rule, qos_policy, ignore_missing=True + ): + """Delete a packet rate limit rule + + :param qos_rule: The value can be either the ID of a packet rate limit + rule or a + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + instance. 
+ :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + packet rate limit rule. + + :returns: ``None`` + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + self._delete( + _qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + qos_rule, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + ) + + def find_qos_packet_rate_limit_rule( + self, qos_rule_id, qos_policy, ignore_missing=True, **query + ): + """Find a packet rate limit rule + + :param qos_rule_id: The ID of a packet rate limit rule. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods, such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + or None + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._find( + _qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + qos_rule_id, + ignore_missing=ignore_missing, + qos_policy_id=policy.id, + **query, + ) + + def get_qos_packet_rate_limit_rule(self, qos_rule, qos_policy): + """Get a single packet rate limit rule + + :param qos_rule: The value can be the ID of a packet rate limit rule + or a + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + instance. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :returns: One + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._get( + _qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + qos_rule, + qos_policy_id=policy.id, + ) + + def qos_packet_rate_limit_rules(self, qos_policy, **query): + """Return a generator of packet rate limit rules + + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param kwargs query: Optional query parameters to be sent to limit the + resources being returned. 
+ :returns: A generator of packet rate limit rule objects + :rtype: + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._list( + _qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + qos_policy_id=policy.id, + **query, + ) + + def update_qos_packet_rate_limit_rule(self, qos_rule, qos_policy, **attrs): + """Update a packet rate limit rule + + :param qos_rule: Either the id of a packet rate limit rule or a + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + instance. + :param qos_policy: The value can be the ID of the QoS policy that the + rule belongs or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param attrs: The attributes to update on the packet rate limit rule + represented by ``qos_rule``. + + :returns: The updated packet rate limit rule + :rtype: + :class:`~openstack.network.v2.qos_packet_rate_limit_rule.QoSPacketRateLimitRule` + """ + policy = self._get_resource(_qos_policy.QoSPolicy, qos_policy) + return self._update( + _qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + qos_rule, + qos_policy_id=policy.id, + **attrs, + ) + + def create_qos_policy(self, **attrs): + """Create a new QoS policy from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.qos_policy.QoSPolicy`, + comprised of the properties on the + QoSPolicy class. + + :returns: The results of QoS policy creation + :rtype: :class:`~openstack.network.v2.qos_policy.QoSPolicy` + """ + return self._create(_qos_policy.QoSPolicy, **attrs) + + def delete_qos_policy(self, qos_policy, ignore_missing=True): + """Delete a QoS policy + + :param qos_policy: The value can be either the ID of a QoS policy or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` + instance. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the QoS policy does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent QoS policy. + + :returns: ``None`` + """ + self._delete( + _qos_policy.QoSPolicy, qos_policy, ignore_missing=ignore_missing + ) + + def find_qos_policy(self, name_or_id, ignore_missing=True, **query): + """Find a single QoS policy + + :param name_or_id: The name or ID of a QoS policy. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods, such as query filters. + :returns: One :class:`~openstack.network.v2.qos_policy.QoSPolicy` or + None + """ + return self._find( + _qos_policy.QoSPolicy, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_qos_policy(self, qos_policy): + """Get a single QoS policy + + :param qos_policy: The value can be the ID of a QoS policy or a + :class:`~openstack.network.v2.qos_policy.QoSPolicy` + instance. + + :returns: One :class:`~openstack.network.v2.qos_policy.QoSPolicy` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_qos_policy.QoSPolicy, qos_policy) + + def qos_policies(self, **query): """Return a generator of QoS policies :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``description``: The description of a QoS policy. * ``is_shared``: Whether the policy is shared among projects. 
@@ -1922,102 +3928,144 @@ def qos_policies(self, **query): :returns: A generator of QoS policy objects :rtype: :class:`~openstack.network.v2.qos_policy.QoSPolicy` """ - return self._list(_qos_policy.QoSPolicy, paginated=False, **query) + return self._list(_qos_policy.QoSPolicy, **query) def update_qos_policy(self, qos_policy, **attrs): """Update a QoS policy :param qos_policy: Either the id of a QoS policy or a - :class:`~openstack.network.v2.qos_policy.QoSPolicy` - instance. - :attrs kwargs: The attributes to update on the QoS policy represented - by ``value``. + :class:`~openstack.network.v2.qos_policy.QoSPolicy` instance. + :param attrs: The attributes to update on the QoS policy represented + by ``qos_policy``. :returns: The updated QoS policy :rtype: :class:`~openstack.network.v2.qos_policy.QoSPolicy` """ return self._update(_qos_policy.QoSPolicy, qos_policy, **attrs) + def find_qos_rule_type(self, rule_type_name, ignore_missing=True): + """Find a single QoS rule type details + + :param rule_type_name: The name of a QoS rule type. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` + or None + """ + return self._find( + _qos_rule_type.QoSRuleType, + rule_type_name, + ignore_missing=ignore_missing, + ) + + def get_qos_rule_type(self, qos_rule_type): + """Get details about single QoS rule type + + :param qos_rule_type: The value can be the name of a QoS policy + rule type or a + :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` + instance. + + :returns: One :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_qos_rule_type.QoSRuleType, qos_rule_type) + def qos_rule_types(self, **query): """Return a generator of QoS rule types :param dict query: Optional query parameters to be sent to limit the - resources returned. Valid parameters include: + resources returned. Valid parameters include: * ``type``: The type of the QoS rule type. :returns: A generator of QoS rule type objects :rtype: :class:`~openstack.network.v2.qos_rule_type.QoSRuleType` """ - return self._list(_qos_rule_type.QoSRuleType, paginated=False, **query) + return self._list(_qos_rule_type.QoSRuleType, **query) def delete_quota(self, quota, ignore_missing=True): """Delete a quota (i.e. reset to the default quota) :param quota: The value can be either the ID of a quota or a - :class:`~openstack.network.v2.quota.Quota` instance. - The ID of a quota is the same as the project ID - for the quota. + :class:`~openstack.network.v2.quota.Quota` instance. + The ID of a quota is the same as the project ID + for the quota. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when quota does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent quota. + :class:`~openstack.exceptions.NotFoundException` will be + raised when quota does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent quota. :returns: ``None`` """ self._delete(_quota.Quota, quota, ignore_missing=ignore_missing) - def get_quota(self, quota): + def get_quota(self, quota, details=False): """Get a quota :param quota: The value can be the ID of a quota or a - :class:`~openstack.network.v2.quota.Quota` instance. - The ID of a quota is the same as the project ID - for the quota. + :class:`~openstack.network.v2.quota.Quota` instance. + The ID of a quota is the same as the project ID + for the quota. + :param details: If set to True, details about quota usage will + be returned. 
:returns: One :class:`~openstack.network.v2.quota.Quota` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_quota.Quota, quota) + if details: + quota_obj = self._get_resource(_quota.Quota, quota) + quota = self._get( + _quota.QuotaDetails, project=quota_obj.id, requires_id=False + ) + else: + quota = self._get(_quota.Quota, quota) + return quota def get_quota_default(self, quota): """Get a default quota :param quota: The value can be the ID of a default quota or a - :class:`~openstack.network.v2.quota.QuotaDefault` - instance. The ID of a default quota is the same - as the project ID for the default quota. + :class:`~openstack.network.v2.quota.QuotaDefault` + instance. The ID of a default quota is the same + as the project ID for the default quota. :returns: One :class:`~openstack.network.v2.quota.QuotaDefault` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ quota_obj = self._get_resource(_quota.Quota, quota) - return self._get(_quota.QuotaDefault, project=quota_obj.project_id) + return self._get( + _quota.QuotaDefault, project=quota_obj.id, requires_id=False + ) def quotas(self, **query): """Return a generator of quotas :param dict query: Optional query parameters to be sent to limit - the resources being returned. Currently no query - parameter is supported. + the resources being returned. Currently no query + parameter is supported. 
:returns: A generator of quota objects :rtype: :class:`~openstack.network.v2.quota.Quota` """ - return self._list(_quota.Quota, paginated=False, **query) + return self._list(_quota.Quota, **query) def update_quota(self, quota, **attrs): """Update a quota :param quota: Either the ID of a quota or a - :class:`~openstack.network.v2.quota.Quota` instance. - The ID of a quota is the same as the project ID - for the quota. - :param dict attrs: The attributes to update on the quota represented - by ``quota``. + :class:`~openstack.network.v2.quota.Quota` instance. + The ID of a quota is the same as the project ID + for the quota. + :param attrs: The attributes to update on the quota represented + by ``quota``. :returns: The updated quota :rtype: :class:`~openstack.network.v2.quota.Quota` @@ -2027,7 +4075,7 @@ def update_quota(self, quota, **attrs): def create_rbac_policy(self, **attrs): """Create a new RBAC policy from attributes - :param dict attrs: Keyword arguments which will be used to create a + :param attrs: Keyword arguments which will be used to create a :class:`~openstack.network.v2.rbac_policy.RBACPolicy`, comprised of the properties on the RBACPolicy class. @@ -2042,30 +4090,37 @@ def delete_rbac_policy(self, rbac_policy, ignore_missing=True): :param rbac_policy: The value can be either the ID of a RBAC policy or a :class:`~openstack.network.v2.rbac_policy.RBACPolicy` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be + :class:`~openstack.exceptions.NotFoundException` will be raised when the RBAC policy does not exist. When set to ``True``, no exception will be set when attempting to delete a nonexistent RBAC policy. 
:returns: ``None`` """ - self._delete(_rbac_policy.RBACPolicy, rbac_policy, - ignore_missing=ignore_missing) + self._delete( + _rbac_policy.RBACPolicy, rbac_policy, ignore_missing=ignore_missing + ) - def find_rbac_policy(self, rbac_policy, ignore_missing=True): + def find_rbac_policy(self, rbac_policy, ignore_missing=True, **query): """Find a single RBAC policy :param rbac_policy: The ID of a RBAC policy. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be + :class:`~openstack.exceptions.NotFoundException` will be raised when the resource does not exist. When set to ``True``, None will be returned when attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.rbac_policy.RBACPolicy` or None """ - return self._find(_rbac_policy.RBACPolicy, rbac_policy, - ignore_missing=ignore_missing) + return self._find( + _rbac_policy.RBACPolicy, + rbac_policy, + ignore_missing=ignore_missing, + **query, + ) def get_rbac_policy(self, rbac_policy): """Get a single RBAC policy @@ -2074,7 +4129,7 @@ def get_rbac_policy(self, rbac_policy): :class:`~openstack.network.v2.rbac_policy.RBACPolicy` instance. :returns: One :class:`~openstack.network.v2.rbac_policy.RBACPolicy` - :raises: :class:`~openstack.exceptions.ResourceNotFound` + :raises: :class:`~openstack.exceptions.NotFoundException` when no resource can be found. """ return self._get(_rbac_policy.RBACPolicy, rbac_policy) @@ -2083,27 +4138,27 @@ def rbac_policies(self, **query): """Return a generator of RBAC policies :param dict query: Optional query parameters to be sent to limit - the resources being returned. Available parameters - include: + the resources being returned. 
Available parameters + include: * ``action``: RBAC policy action * ``object_type``: Type of the object that the RBAC policy affects * ``target_project_id``: ID of the tenant that the RBAC policy - affects + affects * ``project_id``: Owner tenant ID :returns: A generator of rbac objects :rtype: :class:`~openstack.network.v2.rbac_policy.RBACPolicy` """ - return self._list(_rbac_policy.RBACPolicy, paginated=False, **query) + return self._list(_rbac_policy.RBACPolicy, **query) def update_rbac_policy(self, rbac_policy, **attrs): """Update a RBAC policy :param rbac_policy: Either the id of a RBAC policy or a :class:`~openstack.network.v2.rbac_policy.RBACPolicy` instance. - :param dict attrs: The attributes to update on the RBAC policy - represented by ``rbac_policy``. + :param attrs: The attributes to update on the RBAC policy + represented by ``rbac_policy``. :returns: The updated RBAC policy :rtype: :class:`~openstack.network.v2.rbac_policy.RBACPolicy` @@ -2113,53 +4168,63 @@ def update_rbac_policy(self, rbac_policy, **attrs): def create_router(self, **attrs): """Create a new router from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.router.Router`, - comprised of the properties on the Router class. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.router.Router`, + comprised of the properties on the Router class. :returns: The results of router creation :rtype: :class:`~openstack.network.v2.router.Router` """ return self._create(_router.Router, **attrs) - def delete_router(self, router, ignore_missing=True): + def delete_router(self, router, ignore_missing=True, if_revision=None): """Delete a router :param router: The value can be either the ID of a router or a - :class:`~openstack.network.v2.router.Router` instance. + :class:`~openstack.network.v2.router.Router` instance. 
:param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the router does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent router. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the router does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent router. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. :returns: ``None`` """ - self._delete(_router.Router, router, ignore_missing=ignore_missing) + self._delete( + _router.Router, + router, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) - def find_router(self, name_or_id, ignore_missing=True): + def find_router(self, name_or_id, ignore_missing=True, **query): """Find a single router :param name_or_id: The name or ID of a router. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. :returns: One :class:`~openstack.network.v2.router.Router` or None """ - return self._find(_router.Router, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _router.Router, name_or_id, ignore_missing=ignore_missing, **query + ) def get_router(self, router): """Get a single router :param router: The value can be the ID of a router or a - :class:`~openstack.network.v2.router.Router` instance. + :class:`~openstack.network.v2.router.Router` instance. 
:returns: One :class:`~openstack.network.v2.router.Router` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._get(_router.Router, router) @@ -2167,7 +4232,7 @@ def routers(self, **query): """Return a generator of routers :param dict query: Optional query parameters to be sent to limit - the resources being returned. Valid parameters are: + the resources being returned. Valid parameters are: * ``description``: The description of a router. * ``flavor_id``: The ID of the flavor. @@ -2176,54 +4241,60 @@ def routers(self, **query): * ``is_ha``: The highly-available state of a router * ``name``: Router name * ``project_id``: The ID of the project this router is associated - with. + with. * ``status``: The status of the router. :returns: A generator of router objects :rtype: :class:`~openstack.network.v2.router.Router` """ - return self._list(_router.Router, paginated=False, **query) + return self._list(_router.Router, **query) - def update_router(self, router, **attrs): + def update_router(self, router, if_revision=None, **attrs): """Update a router :param router: Either the id of a router or a - :class:`~openstack.network.v2.router.Router` instance. - :param dict attrs: The attributes to update on the router represented - by ``router``. + :class:`~openstack.network.v2.router.Router` instance. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + :param attrs: The attributes to update on the router represented + by ``router``. 
:returns: The updated router :rtype: :class:`~openstack.network.v2.router.Router` """ - return self._update(_router.Router, router, **attrs) + return self._update( + _router.Router, router, if_revision=if_revision, **attrs + ) def add_interface_to_router(self, router, subnet_id=None, port_id=None): """Add Interface to a router :param router: Either the router ID or an instance of - :class:`~openstack.network.v2.router.Router` + :class:`~openstack.network.v2.router.Router` :param subnet_id: ID of the subnet :param port_id: ID of the port :returns: Router with updated interface - :rtype: :class: `~openstack.network.v2.router.Router` + :rtype: :class:`~openstack.network.v2.router.Router` """ body = {} if port_id: body = {'port_id': port_id} else: body = {'subnet_id': subnet_id} - return router.add_interface(self.session, **body) + router = self._get_resource(_router.Router, router) + return router.add_interface(self, **body) - def remove_interface_from_router(self, router, subnet_id=None, - port_id=None): + def remove_interface_from_router( + self, router, subnet_id=None, port_id=None + ): """Remove Interface from a router :param router: Either the router ID or an instance of - :class:`~openstack.network.v2.router.Router` + :class:`~openstack.network.v2.router.Router` :param subnet: ID of the subnet :param port: ID of the port :returns: Router with updated interface - :rtype: :class: `~openstack.network.v2.router.Router` + :rtype: :class:`~openstack.network.v2.router.Router` """ body = {} @@ -2231,681 +4302,3290 @@ def remove_interface_from_router(self, router, subnet_id=None, body = {'port_id': port_id} else: body = {'subnet_id': subnet_id} - return router.remove_interface(self.session, **body) + router = self._get_resource(_router.Router, router) + return router.remove_interface(self, **body) + + def add_extra_routes_to_router(self, router, body): + """Add extra routes to a router + + :param router: Either the router ID or an instance of + 
:class:`~openstack.network.v2.router.Router` + :param body: The request body as documented in the api-ref. + :returns: Router with updated extra routes + :rtype: :class:`~openstack.network.v2.router.Router` + """ + router = self._get_resource(_router.Router, router) + return router.add_extra_routes(self, body=body) + + def remove_extra_routes_from_router(self, router, body): + """Remove extra routes from a router + + :param router: Either the router ID or an instance of + :class:`~openstack.network.v2.router.Router` + :param body: The request body as documented in the api-ref. + :returns: Router with updated extra routes + :rtype: :class:`~openstack.network.v2.router.Router` + """ + router = self._get_resource(_router.Router, router) + return router.remove_extra_routes(self, body=body) def add_gateway_to_router(self, router, **body): """Add Gateway to a router :param router: Either the router ID or an instance of - :class:`~openstack.network.v2.router.Router` + :class:`~openstack.network.v2.router.Router` :param body: Body with the gateway information :returns: Router with updated interface - :rtype: :class: `~openstack.network.v2.router.Router` + :rtype: :class:`~openstack.network.v2.router.Router` """ - return router.add_gateway(self.session, **body) + router = self._get_resource(_router.Router, router) + return router.add_gateway(self, **body) def remove_gateway_from_router(self, router, **body): """Remove Gateway from a router :param router: Either the router ID or an instance of - :class:`~openstack.network.v2.router.Router` + :class:`~openstack.network.v2.router.Router` :param body: Body with the gateway information :returns: Router with updated interface - :rtype: :class: `~openstack.network.v2.router.Router` + :rtype: :class:`~openstack.network.v2.router.Router` """ - return router.remove_gateway(self.session, **body) - - def create_security_group(self, **attrs): - """Create a new security group from attributes + router = self._get_resource(_router.Router, 
router) + return router.remove_gateway(self, **body) - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.security_group.SecurityGroup`, - comprised of the properties on the SecurityGroup class. + def add_external_gateways(self, router, body): + """Add router external gateways - :returns: The results of security group creation - :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` + :param router: Either the router ID or an instance of + :class:`~openstack.network.v2.router.Router` + :param body: Body containing the external_gateways parameter. + :returns: Router with added gateways + :rtype: :class:`~openstack.network.v2.router.Router` """ - return self._create(_security_group.SecurityGroup, **attrs) + router = self._get_resource(_router.Router, router) + return router.add_external_gateways(self, body) - def delete_security_group(self, security_group, ignore_missing=True): - """Delete a security group + def update_external_gateways(self, router, body): + """Update router external gateways - :param security_group: - The value can be either the ID of a security group or a - :class:`~openstack.network.v2.security_group.SecurityGroup` - instance. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the security group does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent security group. + :param router: Either the router ID or an instance of + :class:`~openstack.network.v2.router.Router` + :param body: Body containing the external_gateways parameter. 
+ :returns: Router with updated gateways + :rtype: :class:`~openstack.network.v2.router.Router` + """ + router = self._get_resource(_router.Router, router) + return router.update_external_gateways(self, body) - :returns: ``None`` + def remove_external_gateways(self, router, body): + """Remove router external gateways + + :param router: Either the router ID or an instance of + :class:`~openstack.network.v2.router.Router` + :param body: Body containing the external_gateways parameter. + :returns: Router without the removed gateways + :rtype: :class:`~openstack.network.v2.router.Router` """ - self._delete(_security_group.SecurityGroup, security_group, - ignore_missing=ignore_missing) + router = self._get_resource(_router.Router, router) + return router.remove_external_gateways(self, body) - def find_security_group(self, name_or_id, ignore_missing=True): - """Find a single security group + def routers_hosting_l3_agents(self, router, **query): + """Return a generator of L3 agent hosting a router - :param name_or_id: The name or ID of a security group. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.security_group. 
- SecurityGroup` or None + :param router: Either the router id or an instance of + :class:`~openstack.network.v2.router.Router` + :param kwargs query: Optional query parameters to be sent to limit + the resources returned + + :returns: A generator of Router L3 Agents + :rtype: :class:`~openstack.network.v2.router.RouterL3Agents` """ - return self._find(_security_group.SecurityGroup, name_or_id, - ignore_missing=ignore_missing) + router = self._get_resource(_router.Router, router) + return self._list(_agent.RouterL3Agent, router_id=router.id, **query) - def get_security_group(self, security_group): - """Get a single security group + def agent_hosted_routers(self, agent, **query): + """Return a generator of routers hosted by a L3 agent - :param security_group: The value can be the ID of a security group or a - :class:`~openstack.network.v2.security_group.SecurityGroup` - instance. + :param agent: Either the agent id of an instance of + :class:`~openstack.network.v2.network_agent.Agent` + :param kwargs query: Optional query parameters to be sent to limit + the resources returned - :returns: One - :class:`~openstack.network.v2.security_group.SecurityGroup` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :returns: A generator of routers + :rtype: :class:`~openstack.network.v2.agent.L3AgentRouters` """ - return self._get(_security_group.SecurityGroup, security_group) + agent = self._get_resource(_agent.Agent, agent) + return self._list(_router.L3AgentRouter, agent_id=agent.id, **query) - def security_groups(self, **query): - """Return a generator of security groups + def add_router_to_agent(self, agent, router): + """Add router to L3 agent - :param dict query: Optional query parameters to be sent to limit - the resources being returned. 
Valid parameters are: + :param agent: Either the id of an agent + :class:`~openstack.network.v2.agent.Agent` instance + :param router: A router instance + :returns: Agent with attached router + :rtype: :class:`~openstack.network.v2.agent.Agent` + """ + agent = self._get_resource(_agent.Agent, agent) + router = self._get_resource(_router.Router, router) + return agent.add_router_to_agent(self, router.id) - * ``description``: Security group description - * ``name``: The name of a security group - * ``project_id``: The ID of the project this security group is - associated with. + def remove_router_from_agent(self, agent, router): + """Remove router from L3 agent - :returns: A generator of security group objects - :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` + :param agent: Either the id of an agent or an + :class:`~openstack.network.v2.agent.Agent` instance + :param router: A router instance + :returns: Agent with removed router + :rtype: :class:`~openstack.network.v2.agent.Agent` """ - return self._list(_security_group.SecurityGroup, paginated=False, - **query) + agent = self._get_resource(_agent.Agent, agent) + router = self._get_resource(_router.Router, router) + return agent.remove_router_from_agent(self, router.id) - def update_security_group(self, security_group, **attrs): - """Update a security group + def create_ndp_proxy(self, **attrs): + """Create a new ndp proxy from attributes - :param security_group: Either the id of a security group or a - :class:`~openstack.network.v2.security_group.SecurityGroup` - instance. - :param dict attrs: The attributes to update on the security group - represented by ``security_group``. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.ndp_proxy.NDPProxxy`, + comprised of the properties on the NDPProxy class. 
- :returns: The updated security group - :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` + :returns: The results of ndp proxy creation + :rtype: :class:`~openstack.network.v2.ndp_proxy.NDPProxxy` """ - return self._update(_security_group.SecurityGroup, security_group, - **attrs) - - def security_group_open_port(self, sgid, port, protocol='tcp'): - rule = { - 'direction': 'ingress', - 'remote_ip_prefix': '0.0.0.0/0', - 'protocol': protocol, - 'port_range_max': port, - 'port_range_min': port, - 'security_group_id': sgid, - 'ethertype': 'IPv4' - } - return self.create_security_group_rule(**rule) - - def security_group_allow_ping(self, sgid): - rule = { - 'direction': 'ingress', - 'remote_ip_prefix': '0.0.0.0/0', - 'protocol': 'icmp', - 'port_range_max': None, - 'port_range_min': None, - 'security_group_id': sgid, - 'ethertype': 'IPv4' - } - return self.create_security_group_rule(**rule) + return self._create(_ndp_proxy.NDPProxy, **attrs) - def create_security_group_rule(self, **attrs): - """Create a new security group rule from attributes + def get_ndp_proxy(self, ndp_proxy): + """Get a single ndp proxy - :param dict attrs: Keyword arguments which will be used to create a - :class:`~openstack.network.v2.security_group_rule. - SecurityGroupRule`, comprised of the properties on the - SecurityGroupRule class. + :param ndp_proxy: The value can be the ID of a ndp proxy + or a :class:`~openstack.network.v2.ndp_proxy.NDPProxy` + instance. - :returns: The results of security group rule creation - :rtype: :class:`~openstack.network.v2.security_group_rule.\ - SecurityGroupRule` + :returns: One + :class:`~openstack.network.v2.ndp_proxy.NDPProxy` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" - return self._create(_security_group_rule.SecurityGroupRule, **attrs) + return self._get(_ndp_proxy.NDPProxy, ndp_proxy) - def delete_security_group_rule(self, security_group_rule, - ignore_missing=True): - """Delete a security group rule + def find_ndp_proxy(self, ndp_proxy_id, ignore_missing=True, **query): + """Find a single ndp proxy - :param security_group_rule: - The value can be either the ID of a security group rule - or a :class:`~openstack.network.v2.security_group_rule. - SecurityGroupRule` instance. + :param ndp_proxy_id: The ID of a ndp proxy. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: + One :class:`~openstack.network.v2.ndp_proxy.NDPProxy` or None + """ + return self._find( + _ndp_proxy.NDPProxy, + ndp_proxy_id, + ignore_missing=ignore_missing, + **query, + ) + + def delete_ndp_proxy(self, ndp_proxy, ignore_missing=True): + """Delete a ndp proxy + + :param ndp_proxy: The value can be the ID of a ndp proxy + or a :class:`~openstack.network.v2.ndp_proxy.NDPProxy` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the security group rule does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent security group rule. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the router does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent ndp proxy. 
:returns: ``None`` """ - self._delete(_security_group_rule.SecurityGroupRule, - security_group_rule, ignore_missing=ignore_missing) - - def find_security_group_rule(self, name_or_id, ignore_missing=True): - """Find a single security group rule + self._delete( + _ndp_proxy.NDPProxy, ndp_proxy, ignore_missing=ignore_missing + ) - :param str name_or_id: The ID of a security group rule. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.security_group_rule. - SecurityGroupRule` or None - """ - return self._find(_security_group_rule.SecurityGroupRule, - name_or_id, ignore_missing=ignore_missing) + def ndp_proxies(self, **query): + """Return a generator of ndp proxies - def get_security_group_rule(self, security_group_rule): - """Get a single security group rule + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: - :param security_group_rule: - The value can be the ID of a security group rule or a - :class:`~openstack.network.v2.security_group_rule.\ - SecurityGroupRule` instance. + * ``router_id``: The ID fo the router + * ``port_id``: The ID of internal port. + * ``ip_address``: The internal IP address - :returns: :class:`~openstack.network.v2.security_group_rule.\ - SecurityGroupRule` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. 
+ :returns: A generator of port forwarding objects + :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` """ - return self._get(_security_group_rule.SecurityGroupRule, - security_group_rule) - - def security_group_rules(self, **query): - """Return a generator of security group rules + return self._list(_ndp_proxy.NDPProxy, paginated=False, **query) - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. Available parameters include: + def update_ndp_proxy(self, ndp_proxy, **attrs): + """Update a ndp proxy - * ``description``: The security group rule description - * ``direction``: Security group rule direction - * ``ether_type``: Must be IPv4 or IPv6, and addresses represented - in CIDR must match the ingress or egress rule. - * ``project_id``: The ID of the project this security group rule - is associated with. - * ``protocol``: Security group rule protocol - * ``remote_group_id``: ID of a remote security group - * ``security_group_id``: ID of security group that owns the rules + :param ndp_proxy: The value can be the ID of a ndp proxy or a + :class:`~openstack.network.v2.ndp_proxy.NDPProxy` instance. + :param attrs: The attributes to update on the ip represented + by ``value``. - :returns: A generator of security group rule objects - :rtype: :class:`~openstack.network.v2.security_group_rule. - SecurityGroupRule` + :returns: The updated ndp_proxy + :rtype: :class:`~openstack.network.v2.ndp_proxy.NDPProxy` """ - return self._list(_security_group_rule.SecurityGroupRule, - paginated=False, **query) + return self._update(_ndp_proxy.NDPProxy, ndp_proxy, **attrs) - def create_segment(self, **attrs): - """Create a new segment from attributes + def create_firewall_group(self, **attrs): + """Create a new firewall group from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.segment.Segment`, - comprised of the properties on the Segment class. 
+ :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.firewall_group.FirewallGroup`, + comprised of the properties on the FirewallGroup class. - :returns: The results of segment creation - :rtype: :class:`~openstack.network.v2.segment.Segment` + :returns: The results of firewall group creation + :rtype: :class:`~openstack.network.v2.firewall_group.FirewallGroup` """ - return self._create(_segment.Segment, **attrs) + return self._create(_firewall_group.FirewallGroup, **attrs) - def delete_segment(self, segment, ignore_missing=True): - """Delete a segment + def delete_firewall_group(self, firewall_group, ignore_missing=True): + """Delete a firewall group - :param segment: The value can be either the ID of a segment or a - :class:`~openstack.network.v2.segment.Segment` - instance. + :param firewall_group: + The value can be either the ID of a firewall group or a + :class:`~openstack.network.v2.firewall_group.FirewallGroup` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the segment does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent segment. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the firewall group does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent firewall group. :returns: ``None`` """ - self._delete(_segment.Segment, segment, ignore_missing=ignore_missing) + self._delete( + _firewall_group.FirewallGroup, + firewall_group, + ignore_missing=ignore_missing, + ) - def find_segment(self, name_or_id, ignore_missing=True): - """Find a single segment + def find_firewall_group(self, name_or_id, ignore_missing=True, **query): + """Find a single firewall group - :param name_or_id: The name or ID of a segment. + :param name_or_id: The name or ID of a firewall group. 
:param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.segment.Segment` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.firewall_group.FirewallGroup` or None """ - return self._find(_segment.Segment, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _firewall_group.FirewallGroup, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) - def get_segment(self, segment): - """Get a single segment + def get_firewall_group(self, firewall_group): + """Get a single firewall group - :param segment: The value can be the ID of a segment or a - :class:`~openstack.network.v2.segment.Segment` - instance. + :param firewall_group: The value can be the ID of a firewall group or a + :class:`~openstack.network.v2.firewall_group.FirewallGroup` + instance. - :returns: One :class:`~openstack.network.v2.segment.Segment` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :returns: One + :class:`~openstack.network.v2.firewall_group.FirewallGroup` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_segment.Segment, segment) - - def segments(self, **query): - """Return a generator of segments + return self._get(_firewall_group.FirewallGroup, firewall_group) - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. 
Available parameters include: + def firewall_groups(self, **query): + """Return a generator of firewall_groups - * ``description``: The segment description - * ``name``: Name of the segments - * ``network_id``: ID of the network that owns the segments - * ``network_type``: Network type for the segments - * ``physical_network``: Physical network name for the segments - * ``segmentation_id``: Segmentation ID for the segments + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + + * ``description``: Firewall group description + * ``egress_policy_id``: The ID of egress firewall policy + * ``ingress_policy_id``: The ID of ingress firewall policy + * ``name``: The name of a firewall group + * ``shared``: Indicates whether this firewall group is shared + across all projects. + * ``status``: The status of the firewall group. Valid values are + ACTIVE, INACTIVE, ERROR, PENDING_UPDATE, or + PENDING_DELETE. + * ``ports``: A list of the IDs of the ports associated with the + firewall group. + * ``project_id``: The ID of the project this firewall group is + associated with. + + :returns: A generator of firewall group objects + """ + return self._list(_firewall_group.FirewallGroup, **query) + + def update_firewall_group(self, firewall_group, **attrs): + """Update a firewall group + + :param firewall_group: Either the id of a firewall group or a + :class:`~openstack.network.v2.firewall_group.FirewallGroup` + instance. + :param attrs: The attributes to update on the firewall group + represented by ``firewall_group``. 
- :returns: A generator of segment objects - :rtype: :class:`~openstack.network.v2.segment.Segment` + :returns: The updated firewall group + :rtype: :class:`~openstack.network.v2.firewall_group.FirewallGroup` """ - return self._list(_segment.Segment, paginated=False, **query) + return self._update( + _firewall_group.FirewallGroup, firewall_group, **attrs + ) - def update_segment(self, segment, **attrs): - """Update a segment + def create_firewall_policy(self, **attrs): + """Create a new firewall policy from attributes - :param segment: Either the id of a segment or a - :class:`~openstack.network.v2.segment.Segment` - instance. - :attrs kwargs: The attributes to update on the segment represented - by ``value``. + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.firewall_policy.FirewallPolicy`, + comprised of the properties on the FirewallPolicy class. - :returns: The update segment - :rtype: :class:`~openstack.network.v2.segment.Segment` + :returns: The results of firewall policy creation + :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` """ - return self._update(_segment.Segment, segment, **attrs) - - def service_providers(self, **query): - """Return a generator of service providers + return self._create(_firewall_policy.FirewallPolicy, **attrs) - :param kwargs \*\* query: Optional query parameters to be sent to limit - the resources being returned. + def delete_firewall_policy(self, firewall_policy, ignore_missing=True): + """Delete a firewall policy + + :param firewall_policy: + The value can be either the ID of a firewall policy or a + :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the firewall policy does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent firewall policy. 
+ + :returns: ``None`` + """ + self._delete( + _firewall_policy.FirewallPolicy, + firewall_policy, + ignore_missing=ignore_missing, + ) + + def find_firewall_policy(self, name_or_id, ignore_missing=True, **query): + """Find a single firewall policy + + :param name_or_id: The name or ID of a firewall policy. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + or None + """ + return self._find( + _firewall_policy.FirewallPolicy, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_firewall_policy(self, firewall_policy): + """Get a single firewall policy + + :param firewall_policy: The value can be the ID of a firewall policy + or a + :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + instance. + + :returns: One + :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_firewall_policy.FirewallPolicy, firewall_policy) + + def firewall_policies(self, **query): + """Return a generator of firewall_policies + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + + * ``description``: Firewall policy description + * ``firewall_rule``: A list of the IDs of the firewall rules + associated with the firewall policy. + * ``name``: The name of a firewall policy + * ``shared``: Indicates whether this firewall policy is shared + across all projects. + * ``project_id``: The ID of the project that owns the resource. 
+ + :returns: A generator of firewall policy objects + """ + return self._list(_firewall_policy.FirewallPolicy, **query) + + def update_firewall_policy(self, firewall_policy, **attrs): + """Update a firewall policy + + :param firewall_policy: Either the id of a firewall policy or a + :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + instance. + :param attrs: The attributes to update on the firewall policy + represented by ``firewall_policy``. + + :returns: The updated firewall policy + :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + """ + return self._update( + _firewall_policy.FirewallPolicy, firewall_policy, **attrs + ) + + def insert_rule_into_policy( + self, + firewall_policy_id, + firewall_rule_id, + insert_after=None, + insert_before=None, + ): + """Insert a firewall_rule into a firewall_policy in order + + :param firewall_policy_id: The ID of the firewall policy. + :param firewall_rule_id: The ID of the firewall rule. + :param insert_after: The ID of the firewall rule to insert the new + rule after. It will be worked only when + insert_before is none. + :param insert_before: The ID of the firewall rule to insert the new + rule before. + + :returns: The updated firewall policy + :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + """ + body = { + 'firewall_rule_id': firewall_rule_id, + 'insert_after': insert_after, + 'insert_before': insert_before, + } + policy = self._get_resource( + _firewall_policy.FirewallPolicy, firewall_policy_id + ) + return policy.insert_rule(self, **body) + + def remove_rule_from_policy(self, firewall_policy_id, firewall_rule_id): + """Remove a firewall_rule from a firewall_policy. + + :param firewall_policy_id: The ID of the firewall policy. + :param firewall_rule_id: The ID of the firewall rule. 
+ + :returns: The updated firewall policy + :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + """ + body = {'firewall_rule_id': firewall_rule_id} + policy = self._get_resource( + _firewall_policy.FirewallPolicy, firewall_policy_id + ) + return policy.remove_rule(self, **body) + + def create_firewall_rule(self, **attrs): + """Create a new firewall rule from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.firewall_rule.FirewallRule`, + comprised of the properties on the FirewallRule class. + + :returns: The results of firewall rule creation + :rtype: :class:`~openstack.network.v2.firewall_rule.FirewallRule` + """ + return self._create(_firewall_rule.FirewallRule, **attrs) + + def delete_firewall_rule(self, firewall_rule, ignore_missing=True): + """Delete a firewall rule + + :param firewall_rule: + The value can be either the ID of a firewall rule or a + :class:`~openstack.network.v2.firewall_rule.FirewallRule` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the firewall rule does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent firewall rule. + + :returns: ``None`` + """ + self._delete( + _firewall_rule.FirewallRule, + firewall_rule, + ignore_missing=ignore_missing, + ) + + def find_firewall_rule(self, name_or_id, ignore_missing=True, **query): + """Find a single firewall rule + + :param name_or_id: The name or ID of a firewall rule. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.firewall_rule.FirewallRule` + or None + """ + return self._find( + _firewall_rule.FirewallRule, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_firewall_rule(self, firewall_rule): + """Get a single firewall rule + + :param firewall_rule: The value can be the ID of a firewall rule or a + :class:`~openstack.network.v2.firewall_rule.FirewallRule` + instance. + + :returns: One + :class:`~openstack.network.v2.firewall_rule.FirewallRule` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_firewall_rule.FirewallRule, firewall_rule) + + def firewall_rules(self, **query): + """Return a generator of firewall_rules + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + + * ``action``: The action that the API performs on traffic that + matches the firewall rule. + * ``description``: Firewall rule description + * ``name``: The name of a firewall group + * ``destination_ip_address``: The destination IPv4 or IPv6 address + or CIDR for the firewall rule. + * ``destination_port``: The destination port or port range for + the firewall rule. + * ``enabled``: Facilitates selectively turning off rules. + * ``shared``: Indicates whether this firewall group is shared + across all projects. + * ``ip_version``: The IP protocol version for the firewall rule. + * ``protocol``: The IP protocol for the firewall rule. + * ``source_ip_address``: The source IPv4 or IPv6 address or CIDR + for the firewall rule. + * ``source_port``: The source port or port range for the firewall + rule. + * ``project_id``: The ID of the project this firewall group is + associated with. 
+ + :returns: A generator of firewall rule objects + """ + return self._list(_firewall_rule.FirewallRule, **query) + + def update_firewall_rule(self, firewall_rule, **attrs): + """Update a firewall rule + + :param firewall_rule: Either the id of a firewall rule or a + :class:`~openstack.network.v2.firewall_rule.FirewallRule` + instance. + :param attrs: The attributes to update on the firewall rule + represented by ``firewall_rule``. + + :returns: The updated firewall rule + :rtype: :class:`~openstack.network.v2.firewall_rule.FirewallRule` + """ + return self._update( + _firewall_rule.FirewallRule, firewall_rule, **attrs + ) + + def create_security_group(self, **attrs): + """Create a new security group from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.security_group.SecurityGroup`, + comprised of the properties on the SecurityGroup class. + + :returns: The results of security group creation + :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` + """ + return self._create(_security_group.SecurityGroup, **attrs) + + def delete_security_group( + self, security_group, ignore_missing=True, if_revision=None + ): + """Delete a security group + + :param security_group: + The value can be either the ID of a security group or a + :class:`~openstack.network.v2.security_group.SecurityGroup` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the security group does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent security group. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. 
+ + :returns: ``None`` + """ + self._delete( + _security_group.SecurityGroup, + security_group, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) + + def find_security_group(self, name_or_id, ignore_missing=True, **query): + """Find a single security group + + :param name_or_id: The name or ID of a security group. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.security_group.SecurityGroup` + or None + """ + return self._find( + _security_group.SecurityGroup, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_security_group(self, security_group): + """Get a single security group + + :param security_group: The value can be the ID of a security group or a + :class:`~openstack.network.v2.security_group.SecurityGroup` + instance. + + :returns: One + :class:`~openstack.network.v2.security_group.SecurityGroup` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_security_group.SecurityGroup, security_group) + + def security_groups(self, **query): + """Return a generator of security groups + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Valid parameters are: + + * ``description``: Security group description + * ``ìd``: The id of a security group, or list of security group ids + * ``name``: The name of a security group + * ``project_id``: The ID of the project this security group is + associated with. 
+ + :returns: A generator of security group objects + :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` + """ + return self._list(_security_group.SecurityGroup, **query) + + def update_security_group(self, security_group, if_revision=None, **attrs): + """Update a security group + + :param security_group: Either the id of a security group or a + :class:`~openstack.network.v2.security_group.SecurityGroup` + instance. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + :param attrs: The attributes to update on the security group + represented by ``security_group``. + + :returns: The updated security group + :rtype: :class:`~openstack.network.v2.security_group.SecurityGroup` + """ + return self._update( + _security_group.SecurityGroup, + security_group, + if_revision=if_revision, + **attrs, + ) + + def create_security_group_rule(self, **attrs): + """Create a new security group rule from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule`, + comprised of the properties on the + SecurityGroupRule class. + + :returns: The results of security group rule creation + :rtype: + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` + """ + return self._create(_security_group_rule.SecurityGroupRule, **attrs) + + def create_security_group_rules(self, data): + """Create new security group rules from the list of attributes + + :param list data: List of dicts of attributes which will be used to + create a + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule`, + comprised of the properties on the SecurityGroupRule + class. 
+ + :returns: A generator of security group rule objects + :rtype: + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` + """ + return self._bulk_create(_security_group_rule.SecurityGroupRule, data) + + def delete_security_group_rule( + self, security_group_rule, ignore_missing=True, if_revision=None + ): + """Delete a security group rule + + :param security_group_rule: + The value can be either the ID of a security group rule + or a + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the security group rule does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent security group rule. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + + :returns: ``None`` + """ + self._delete( + _security_group_rule.SecurityGroupRule, + security_group_rule, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) + + def find_security_group_rule( + self, name_or_id, ignore_missing=True, **query + ): + """Find a single security group rule + + :param str name_or_id: The ID of a security group rule. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` + or None + """ + return self._find( + _security_group_rule.SecurityGroupRule, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_security_group_rule(self, security_group_rule): + """Get a single security group rule + + :param security_group_rule: + The value can be the ID of a security group rule or a + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` + instance. + + :returns: + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _security_group_rule.SecurityGroupRule, security_group_rule + ) + + def security_group_rules(self, **query): + """Return a generator of security group rules + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: + + * ``description``: The security group rule description + * ``direction``: Security group rule direction + * ``ether_type``: Must be IPv4 or IPv6, and addresses represented + in CIDR must match the ingress or egress rule. + * ``project_id``: The ID of the project this security group rule + is associated with. + * ``protocol``: Security group rule protocol + * ``remote_group_id``: ID of a remote security group + * ``security_group_id``: ID of security group that owns the rules + + :returns: A generator of security group rule objects + :rtype: + :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` + """ + return self._list(_security_group_rule.SecurityGroupRule, **query) + + def create_default_security_group_rule(self, **attrs): + """Create a new default security group rule from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.default_security_group_rule. 
+ DefaultSecurityGroupRule`, + comprised of the properties on the DefaultSecurityGroupRule class. + + :returns: The results of default security group rule creation + :rtype: + :class:`~openstack.network.v2.default_security_group_rule. + DefaultSecurityGroupRule` + """ + return self._create( + _default_security_group_rule.DefaultSecurityGroupRule, **attrs + ) + + def delete_default_security_group_rule( + self, + default_security_group_rule, + ignore_missing=True, + ): + """Delete a default security group rule + + :param default_security_group_rule: + The value can be either the ID of a default security group rule + or a + :class:`~openstack.network.v2.default_security_group_rule. + DefaultSecurityGroupRule` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the defaul security group rule does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent default security group rule. + + :returns: ``None`` + """ + self._delete( + _default_security_group_rule.DefaultSecurityGroupRule, + default_security_group_rule, + ignore_missing=ignore_missing, + ) + + def find_default_security_group_rule( + self, name_or_id, ignore_missing=True, **query + ): + """Find a single default security group rule + + :param str name_or_id: The ID of a default security group rule. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.default_security_group_rule. 
+ DefaultSecurityGroupRule` or None + """ + return self._find( + _default_security_group_rule.DefaultSecurityGroupRule, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_default_security_group_rule(self, default_security_group_rule): + """Get a single default security group rule + + :param default_security_group_rule: + The value can be the ID of a default security group rule or a + :class:`~openstack.network.v2.default_security_group_rule. + DefaultSecurityGroupRule` instance. + + :returns: + :class:`~openstack.network.v2.default_security_group_rule. + DefaultSecurityGroupRule` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _default_security_group_rule.DefaultSecurityGroupRule, + default_security_group_rule, + ) + + def default_security_group_rules(self, **query): + """Return a generator of default security group rules + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: + + * ``description``: The default security group rule description + * ``direction``: Default security group rule direction + * ``ether_type``: Must be IPv4 or IPv6, and addresses represented + in CIDR must match the ingress or egress rule. + * ``protocol``: Default security group rule protocol + * ``remote_group_id``: ID of a remote security group + + :returns: A generator of default security group rule objects + :rtype: + :class:`~openstack.network.v2.default_security_group_rule. + DefaultSecurityGroupRule` + """ + return self._list( + _default_security_group_rule.DefaultSecurityGroupRule, **query + ) + + def create_segment(self, **attrs): + """Create a new segment from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.segment.Segment`, + comprised of the properties on the Segment class. 
+ + :returns: The results of segment creation + :rtype: :class:`~openstack.network.v2.segment.Segment` + """ + return self._create(_segment.Segment, **attrs) + + def delete_segment(self, segment, ignore_missing=True): + """Delete a segment + + :param segment: The value can be either the ID of a segment or a + :class:`~openstack.network.v2.segment.Segment` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the segment does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent segment. + + :returns: ``None`` + """ + self._delete(_segment.Segment, segment, ignore_missing=ignore_missing) + + def find_segment(self, name_or_id, ignore_missing=True, **query): + """Find a single segment + + :param name_or_id: The name or ID of a segment. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.segment.Segment` or None + """ + return self._find( + _segment.Segment, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_segment(self, segment): + """Get a single segment + + :param segment: The value can be the ID of a segment or a + :class:`~openstack.network.v2.segment.Segment` + instance. + + :returns: One :class:`~openstack.network.v2.segment.Segment` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_segment.Segment, segment) + + def segments(self, **query): + """Return a generator of segments + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. 
Available parameters include: + + * ``description``: The segment description + * ``name``: Name of the segments + * ``network_id``: ID of the network that owns the segments + * ``network_type``: Network type for the segments + * ``physical_network``: Physical network name for the segments + * ``segmentation_id``: Segmentation ID for the segments + + :returns: A generator of segment objects + :rtype: :class:`~openstack.network.v2.segment.Segment` + """ + return self._list(_segment.Segment, **query) + + def update_segment(self, segment, **attrs): + """Update a segment + + :param segment: Either the id of a segment or a + :class:`~openstack.network.v2.segment.Segment` instance. + :param attrs: The attributes to update on the segment represented + by ``segment``. + + :returns: The update segment + :rtype: :class:`~openstack.network.v2.segment.Segment` + """ + return self._update(_segment.Segment, segment, **attrs) + + def service_providers(self, **query): + """Return a generator of service providers + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of service provider objects :rtype: :class:`~openstack.network.v2.service_provider.ServiceProvider` """ - return self._list(_service_provider.ServiceProvider, - paginated=False, **query) + return self._list(_service_provider.ServiceProvider, **query) + + def create_service_profile(self, **attrs): + """Create a new network service flavor profile from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.service_profile.ServiceProfile`, + comprised of the properties on the ServiceProfile + class. 
+ + :returns: The results of service profile creation + :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` + """ + return self._create(_service_profile.ServiceProfile, **attrs) + + def delete_service_profile(self, service_profile, ignore_missing=True): + """Delete a network service flavor profile + + :param service_profile: The value can be either the ID of a service + profile or a + :class:`~openstack.network.v2.service_profile.ServiceProfile` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the service profile does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent service profile. + + :returns: ``None`` + """ + self._delete( + _service_profile.ServiceProfile, + service_profile, + ignore_missing=ignore_missing, + ) + + def find_service_profile(self, name_or_id, ignore_missing=True, **query): + """Find a single network service flavor profile + + :param name_or_id: The name or ID of a service profile. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.service_profile.ServiceProfile` + or None + """ + return self._find( + _service_profile.ServiceProfile, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_service_profile(self, service_profile): + """Get a single network service flavor profile + + :param service_profile: The value can be the ID of a service_profile or + a :class:`~openstack.network.v2.service_profile.ServiceProfile` + instance. 
+ + :returns: One + :class:`~openstack.network.v2.service_profile.ServiceProfile` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_service_profile.ServiceProfile, service_profile) + + def service_profiles(self, **query): + """Return a generator of network service flavor profiles + + :param dict query: Optional query parameters to be sent to limit the + resources returned. Available parameters inclue: + + * ``description``: The description of the service flavor profile + * ``driver``: Provider driver for the service flavor profile + * ``is_enabled``: Whether the profile is enabled + * ``project_id``: The owner project ID + + :returns: A generator of service profile objects + :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` + """ + return self._list(_service_profile.ServiceProfile, **query) + + def update_service_profile(self, service_profile, **attrs): + """Update a network flavor service profile + + :param service_profile: Either the id of a service profile or a + :class:`~openstack.network.v2.service_profile.ServiceProfile` + instance. + :param attrs: The attributes to update on the service profile + represented by ``service_profile``. + + :returns: The updated service profile + :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` + """ + return self._update( + _service_profile.ServiceProfile, service_profile, **attrs + ) + + def create_subnet(self, **attrs): + """Create a new subnet from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.subnet.Subnet`, + comprised of the properties on the Subnet class. 
+ + :returns: The results of subnet creation + :rtype: :class:`~openstack.network.v2.subnet.Subnet` + """ + return self._create(_subnet.Subnet, **attrs) + + def delete_subnet(self, subnet, ignore_missing=True, if_revision=None): + """Delete a subnet + + :param subnet: The value can be either the ID of a subnet or a + :class:`~openstack.network.v2.subnet.Subnet` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the subnet does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent subnet. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + + :returns: ``None`` + """ + self._delete( + _subnet.Subnet, + subnet, + ignore_missing=ignore_missing, + if_revision=if_revision, + ) + + def find_subnet(self, name_or_id, ignore_missing=True, **query): + """Find a single subnet + + :param name_or_id: The name or ID of a subnet. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.subnet.Subnet` or None + """ + return self._find( + _subnet.Subnet, name_or_id, ignore_missing=ignore_missing, **query + ) + + def get_subnet(self, subnet): + """Get a single subnet + + :param subnet: The value can be the ID of a subnet or a + :class:`~openstack.network.v2.subnet.Subnet` instance. + + :returns: One :class:`~openstack.network.v2.subnet.Subnet` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_subnet.Subnet, subnet) + + def subnets(self, **query): + """Return a generator of subnets + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: + + * ``cidr``: Subnet CIDR + * ``description``: The subnet description + * ``gateway_ip``: Subnet gateway IP address + * ``ip_version``: Subnet IP address version + * ``ipv6_address_mode``: The IPv6 address mode + * ``ipv6_ra_mode``: The IPv6 router advertisement mode + * ``is_dhcp_enabled``: Subnet has DHCP enabled (boolean) + * ``name``: Subnet name + * ``network_id``: ID of network that owns the subnets + * ``project_id``: Owner tenant ID + * ``subnet_pool_id``: The subnet pool ID from which to obtain a + CIDR. + + :returns: A generator of subnet objects + :rtype: :class:`~openstack.network.v2.subnet.Subnet` + """ + return self._list(_subnet.Subnet, **query) + + def update_subnet(self, subnet, if_revision=None, **attrs): + """Update a subnet + + :param subnet: Either the id of a subnet or a + :class:`~openstack.network.v2.subnet.Subnet` instance. + :param int if_revision: Revision to put in If-Match header of update + request to perform compare-and-swap update. + :param attrs: The attributes to update on the subnet represented + by ``subnet``. + + :returns: The updated subnet + :rtype: :class:`~openstack.network.v2.subnet.Subnet` + """ + return self._update( + _subnet.Subnet, subnet, if_revision=if_revision, **attrs + ) + + def create_subnet_pool(self, **attrs): + """Create a new subnet pool from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.subnet_pool.SubnetPool`, + comprised of the properties on the SubnetPool class. 
+ + :returns: The results of subnet pool creation + :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` + """ + return self._create(_subnet_pool.SubnetPool, **attrs) + + def delete_subnet_pool(self, subnet_pool, ignore_missing=True): + """Delete a subnet pool + + :param subnet_pool: The value can be either the ID of a subnet pool or + a :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the subnet pool does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent subnet pool. + + :returns: ``None`` + """ + self._delete( + _subnet_pool.SubnetPool, subnet_pool, ignore_missing=ignore_missing + ) + + def find_subnet_pool(self, name_or_id, ignore_missing=True, **query): + """Find a single subnet pool + + :param name_or_id: The name or ID of a subnet pool. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.subnet_pool.SubnetPool` + or None + """ + return self._find( + _subnet_pool.SubnetPool, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_subnet_pool(self, subnet_pool): + """Get a single subnet pool + + :param subnet_pool: The value can be the ID of a subnet pool or a + :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. + + :returns: One :class:`~openstack.network.v2.subnet_pool.SubnetPool` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_subnet_pool.SubnetPool, subnet_pool) + + def subnet_pools(self, **query): + """Return a generator of subnet pools + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: + + * ``address_scope_id``: Subnet pool address scope ID + * ``description``: The subnet pool description + * ``ip_version``: The IP address family + * ``is_default``: Subnet pool is the default (boolean) + * ``is_shared``: Subnet pool is shared (boolean) + * ``name``: Subnet pool name + * ``project_id``: Owner tenant ID + + :returns: A generator of subnet pool objects + :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` + """ + return self._list(_subnet_pool.SubnetPool, **query) + + def update_subnet_pool(self, subnet_pool, **attrs): + """Update a subnet pool + + :param subnet_pool: Either the ID of a subnet pool or a + :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. + :param attrs: The attributes to update on the subnet pool + represented by ``subnet_pool``. + + :returns: The updated subnet pool + :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` + """ + return self._update(_subnet_pool.SubnetPool, subnet_pool, **attrs) + + @staticmethod + def _check_tag_support(resource): + try: + # Check 'tags' attribute exists + resource.tags + except AttributeError: + raise exceptions.InvalidRequest( + f'{resource.__class__.__name__} resource does not support tag' + ) + + def get_tags(self, resource): + """Retrieve the tags of a specified resource + + :param resource: :class:`~openstack.resource.Resource` instance. + + :returns: The resource tags list + :rtype: "list" + """ + self._check_tag_support(resource) + return resource.fetch_tags(self).tags + + def set_tags(self, resource, tags): + """Replace tags of a specified resource with specified tags + + :param resource: + :class:`~openstack.resource.Resource` instance. + :param tags: New tags to be set. 
+ :type tags: "list" + + :returns: The updated resource + :rtype: :class:`~openstack.resource.Resource` + """ + self._check_tag_support(resource) + return resource.set_tags(self, tags) + + def add_tags(self, resource, tags): + """Add tags to a specified resource + + :param resource: :class:`~openstack.resource.Resource` instance. + :param tags: New tags to be set. + :type tags: "list" + + :returns: The updated resource + :rtype: :class:`~openstack.resource.Resource` + """ + self._check_tag_support(resource) + return resource.add_tags(self, tags) + + def add_tag(self, resource, tag): + """Add one single tag to a specified resource + + :param resource: :class:`~openstack.resource.Resource` instance. + :param tag: New tag to be set. + :type tag: "str" + + :returns: The updated resource + :rtype: :class:`~openstack.resource.Resource` + """ + self._check_tag_support(resource) + return resource.add_tag(self, tag) + + def remove_tag(self, resource, tag): + """Remove one single tag of a specified resource + + :param resource: :class:`~openstack.resource.Resource` instance. + :param tag: New tag to be set. + :type tag: "str" + + :returns: The updated resource + :rtype: :class:`~openstack.resource.Resource` + """ + self._check_tag_support(resource) + return resource.remove_tag(self, tag) + + def remove_all_tags(self, resource): + """Remove all tags of a specified resource + + :param resource: :class:`~openstack.resource.Resource` instance. + + :returns: The updated resource + :rtype: :class:`~openstack.resource.Resource` + """ + self._check_tag_support(resource) + return resource.remove_all_tags(self) + + def check_tag(self, resource, tag): + """Checks if tag exists on the specified resource + + :param resource: :class:`~openstack.resource.Resource` instance. 
+ :param tag: Tag to be tested + :type tags: "string" + + :returns: If the tag exists in the specified resource + :rtype: bool + """ + self._check_tag_support(resource) + return resource.check_tag(self, tag) + + def create_trunk(self, **attrs): + """Create a new trunk from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.trunk.Trunk`, + comprised of the properties on the Trunk class. + + :returns: The results of trunk creation + :rtype: :class:`~openstack.network.v2.trunk.Trunk` + """ + return self._create(_trunk.Trunk, **attrs) + + def delete_trunk(self, trunk, ignore_missing=True): + """Delete a trunk + + :param trunk: The value can be either the ID of trunk or a + :class:`openstack.network.v2.trunk.Trunk` instance + + :returns: ``None`` + """ + self._delete(_trunk.Trunk, trunk, ignore_missing=ignore_missing) + + def find_trunk(self, name_or_id, ignore_missing=True, **query): + """Find a single trunk + + :param name_or_id: The name or ID of a trunk. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.trunk.Trunk` + or None + """ + return self._find( + _trunk.Trunk, name_or_id, ignore_missing=ignore_missing, **query + ) + + def get_trunk(self, trunk): + """Get a single trunk + + :param trunk: The value can be the ID of a trunk or a + :class:`~openstack.network.v2.trunk.Trunk` instance. + + :returns: One + :class:`~openstack.network.v2.trunk.Trunk` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_trunk.Trunk, trunk) + + def trunks(self, **query): + """Return a generator of trunks + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of trunk objects + :rtype: :class:`~openstack.network.v2.trunk.trunk` + """ + return self._list(_trunk.Trunk, **query) + + def update_trunk(self, trunk, **attrs): + """Update a trunk + + :param trunk: Either the id of a trunk or a + :class:`~openstack.network.v2.trunk.Trunk` instance. + :param attrs: The attributes to update on the trunk + represented by ``trunk``. + + :returns: The updated trunk + :rtype: :class:`~openstack.network.v2.trunk.Trunk` + """ + return self._update(_trunk.Trunk, trunk, **attrs) + + def add_trunk_subports(self, trunk, subports): + """Set sub_ports on trunk + + :param trunk: The value can be the ID of a trunk or a + :class:`~openstack.network.v2.trunk.Trunk` instance. + :param subports: New subports to be set. + :type subports: "list" + + :returns: The updated trunk + :rtype: :class:`~openstack.network.v2.trunk.Trunk` + """ + trunk = self._get_resource(_trunk.Trunk, trunk) + return trunk.add_subports(self, subports) + + def delete_trunk_subports(self, trunk, subports): + """Remove sub_ports from trunk + + :param trunk: The value can be the ID of a trunk or a + :class:`~openstack.network.v2.trunk.Trunk` instance. + :param subports: Subports to be removed. + :type subports: "list" + + :returns: The updated trunk + :rtype: :class:`~openstack.network.v2.trunk.Trunk` + """ + trunk = self._get_resource(_trunk.Trunk, trunk) + return trunk.delete_subports(self, subports) + + def get_trunk_subports(self, trunk): + """Get sub_ports configured on trunk + + :param trunk: The value can be the ID of a trunk or a + :class:`~openstack.network.v2.trunk.Trunk` instance. 
+ + :returns: Trunk sub_ports + :rtype: "list" + """ + trunk = self._get_resource(_trunk.Trunk, trunk) + return trunk.get_subports(self) + + # ========== VPNaas ========== + # ========== VPN Endpoint group ========== + + def create_vpn_endpoint_group(self, **attrs): + """Create a new vpn endpoint group from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup`, + comprised of the properties on the VpnEndpointGroup class. + + :returns: The results of vpn endpoint group creation. + :rtype: + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + """ + return self._create(_vpn_endpoint_group.VpnEndpointGroup, **attrs) + + def delete_vpn_endpoint_group( + self, vpn_endpoint_group, ignore_missing=True + ): + """Delete a vpn service + + :param vpn_endpoint_group: + The value can be either the ID of a vpn service or a + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the vpn service does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent vpn service. + + :returns: ``None`` + """ + self._delete( + _vpn_endpoint_group.VpnEndpointGroup, + vpn_endpoint_group, + ignore_missing=ignore_missing, + ) + + def find_vpn_endpoint_group( + self, name_or_id, ignore_missing=True, **query + ): + """Find a single vpn service + + :param name_or_id: The name or ID of a vpn service. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + or None + """ + return self._find( + _vpn_endpoint_group.VpnEndpointGroup, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_vpn_endpoint_group(self, vpn_endpoint_group): + """Get a single vpn service + + :param vpn_endpoint_group: The value can be the ID of a vpn service + or a + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + instance. + + :returns: One + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _vpn_endpoint_group.VpnEndpointGroup, vpn_endpoint_group + ) + + def vpn_endpoint_groups(self, **query): + """Return a generator of vpn services + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of vpn service objects + :rtype: + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + """ + return self._list(_vpn_endpoint_group.VpnEndpointGroup, **query) + + def update_vpn_endpoint_group(self, vpn_endpoint_group, **attrs): + """Update a vpn service + + :param vpn_endpoint_group: Either the id of a vpn service or a + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + instance. + :param attrs: The attributes to update on the VPN service + represented by ``vpn_endpoint_group``. 
+ + :returns: The updated vpnservice + :rtype: + :class:`~openstack.network.v2.vpn_endpoint_group.VpnEndpointGroup` + """ + return self._update( + _vpn_endpoint_group.VpnEndpointGroup, vpn_endpoint_group, **attrs + ) + + # ========== IPsec Site Connection ========== + def create_vpn_ipsec_site_connection(self, **attrs): + """Create a new IPsec site connection from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection`, + comprised of the properties on the IPSecSiteConnection class. + + :returns: The results of IPsec site connection creation + :rtype: + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + """ + return self._create( + _ipsec_site_connection.VpnIPSecSiteConnection, **attrs + ) + + def find_vpn_ipsec_site_connection( + self, name_or_id, ignore_missing=True, **query + ): + """Find a single IPsec site connection + + :param name_or_id: The name or ID of an IPsec site connection. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` + will be raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods such as query filters. + :returns: One + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + or None + """ + return self._find( + _ipsec_site_connection.VpnIPSecSiteConnection, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_vpn_ipsec_site_connection(self, ipsec_site_connection): + """Get a single IPsec site connection + + :param ipsec_site_connection: The value can be the ID of an IPsec site + connection or a + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + instance. 
+ + :returns: One + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get( + _ipsec_site_connection.VpnIPSecSiteConnection, + ipsec_site_connection, + ) + + def vpn_ipsec_site_connections(self, **query): + """Return a generator of IPsec site connections + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + :returns: A generator of IPsec site connection objects + :rtype: + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + """ + return self._list( + _ipsec_site_connection.VpnIPSecSiteConnection, **query + ) + + def update_vpn_ipsec_site_connection(self, ipsec_site_connection, **attrs): + """Update a IPsec site connection + + :ipsec_site_connection: Either the id of an IPsec site connection or + a + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + instance. + :param attrs: The attributes to update on the IPsec site + connection represented by ``ipsec_site_connection``. + + :returns: The updated IPsec site connection + :rtype: + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + """ + return self._update( + _ipsec_site_connection.VpnIPSecSiteConnection, + ipsec_site_connection, + **attrs, + ) + + def delete_vpn_ipsec_site_connection( + self, ipsec_site_connection, ignore_missing=True + ): + """Delete a IPsec site connection + + :param ipsec_site_connection: The value can be either the ID of an + IPsec site connection, or a + :class:`~openstack.network.v2.vpn_ipsec_site_connection.VpnIPSecSiteConnection` + instance. + :param bool ignore_missing: + When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the IPsec site connection does not exist. 
When set to + ``True``, no exception will be set when attempting to delete a + nonexistent IPsec site connection. + + :returns: ``None`` + """ + self._delete( + _ipsec_site_connection.VpnIPSecSiteConnection, + ipsec_site_connection, + ignore_missing=ignore_missing, + ) + + # ========== IKEPolicy ========== + def create_vpn_ike_policy(self, **attrs): + """Create a new ike policy from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy`, + comprised of the properties on the VpnIkePolicy class. + + :returns: The results of ike policy creation :rtype: + :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` + """ + return self._create(_ike_policy.VpnIkePolicy, **attrs) + + def find_vpn_ike_policy(self, name_or_id, ignore_missing=True, **query): + """Find a single ike policy + + :param name_or_id: The name or ID of an IKE policy. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` + will be raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods such as query filters. + :returns: One + :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` or None. + """ + return self._find( + _ike_policy.VpnIkePolicy, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_vpn_ike_policy(self, ike_policy): + """Get a single ike policy + + :param ike_policy: The value can be the ID of an IKE policy or a + :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` + instance. + + :returns: One + :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` + :rtype: :class:`~openstack.network.v2.ike_policy.VpnIkePolicy` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. 
+ """ + return self._get(_ike_policy.VpnIkePolicy, ike_policy) + + def vpn_ike_policies(self, **query): + """Return a generator of IKE policies + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. + + :returns: A generator of ike policy objects + :rtype: :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` + """ + return self._list(_ike_policy.VpnIkePolicy, **query) + + def update_vpn_ike_policy(self, ike_policy, **attrs): + """Update an IKE policy + + :ike_policy: Either the IK of an IKE policy or a + :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` + instance. + :param attrs: The attributes to update on the ike policy + represented by ``ike_policy``. + + :returns: The updated ike policy + :rtype: :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` + """ + return self._update(_ike_policy.VpnIkePolicy, ike_policy, **attrs) + + def delete_vpn_ike_policy(self, ike_policy, ignore_missing=True): + """Delete an IKE policy + + :param ike_policy: The value can be either the ID of an ike policy, or + a :class:`~openstack.network.v2.vpn_ike_policy.VpnIkePolicy` + instance. + :param bool ignore_missing: + When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` + will be raised when the ike policy does not exist. + When set to ``True``, no exception will be set when attempting to + delete a nonexistent ike policy. + + :returns: ``None`` + """ + self._delete( + _ike_policy.VpnIkePolicy, ike_policy, ignore_missing=ignore_missing + ) + + # ========== IPSecPolicy ========== + def create_vpn_ipsec_policy(self, **attrs): + """Create a new IPsec policy from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy`, + comprised of the properties on the VpnIpsecPolicy class. 
+ + :returns: The results of IPsec policy creation :rtype: + :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + """ + return self._create(_ipsec_policy.VpnIpsecPolicy, **attrs) + + def find_vpn_ipsec_policy(self, name_or_id, ignore_missing=True, **query): + """Find a single IPsec policy + + :param name_or_id: The name or ID of an IPsec policy. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` + will be raised when the resource does not exist. When set to + ``True``, None will be returned when attempting to find a + nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods such as query filters. + :returns: One + :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + or None. + """ + return self._find( + _ipsec_policy.VpnIpsecPolicy, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_vpn_ipsec_policy(self, ipsec_policy): + """Get a single IPsec policy + + :param ipsec_policy: The value can be the ID of an IPcec policy or a + :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + instance. + + :returns: One + :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + :rtype: :class:`~openstack.network.v2.ipsec_policy.VpnIpsecPolicy` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + return self._get(_ipsec_policy.VpnIpsecPolicy, ipsec_policy) + + def vpn_ipsec_policies(self, **query): + """Return a generator of IPsec policies + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. 
+ + :returns: A generator of IPsec policy objects + :rtype: :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + """ + return self._list(_ipsec_policy.VpnIpsecPolicy, **query) + + def update_vpn_ipsec_policy(self, ipsec_policy, **attrs): + """Update an IPsec policy + + :ipsec_policy: Either the id of an IPsec policy or a + :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + instance. + :param attrs: The attributes to update on the IPsec policy + represented by ``ipsec_policy``. + + :returns: The updated IPsec policy + :rtype: :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + """ + return self._update( + _ipsec_policy.VpnIpsecPolicy, ipsec_policy, **attrs + ) + + def delete_vpn_ipsec_policy(self, ipsec_policy, ignore_missing=True): + """Delete an IPsec policy + + :param ipsec_policy: The value can be either the ID of an IPsec policy, + or a + :class:`~openstack.network.v2.vpn_ipsec_policy.VpnIpsecPolicy` + instance. + :param bool ignore_missing: + When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` + will be raised when the IPsec policy does not exist. When set to + ``True``, no exception will be set when attempting to delete a + nonexistent IPsec policy. + + :returns: ``None`` + """ + self._delete( + _ipsec_policy.VpnIpsecPolicy, + ipsec_policy, + ignore_missing=ignore_missing, + ) + + # ========== VPN Service ========== + def create_vpn_service(self, **attrs): + """Create a new vpn service from attributes + + :param attrs: Keyword arguments which will be used to create + a :class:`~openstack.network.v2.vpn_service.VpnService`, + comprised of the properties on the VpnService class. 
+ + :returns: The results of vpn service creation + :rtype: :class:`~openstack.network.v2.vpn_service.VpnService` + """ + return self._create(_vpn_service.VpnService, **attrs) + + def delete_vpn_service(self, vpn_service, ignore_missing=True): + """Delete a vpn service + + :param vpn_service: + The value can be either the ID of a vpn service or a + :class:`~openstack.network.v2.vpn_service.VpnService` instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the vpn service does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent vpn service. + + :returns: ``None`` + """ + self._delete( + _vpn_service.VpnService, vpn_service, ignore_missing=ignore_missing + ) + + def find_vpn_service(self, name_or_id, ignore_missing=True, **query): + """Find a single vpn service + + :param name_or_id: The name or ID of a vpn service. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One :class:`~openstack.network.v2.vpn_service.VpnService` + or None + """ + return self._find( + _vpn_service.VpnService, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_vpn_service(self, vpn_service): + """Get a single vpn service + + :param vpn_service: The value can be the ID of a vpn service or a + :class:`~openstack.network.v2.vpn_service.VpnService` + instance. + + :returns: One + :class:`~openstack.network.v2.vpn_service.VpnService` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + return self._get(_vpn_service.VpnService, vpn_service) + + def vpn_services(self, **query): + """Return a generator of vpn services + + :param dict query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of vpn service objects + :rtype: :class:`~openstack.network.v2.vpn_service.VpnService` + """ + return self._list(_vpn_service.VpnService, **query) + + def update_vpn_service(self, vpn_service, **attrs): + """Update a vpn service + + :param vpn_service: Either the id of a vpn service or a + :class:`~openstack.network.v2.vpn_service.VpnService` instance. + :param attrs: The attributes to update on the VPN service + represented by ``vpn_service``. + + :returns: The updated vpnservice + :rtype: :class:`~openstack.network.v2.vpn_service.VpnService` + """ + return self._update(_vpn_service.VpnService, vpn_service, **attrs) + + def create_floating_ip_port_forwarding(self, floating_ip, **attrs): + """Create a new floating ip port forwarding from attributes + + :param floating_ip: The value can be either the ID of a floating ip + or a :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param attrs:Keyword arguments which will be used to create + a:class:`~openstack.network.v2.port_forwarding.PortForwarding`, + comprised of the properties on the PortForwarding class. + + :returns: The results of port forwarding creation + :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` + """ + floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._create( + _port_forwarding.PortForwarding, + floatingip_id=floatingip.id, + **attrs, + ) + + def delete_floating_ip_port_forwarding( + self, floating_ip, port_forwarding, ignore_missing=True + ): + """Delete a floating IP port forwarding. + + :param floating_ip: The value can be either the ID of a floating ip + or a :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. 
+ :param port_forwarding: The value can be either the ID of a port + forwarding or a + :class:`~openstack.network.v2.port_forwarding.PortForwarding` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the floating ip does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent ip. + + :returns: ``None`` + """ + floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + self._delete( + _port_forwarding.PortForwarding, + port_forwarding, + ignore_missing=ignore_missing, + floatingip_id=floatingip.id, + ) + + def find_floating_ip_port_forwarding( + self, floating_ip, port_forwarding_id, ignore_missing=True, **query + ): + """Find a floating ip port forwarding + + :param floating_ip: The value can be the ID of the Floating IP that the + port forwarding belongs or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. + :param port_forwarding_id: The ID of a port forwarding. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.port_forwarding.PortForwarding` + or None + """ + floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._find( + _port_forwarding.PortForwarding, + port_forwarding_id, + ignore_missing=ignore_missing, + floatingip_id=floatingip.id, + **query, + ) + + def get_floating_ip_port_forwarding(self, floating_ip, port_forwarding): + """Get a floating ip port forwarding + + :param floating_ip: The value can be the ID of the Floating IP that the + port forwarding belongs or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` instance. + :param port_forwarding: The value can be the ID of a port forwarding + or a + :class:`~openstack.network.v2.port_forwarding.PortForwarding` + instance. + :returns: One + :class:`~openstack.network.v2.port_forwarding.PortForwarding` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._get( + _port_forwarding.PortForwarding, + port_forwarding, + floatingip_id=floatingip.id, + ) + + def floating_ip_port_forwardings(self, floating_ip, **query): + """Return a generator of floating ip port forwarding - def create_service_profile(self, **attrs): - """Create a new network service flavor profile from attributes + :param floating_ip: The value can be the ID of the Floating IP that the + port forwarding belongs or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param kwargs **query: Optional query parameters to be sent to limit + the resources being returned. 
+ :returns: A generator of floating ip port forwarding objects + :rtype: + :class:`~openstack.network.v2.port_forwarding.PortForwarding` + """ + floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._list( + _port_forwarding.PortForwarding, + floatingip_id=floatingip.id, + **query, + ) + + def update_floating_ip_port_forwarding( + self, floating_ip, port_forwarding, **attrs + ): + """Update a floating ip port forwarding + + :param floating_ip: The value can be the ID of the Floating IP that the + port forwarding belongs or a + :class:`~openstack.network.v2.floating_ip.FloatingIP` + instance. + :param port_forwarding: Either the id of a floating ip port forwarding + or a + :class:`~openstack.network.v2.port_forwarding.PortForwarding`instance. + :param attrs: The attributes to update on the floating ip port + forwarding represented by ``floating_ip``. + + :returns: The updated floating ip port forwarding + :rtype: :class:`~openstack.network.v2.port_forwarding.PortForwarding` + """ + floatingip = self._get_resource(_floating_ip.FloatingIP, floating_ip) + return self._update( + _port_forwarding.PortForwarding, + port_forwarding, + floatingip_id=floatingip.id, + **attrs, + ) + + def create_conntrack_helper(self, router, **attrs): + """Create a new L3 conntrack helper from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.service_profile - .ServiceProfile`, - comprised of the properties on the ServiceProfile - class. + :param router: Either the router ID or an instance of + :class:`~openstack.network.v2.router.Router` + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`, + comprised of the properties on the ConntrackHelper class. 
- :returns: The results of service profile creation - :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` + :returns: The results of conntrack helper creation + :rtype: + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` """ - return self._create(_service_profile.ServiceProfile, **attrs) + router = self._get_resource(_router.Router, router) + return self._create( + _l3_conntrack_helper.ConntrackHelper, router_id=router.id, **attrs + ) - def delete_service_profile(self, service_profile, ignore_missing=True): - """Delete a network service flavor profile + def conntrack_helpers(self, router, **query): + """Return a generator of conntrack helpers - :param service_profile: The value can be either the ID of a service - profile or a - :class:`~openstack.network.v2.service_profile - .ServiceProfile` instance. + :param router: Either the router ID or an instance of + :class:`~openstack.network.v2.router.Router` + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. + :returns: A generator of conntrack helper objects + :rtype: + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` + """ + router = self._get_resource(_router.Router, router) + return self._list( + _l3_conntrack_helper.ConntrackHelper, router_id=router.id, **query + ) + + def get_conntrack_helper(self, conntrack_helper, router): + """Get a single L3 conntrack helper + + :param conntrack_helper: The value can be the ID of a L3 conntrack + helper or a + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`, + instance. + :param router: The value can be the ID of a Router or a + :class:`~openstack.network.v2.router.Router` instance. + + :returns: One + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ """ + router = self._get_resource(_router.Router, router) + return self._get( + _l3_conntrack_helper.ConntrackHelper, + conntrack_helper, + router_id=router.id, + ) + + def update_conntrack_helper(self, conntrack_helper, router, **attrs): + """Update a L3 conntrack_helper + + :param conntrack_helper: The value can be the ID of a L3 conntrack + helper or a + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`, + instance. + :param router: The value can be the ID of a Router or a + :class:`~openstack.network.v2.router.Router` instance. + :param attrs: The attributes to update on the L3 conntrack helper + represented by ``conntrack_helper``. + + :returns: The updated conntrack helper + :rtype: + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper` + + """ + router = self._get_resource(_router.Router, router) + return self._update( + _l3_conntrack_helper.ConntrackHelper, + conntrack_helper, + router_id=router.id, + **attrs, + ) + + def delete_conntrack_helper( + self, conntrack_helper, router, ignore_missing=True + ): + """Delete a L3 conntrack_helper + + :param conntrack_helper: The value can be the ID of a L3 conntrack + helper or a + :class:`~openstack.network.v2.l3_conntrack_helper.ConntrackHelper`, + instance. + :param router: The value can be the ID of a Router or a + :class:`~openstack.network.v2.router.Router` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the service profile does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent service profile. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the floating ip does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent ip. 
:returns: ``None`` """ - self._delete(_service_profile.ServiceProfile, service_profile, - ignore_missing=ignore_missing) + router = self._get_resource(_router.Router, router) + self._delete( + _l3_conntrack_helper.ConntrackHelper, + conntrack_helper, + router_id=router.id, + ignore_missing=ignore_missing, + ) + + def create_tap_flow(self, **attrs): + """Create a new Tap Flow from attributes""" + return self._create(_tap_flow.TapFlow, **attrs) + + def delete_tap_flow(self, tap_flow, ignore_missing=True): + """Delete a Tap Flow""" + self._delete( + _tap_flow.TapFlow, tap_flow, ignore_missing=ignore_missing + ) + + def find_tap_flow(self, name_or_id, ignore_missing=True, **query): + """Find a single Tap Service""" + return self._find( + _tap_flow.TapFlow, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_tap_flow(self, tap_flow): + """Get a signle Tap Flow""" + return self._get(_tap_flow.TapFlow, tap_flow) + + def update_tap_flow(self, tap_flow, **attrs): + """Update a Tap Flow""" + return self._update(_tap_flow.TapFlow, tap_flow, **attrs) + + def tap_flows(self, **query): + """Return a generator of Tap Flows""" + return self._list(_tap_flow.TapFlow, **query) + + def create_tap_mirror(self, **attrs): + """Create a new Tap Mirror from attributes""" + return self._create(_tap_mirror.TapMirror, **attrs) + + def delete_tap_mirror(self, tap_mirror, ignore_missing=True): + """Delete a Tap Mirror""" + self._delete( + _tap_mirror.TapMirror, tap_mirror, ignore_missing=ignore_missing + ) + + def find_tap_mirror(self, name_or_id, ignore_missing=True, **query): + """Find a single Tap Mirror""" + return self._find( + _tap_mirror.TapMirror, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_tap_mirror(self, tap_mirror): + """Get a signle Tap Mirror""" + return self._get(_tap_mirror.TapMirror, tap_mirror) + + def update_tap_mirror(self, tap_mirror, **attrs): + """Update a Tap Mirror""" + return self._update(_tap_mirror.TapMirror, 
tap_mirror, **attrs) + + def tap_mirrors(self, **query): + """Return a generator of Tap Mirrors""" + return self._list(_tap_mirror.TapMirror, **query) + + def create_tap_service(self, **attrs): + """Create a new Tap Service from attributes""" + return self._create(_tap_service.TapService, **attrs) + + def delete_tap_service(self, tap_service, ignore_missing=True): + """Delete a Tap Service""" + self._delete( + _tap_service.TapService, tap_service, ignore_missing=ignore_missing + ) + + def find_tap_service(self, name_or_id, ignore_missing=True, **query): + """Find a single Tap Service""" + return self._find( + _tap_service.TapService, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_tap_service(self, tap_service): + """Get a signle Tap Service""" + return self._get(_tap_service.TapService, tap_service) + + def update_tap_service(self, tap_service, **attrs): + """Update a Tap Service""" + return self._update(_tap_service.TapService, tap_service, **attrs) + + def tap_services(self, **query): + """Return a generator of Tap Services""" + return self._list(_tap_service.TapService, **query) + + def create_sfc_flow_classifier(self, **attrs): + """Create a new Flow Classifier from attributes + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier`, + comprised of the properties on the SfcFlowClassifier class. + + :returns: The results of SFC Flow Classifier creation + :rtype: + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` + """ - def find_service_profile(self, name_or_id, ignore_missing=True): - """Find a single network service flavor profile + return self._create(_sfc_flow_classifier.SfcFlowClassifier, **attrs) - :param name_or_id: The name or ID of a service profile. 
+ def delete_sfc_flow_classifier(self, flow_classifier, ignore_missing=True): + """Delete a Flow Classifier + + :param flow_classifier: + The value can be either the ID of a flow classifier or a + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.service_profile - .ServiceProfile` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the flow classifier does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent flow classifier. + + :returns: ``None`` """ - return self._find(_service_profile.ServiceProfile, name_or_id, - ignore_missing=ignore_missing) + self._delete( + _sfc_flow_classifier.SfcFlowClassifier, + flow_classifier, + ignore_missing=ignore_missing, + ) - def get_service_profile(self, service_profile): - """Get a single network service flavor profile + def find_sfc_flow_classifier( + self, name_or_id, ignore_missing=True, **query + ): + """Find a single Flow Classifier - :param service_profile: The value can be the ID of a service_profile or - a - :class:`~openstack.network.v2.service_profile - .ServiceProfile` instance. + :param str name_or_id: The name or ID of an SFC flow classifier. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.sfc_flow_classifier. 
+ SfcFlowClassifier` or None + """ + return self._find( + _sfc_flow_classifier.SfcFlowClassifier, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_sfc_flow_classifier(self, flow_classifier): + """Get a single Flow Classifier + + :param flow_classifier: + The value can be the ID of an SFC flow classifier or a + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` + instance. - :returns: One :class:`~openstack.network.v2.service_profile - .ServiceProfile` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :returns: + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_service_profile.ServiceProfile, service_profile) - - def service_profiles(self, **query): - """Return a generator of network service flavor profiles + return self._get( + _sfc_flow_classifier.SfcFlowClassifier, flow_classifier + ) - :param dict query: Optional query parameters to be sent to limit the - resources returned. Available parameters inclue: + def update_sfc_flow_classifier(self, flow_classifier, **attrs): + """Update a Flow Classifier - * ``description``: The description of the service flavor profile - * ``driver``: Provider driver for the service flavor profile - * ``is_enabled``: Whether the profile is enabled - * ``project_id``: The owner project ID + :param flow_classifier: The value can be the ID of a Flow Classifier + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier`, + instance. + :param attrs: The attributes to update on the Flow Classifier - :returns: A generator of service profile objects - :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` + :returns: The updated Flow Classifier. 
+ :rtype: + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` """ - return self._list(_service_profile.ServiceProfile, paginated=True, - **query) + return self._update( + _sfc_flow_classifier.SfcFlowClassifier, flow_classifier, **attrs + ) - def update_service_profile(self, service_profile, **attrs): - """Update a network flavor service profile + def sfc_flow_classifiers(self, **query): + """Return a generator of Flow Classifiers - :param service_profile: Either the id of a service profile or a - :class:`~openstack.network.v2.service_profile - .ServiceProfile` instance. - :attrs kwargs: The attributes to update on the service profile - represented by ``value``. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: - :returns: The updated service profile - :rtype: :class:`~openstack.network.v2.service_profile.ServiceProfile` + * ``name``: The name of the flow classifier. + * ``description``: The flow classifier description + * ``ethertype``: Must be IPv4 or IPv6. + * ``protocol``: Flow classifier protocol + + :returns: A generator of SFC Flow classifier objects + :rtype: + :class:`~openstack.network.v2.sfc_flow_classifier. + SfcFlowClassifier` """ - return self._update(_service_profile.ServiceProfile, service_profile, - **attrs) + return self._list(_sfc_flow_classifier.SfcFlowClassifier, **query) - def create_subnet(self, **attrs): - """Create a new subnet from attributes + def create_sfc_port_chain(self, **attrs): + """Create a new Port Chain from attributes - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.subnet.Subnet`, - comprised of the properties on the Subnet class. + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain`, + comprised of the properties on the SfcPortchain class. 
- :returns: The results of subnet creation - :rtype: :class:`~openstack.network.v2.subnet.Subnet` + :returns: The results of SFC Port Chain creation + :rtype: + :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain` """ - return self._create(_subnet.Subnet, **attrs) + return self._create(_sfc_port_chain.SfcPortChain, **attrs) - def delete_subnet(self, subnet, ignore_missing=True): - """Delete a subnet + def delete_sfc_port_chain(self, port_chain, ignore_missing=True): + """Delete a Port Chain - :param subnet: The value can be either the ID of a subnet or a - :class:`~openstack.network.v2.subnet.Subnet` instance. + :param port_chain: + The value can be either the ID of a port chain or a + :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the subnet does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent subnet. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the port chain does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent port chain. :returns: ``None`` """ - self._delete(_subnet.Subnet, subnet, ignore_missing=ignore_missing) + self._delete( + _sfc_port_chain.SfcPortChain, + port_chain, + ignore_missing=ignore_missing, + ) - def find_subnet(self, name_or_id, ignore_missing=True): - """Find a single subnet + def find_sfc_port_chain(self, name_or_id, ignore_missing=True, **query): + """Find a single Port Chain - :param name_or_id: The name or ID of a subnet. + :param str name_or_id: The name or ID of an SFC port chain. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. 
- :returns: One :class:`~openstack.network.v2.subnet.Subnet` or None + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.sfc_port_chain. + SfcPortChain` or None + """ + return self._find( + _sfc_port_chain.SfcPortChain, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_sfc_port_chain(self, port_chain): + """Get a signle Port Chain + + :param port_chain: + The value can be the ID of an SFC port chain or a + :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain` + instance. + + :returns: + :class:`~openstack.network.v2.sfc_port_chain.SfcPortchain` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._find(_subnet.Subnet, name_or_id, - ignore_missing=ignore_missing) + return self._get(_sfc_port_chain.SfcPortChain, port_chain) - def get_subnet(self, subnet): - """Get a single subnet + def update_sfc_port_chain(self, port_chain, **attrs): + """Update a Port Chain - :param subnet: The value can be the ID of a subnet or a - :class:`~openstack.network.v2.subnet.Subnet` instance. + :param flow_classifier: The value can be the ID of a Flow Classifier + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier`, + instance. + :param attrs: The attributes to update on the Flow Classifier - :returns: One :class:`~openstack.network.v2.subnet.Subnet` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :returns: The updated Flow Classifier. 
+ :rtype: + :class:`~openstack.network.v2.sfc_flow_classifier.SfcFlowClassifier` """ - return self._get(_subnet.Subnet, subnet) + return self._update(_sfc_port_chain.SfcPortChain, port_chain, **attrs) - def subnets(self, **query): - """Return a generator of subnets + def sfc_port_chains(self, **query): + """Return a generator of Port Chains - :param dict query: Optional query parameters to be sent to limit + :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: - * ``cidr``: Subnet CIDR - * ``description``: The subnet description - * ``gateway_ip``: Subnet gateway IP address - * ``ip_version``: Subnet IP address version - * ``ipv6_address_mode``: The IPv6 adress mode - * ``ipv6_ra_mode``: The IPv6 router advertisement mode - * ``is_dhcp_enabled``: Subnet has DHCP enabled (boolean) - * ``name``: Subnet name - * ``network_id``: ID of network that owns the subnets - * ``project_id``: Owner tenant ID - * ``subnet_pool_id``: The subnet pool ID from which to obtain a - CIDR. + * ``name``: The name of the port chain + * ``description``: The port chain description - :returns: A generator of subnet objects - :rtype: :class:`~openstack.network.v2.subnet.Subnet` + :returns: A generator of SFC port chain objects + :rtype: + :class:`~openstack.network.v2.sfc_port_chain.SfcPortChain` """ - return self._list(_subnet.Subnet, paginated=False, **query) + return self._list(_sfc_port_chain.SfcPortChain, **query) - def update_subnet(self, subnet, **attrs): - """Update a subnet + def create_sfc_port_pair(self, **attrs): + """Create a new Port Pair from attributes - :param subnet: Either the id of a subnet or a - :class:`~openstack.network.v2.subnet.Subnet` instance. - :param dict attrs: The attributes to update on the subnet represented - by ``subnet``. 
+ :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair`, + comprised of the properties on the SfcPortPair class. - :returns: The updated subnet - :rtype: :class:`~openstack.network.v2.subnet.Subnet` + :returns: The results of SFC Port Pair creation + :rtype: + :class:`~openstack.network.v2.sfc_port_pair.SfPortPair` """ - return self._update(_subnet.Subnet, subnet, **attrs) + return self._create(_sfc_port_pair.SfcPortPair, **attrs) - def create_subnet_pool(self, **attrs): - """Create a new subnet pool from attributes + def delete_sfc_port_pair(self, port_pair, ignore_missing=True): + """Delete a Port Pair - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.subnet_pool.SubnetPool`, - comprised of the properties on the SubnetPool class. + :param port_pair: + The value can be either the ID of a port pair or a + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the port pair does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent port pair. - :returns: The results of subnet pool creation - :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` + :returns: ``None`` """ - return self._create(_subnet_pool.SubnetPool, **attrs) + self._delete( + _sfc_port_pair.SfcPortPair, + port_pair, + ignore_missing=ignore_missing, + ) - def delete_subnet_pool(self, subnet_pool, ignore_missing=True): - """Delete a subnet pool + def find_sfc_port_pair(self, name_or_id, ignore_missing=True, **query): + """Find a single Port Pair - :param subnet_pool: The value can be either the ID of a subnet pool or - a :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. + :param str name_or_id: The name or ID of an SFC port pair. 
:param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the subnet pool does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent subnet pool. - - :returns: ``None`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` or None """ - self._delete(_subnet_pool.SubnetPool, subnet_pool, - ignore_missing=ignore_missing) + return self._find( + _sfc_port_pair.SfcPortPair, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) - def find_subnet_pool(self, name_or_id, ignore_missing=True): - """Find a single subnet pool + def get_sfc_port_pair(self, port_pair): + """Get a signle Port Pair - :param name_or_id: The name or ID of a subnet pool. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.subnet_pool.SubnetPool` - or None + :param port_pair: + The value can be the ID of an SFC port pair or a + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` + instance. + + :returns: + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" - return self._find(_subnet_pool.SubnetPool, name_or_id, - ignore_missing=ignore_missing) + return self._get(_sfc_port_pair.SfcPortPair, port_pair) - def get_subnet_pool(self, subnet_pool): - """Get a single subnet pool + def update_sfc_port_pair(self, port_pair, **attrs): + """Update a Port Pair - :param subnet_pool: The value can be the ID of a subnet pool or a - :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. + :param port_pair: The value can be the ID of a Port Pair + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair`, + instance. + :param attrs: The attributes to update on the Port Pair - :returns: One :class:`~openstack.network.v2.subnet_pool.SubnetPool` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :returns: The updated Port Pair. + :rtype: + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` """ - return self._get(_subnet_pool.SubnetPool, subnet_pool) + return self._update(_sfc_port_pair.SfcPortPair, port_pair, **attrs) - def subnet_pools(self, **query): - """Return a generator of subnet pools + def sfc_port_pairs(self, **query): + """Return a generator of Port Pairs - :param kwargs \*\*query: Optional query parameters to be sent to limit + :param kwargs query: Optional query parameters to be sent to limit the resources being returned. Available parameters include: - * ``address_scope_id``: Subnet pool address scope ID - * ``description``: The subnet pool description - * ``ip_version``: The IP address family - * ``is_default``: Subnet pool is the default (boolean) - * ``is_shared``: Subnet pool is shared (boolean) - * ``name``: Subnet pool name - * ``project_id``: Owner tenant ID + * ``name``: The name of the port pair. + * ``description``: The port pair description. 
- :returns: A generator of subnet pool objects - :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` + :returns: A generator of SFC port pair objects + :rtype: + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPair` """ - return self._list(_subnet_pool.SubnetPool, paginated=False, **query) + return self._list(_sfc_port_pair.SfcPortPair, **query) - def update_subnet_pool(self, subnet_pool, **attrs): - """Update a subnet pool + def create_sfc_port_pair_group(self, **attrs): + """Create a new Port Pair Group from attributes - :param subnet_pool: Either the ID of a subnet pool or a - :class:`~openstack.network.v2.subnet_pool.SubnetPool` instance. - :param dict attrs: The attributes to update on the subnet pool - represented by ``subnet_pool``. + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup`, + comprised of the properties on the SfcPortPairGroup class. - :returns: The updated subnet pool - :rtype: :class:`~openstack.network.v2.subnet_pool.SubnetPool` + :returns: The results of SFC Port Pair Group creation + :rtype: + :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` """ - return self._update(_subnet_pool.SubnetPool, subnet_pool, **attrs) + return self._create(_sfc_port_pair_group.SfcPortPairGroup, **attrs) - def create_vpn_service(self, **attrs): - """Create a new vpn service from attributes + def delete_sfc_port_pair_group(self, port_pair_group, ignore_missing=True): + """Delete a Port Pair Group - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.network.v2.vpn_service.VPNService`, - comprised of the properties on the VPNService class. + :param port_pair_group: + The value can be either the ID of a port pair group or a + :class:`~openstack.network.v2.sfc_port_pair_group. + SfcPortPairGroup` instance. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the port pair group does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent port pair group. - :returns: The results of vpn service creation - :rtype: :class:`~openstack.network.v2.vpn_service.VPNService` + :returns: ``None`` """ - return self._create(_vpn_service.VPNService, **attrs) + self._delete( + _sfc_port_pair_group.SfcPortPairGroup, + port_pair_group, + ignore_missing=ignore_missing, + ) - def delete_vpn_service(self, vpn_service, ignore_missing=True): - """Delete a vpn service + def find_sfc_port_pair_group( + self, name_or_id, ignore_missing=True, **query + ): + """Find a single Port Pair Group - :param vpn_service: - The value can be either the ID of a vpn service or a - :class:`~openstack.network.v2.vpn_service.VPNService` instance. + :param str name_or_id: The name or ID of an SFC port pair group. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the vpn service does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent vpn service. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. + :returns: One + :class:`~openstack.network.v2.sfc_port_pair_group. 
+ SfcPortPairGroup` or None + """ + return self._find( + _sfc_port_pair_group.SfcPortPairGroup, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_sfc_port_pair_group(self, port_pair_group): + """Get a signle Port Pair Group + + :param port_pair_group: + The value can be the ID of an SFC port pair group or a + :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` + instance. - :returns: ``None`` + :returns: + :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - self._delete(_vpn_service.VPNService, vpn_service, - ignore_missing=ignore_missing) + return self._get( + _sfc_port_pair_group.SfcPortPairGroup, port_pair_group + ) - def find_vpn_service(self, name_or_id, ignore_missing=True): - """Find a single vpn service + def update_sfc_port_pair_group(self, port_pair_group, **attrs): + """Update a Port Pair Group - :param name_or_id: The name or ID of a vpn service. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.network.v2.vpn_service.VPNService` - or None + :param port_pair_group: The value can be the ID of a Port Pair Group + :class:`~openstack.network.v2.sfc_port_pair.SfcPortPairGroup`, + instance. + :param attrs: The attributes to update on the Port Pair Group + + :returns: The updated Port Pair Group. 
+ :rtype: + :class:`~openstack.network.v2.sfc_port_pair_group.SfcPortPairGroup` """ - return self._find(_vpn_service.VPNService, name_or_id, - ignore_missing=ignore_missing) + return self._update( + _sfc_port_pair_group.SfcPortPairGroup, port_pair_group, **attrs + ) - def get_vpn_service(self, vpn_service): - """Get a single vpn service + def sfc_port_pair_groups(self, **query): + """Return a generator of Port Pair Groups - :param vpn_service: The value can be the ID of a vpn service or a - :class:`~openstack.network.v2.vpn_service.VPNService` - instance. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: - :returns: One - :class:`~openstack.network.v2.vpn_service.VPNService` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + * ``name``: The name of the port pair. + * ``description``: The port pair description. + + :returns: A generator of SFC port pair group objects + :rtype: + :class:`~openstack.network.v2.sfc_port_pair_group. + SfcPortPairGroup` """ - return self._get(_vpn_service.VPNService, vpn_service) + return self._list(_sfc_port_pair_group.SfcPortPairGroup, **query) - def vpn_services(self, **query): - """Return a generator of vpn services + def create_sfc_service_graph(self, **attrs): + """Create a new Service Graph from attributes - :param dict query: Optional query parameters to be sent to limit - the resources being returned. + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph`, + comprised of the properties on the SfcServiceGraph class. 
- :returns: A generator of vpn service objects - :rtype: :class:`~openstack.network.v2.vpn_service.VPNService` + :returns: The results of SFC Service Graph creation + :rtype: + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` """ - return self._list(_vpn_service.VPNService, paginated=False, **query) + return self._create(_sfc_sservice_graph.SfcServiceGraph, **attrs) - def update_vpn_service(self, vpn_service, **attrs): - """Update a vpn service + def delete_sfc_service_graph(self, service_graph, ignore_missing=True): + """Delete a Service Graph - :param vpn_service: Either the id of a vpn service or a - :class:`~openstack.network.v2.vpn_service.VPNService` instance. - :param dict attrs: The attributes to update on the VPN service - represented by ``vpn_service``. + :param service_graph: + The value can be either the ID of a service graph or a + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the service graph does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent service graph. - :returns: The updated vpnservice - :rtype: :class:`~openstack.network.v2.vpn_service.VPNService` + :returns: ``None`` + """ + self._delete( + _sfc_sservice_graph.SfcServiceGraph, + service_graph, + ignore_missing=ignore_missing, + ) + + def find_sfc_service_graph(self, name_or_id, ignore_missing=True, **query): + """Find a single Service Graph + + :param str name_or_id: The name or ID of an SFC service graph. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ :returns: One + :class:`~openstack.network.v2.sfc_service_graph. + SfcServiceGraph` or None + """ + return self._find( + _sfc_sservice_graph.SfcServiceGraph, + name_or_id, + ignore_missing=ignore_missing, + **query, + ) + + def get_sfc_service_graph(self, service_graph): + """Get a signle Service Graph + + :param service_graph: + The value can be the ID of an SFC service graph or a + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` + instance. + + :returns: + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + return self._get(_sfc_sservice_graph.SfcServiceGraph, service_graph) + + def update_sfc_service_graph(self, service_graph, **attrs): + """Update a Service Graph + + :param service_graph: The value can be the ID of a Service Graph + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph`, + instance. + :param attrs: The attributes to update on the Service Graph + + :returns: The updated Service Graph. + :rtype: + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` """ - return self._update(_vpn_service.VPNService, vpn_service, **attrs) + return self._update( + _sfc_sservice_graph.SfcServiceGraph, service_graph, **attrs + ) + + def sfc_service_graphs(self, **query): + """Return a generator of Service Graphs + + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. Available parameters include: + + * ``name``: The name of the port pair. + * ``description``: The port pair description. 
+ + :returns: A generator of SFC service graph objects + :rtype: + :class:`~openstack.network.v2.sfc_service_graph.SfcServiceGraph` + """ + return self._list(_sfc_sservice_graph.SfcServiceGraph, **query) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. 
+ :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + def _get_cleanup_dependencies(self): + return {'network': {'before': ['identity']}} + + def _service_cleanup( + self, + dry_run=True, + client_status_queue=None, + identified_resources=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + project_id = self.get_project_id() + + # check if the VPN service plugin is configured + vpn_plugin = list(self.service_providers(service_type="VPN")) + if vpn_plugin: + if not self.should_skip_resource_cleanup( + "vpn_ipsec_site_connection", skip_resources + ): + for obj in self.vpn_ipsec_site_connections(): + self._service_cleanup_del_res( + self.delete_vpn_ipsec_site_connection, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + if not self.should_skip_resource_cleanup( + "vpn_service", skip_resources + ): + for obj in self.vpn_services(): + 
self._service_cleanup_del_res( + self.delete_vpn_service, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + if not self.should_skip_resource_cleanup( + "vpn_endpoint_group", skip_resources + ): + for obj in self.vpn_endpoint_groups(): + self._service_cleanup_del_res( + self.delete_vpn_endpoint_group, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + if not self.should_skip_resource_cleanup( + "vpn_ike_policy", skip_resources + ): + for obj in self.vpn_ike_policies(): + self._service_cleanup_del_res( + self.delete_vpn_ike_policy, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + if not self.should_skip_resource_cleanup( + "vpn_ipsec_policy", skip_resources + ): + for obj in self.vpn_ipsec_policies(): + self._service_cleanup_del_res( + self.delete_vpn_ipsec_policy, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + if not self.should_skip_resource_cleanup( + "floating_ip", skip_resources + ): + # Delete floating_ips in the project if no filters defined OR all + # filters are matching and port_id is empty + for obj in self.ips(project_id=project_id): + self._service_cleanup_del_res( + self.delete_ip, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=fip_cleanup_evaluation, + ) + + if not self.should_skip_resource_cleanup( + "security_group", skip_resources + ): + # Delete (try to delete) all security groups in the 
project + # Let's hope we can't drop SG in use + for obj in self.security_groups(project_id=project_id): + if obj.name != 'default': + self._service_cleanup_del_res( + self.delete_security_group, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + if not ( + self.should_skip_resource_cleanup("network", skip_resources) + or self.should_skip_resource_cleanup("router", skip_resources) + or self.should_skip_resource_cleanup("port", skip_resources) + or self.should_skip_resource_cleanup("subnet", skip_resources) + ): + # Networks are crazy, try to delete router+net+subnet + # if there are no "other" ports allocated on the net + for net in self.networks(project_id=project_id): + network_has_ports_allocated = False + router_if = list() + for port in self.ports( + project_id=project_id, network_id=net.id + ): + self.log.debug(f'Looking at port {port}') + if port.device_owner in [ + 'network:router_interface', + 'network:router_interface_distributed', + 'network:ha_router_replicated_interface', + ]: + router_if.append(port) + elif port.device_owner == 'network:dhcp': + # we don't treat DHCP as a real port + continue + elif port.device_owner is None or port.device_owner == '': + # Nobody owns the port - go with it + continue + elif ( + identified_resources + and port.device_id not in identified_resources + ): + # It seems some no other service identified this + # resource to be deleted. 
We can assume it doesn't + # count + network_has_ports_allocated = True + if network_has_ports_allocated: + # If some ports are on net - we cannot delete it + continue + self.log.debug(f'Network {net} should be deleted') + # __Check__ if we need to drop network according to filters + network_must_be_deleted = self._service_cleanup_del_res( + self.delete_network, + net, + dry_run=True, + client_status_queue=None, + identified_resources=None, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + if not network_must_be_deleted: + # If not - check another net + continue + # otherwise disconnect router, drop net, subnet, router + # Disconnect + for port in router_if: + if client_status_queue: + client_status_queue.put(port) + + router = self.get_router(port.device_id) + if not dry_run: + # Router interfaces cannot be deleted when the router + # has static routes, so remove those first + if len(router.routes) > 0: + try: + self.remove_extra_routes_from_router( + router, + {"router": {"routes": router.routes}}, + ) + except exceptions.SDKException: + self.log.error( + f"Cannot delete routes {router.routes} " + f"from router {router}" + ) + + try: + self.remove_interface_from_router( + router=port.device_id, port_id=port.id + ) + except exceptions.SDKException: + self.log.error(f'Cannot delete object {obj}') + # router disconnected, drop it + self._service_cleanup_del_res( + self.delete_router, + router, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=None, + resource_evaluation_fn=None, + ) + # Drop ports not belonging to anybody + for port in self.ports( + project_id=project_id, network_id=net.id + ): + if port.device_owner is None or port.device_owner == '': + self._service_cleanup_del_res( + self.delete_port, + port, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=None, + resource_evaluation_fn=None, + ) + + # 
Drop all subnets in the net (no further conditions) + for obj in self.subnets( + project_id=project_id, network_id=net.id + ): + self._service_cleanup_del_res( + self.delete_subnet, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=None, + resource_evaluation_fn=None, + ) + + # And now the network itself (we are here definitely only if we + # need that) + self._service_cleanup_del_res( + self.delete_network, + net, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=None, + resource_evaluation_fn=None, + ) + else: + self.log.debug( + "Skipping cleanup of networks, routers, ports and subnets " + "as those resources require all of them to be cleaned up" + "together, but at least one should be kept" + ) + + if not self.should_skip_resource_cleanup("router", skip_resources): + # It might happen, that we have routers not attached to anything + for obj in self.routers(): + ports = list(self.ports(device_id=obj.id)) + if len(ports) == 0: + self._service_cleanup_del_res( + self.delete_router, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=None, + resource_evaluation_fn=None, + ) + + +def fip_cleanup_evaluation(obj, identified_resources=None, filters=None): + """Determine whether Floating IP should be deleted + + :param Resource obj: Floating IP object + :param dict identified_resources: Optional dictionary with resources + identified by other services for deletion. 
+ :param dict filters: dictionary with parameters + """ + if filters is not None and ( + obj.port_id is not None + and identified_resources + and obj.port_id not in identified_resources + ): + # If filters are set, but port is not empty and will not be empty - + # skip + return False + else: + return True diff --git a/openstack/network/v2/address_group.py b/openstack/network/v2/address_group.py new file mode 100644 index 0000000000..9aebf49e6c --- /dev/null +++ b/openstack/network/v2/address_group.py @@ -0,0 +1,91 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class AddressGroup(resource.Resource): + """Address group extension.""" + + resource_key = 'address_group' + resources_key = 'address_groups' + base_path = '/address-groups' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + 'sort_key', + 'sort_dir', + 'name', + 'description', + 'project_id', + ) + + # Properties + #: The ID of the address group. + id = resource.Body('id') + #: The address group name. + name = resource.Body('name') + #: The address group name. + description = resource.Body('description') + #: The ID of the project that owns the address group. 
+ project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The IP addresses of the address group. + addresses = resource.Body('addresses', type=list) + + def _put(self, session, url, body): + resp = session.put(url, json=body) + exceptions.raise_from_response(resp) + return resp + + def add_addresses(self, session, addresses): + """Add addresses into the address group. + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param list addresses: The list of address strings. + + :returns: The response as a AddressGroup object with updated addresses + + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'add_addresses') + resp = self._put(session, url, {'addresses': addresses}) + self._translate_response(resp) + return self + + def remove_addresses(self, session, addresses): + """Remove addresses from the address group. + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param list addresses: The list of address strings. + + :returns: The response as a AddressGroup object with updated addresses + + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'remove_addresses') + resp = self._put(session, url, {'addresses': addresses}) + self._translate_response(resp) + return self diff --git a/openstack/network/v2/address_scope.py b/openstack/network/v2/address_scope.py index 1364fcfdd6..ca1798826b 100644 --- a/openstack/network/v2/address_scope.py +++ b/openstack/network/v2/address_scope.py @@ -10,27 +10,31 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class AddressScope(resource.Resource): """Address scope extension.""" + resource_key = 'address_scope' resources_key = 'address_scopes' base_path = '/address-scopes' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'name', 'ip_version', - project_id='tenant_id', + 'name', + 'ip_version', + 'project_id', + 'sort_key', + 'sort_dir', is_shared='shared', ) @@ -38,7 +42,9 @@ class AddressScope(resource.Resource): #: The address scope name. name = resource.Body('name') #: The ID of the project that owns the address scope. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The IP address family of the address scope. #: *Type: int* ip_version = resource.Body('ip_version', type=int) diff --git a/openstack/network/v2/agent.py b/openstack/network/v2/agent.py index bfda9d2b0d..181d10dcf6 100644 --- a/openstack/network/v2/agent.py +++ b/openstack/network/v2/agent.py @@ -10,30 +10,38 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import exceptions +from openstack.network.v2 import bgp_speaker as _speaker +from openstack import resource from openstack import utils class Agent(resource.Resource): """Neutron agent extension.""" + resource_key = 'agent' resources_key = 'agents' base_path = '/agents' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = False - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # NOTE: We skip query for JSON fields and datetime fields _query_mapping = resource.QueryParameters( - 'agent_type', 'availability_zone', 'binary', 'description', 'host', + 'agent_type', + 'availability_zone', + 'binary', + 'description', + 'host', 'topic', - is_admin_state_up='admin_state_up', is_alive='alive', + is_admin_state_up='admin_state_up', + is_alive='alive', ) # Properties @@ -59,34 +67,91 @@ class Agent(resource.Resource): #: Whether or not the network agent is alive. #: *Type: bool* is_alive = resource.Body('alive', type=bool) + #: Whether or not the agent is succesffully synced towards placement. + #: Agents supporting the guaranteed minimum bandwidth feature share their + #: resource view with neutron-server and neutron-server share this view + #: with placement, resources_synced represents the success of the latter. + #: The value None means no resource view synchronization to Placement was + #: attempted. true / false values signify the success of the last + #: synchronization attempt. + #: *Type: bool* + resources_synced = resource.Body('resources_synced', type=bool) #: Timestamp when the network agent was last started. started_at = resource.Body('started_at') #: The messaging queue topic the network agent subscribes to. topic = resource.Body('topic') + #: The HA state of the L3 agent. 
This is one of 'active', 'standby' or + #: 'fault' for HA routers, or None for other types of routers. + ha_state = resource.Body('ha_state') - def add_agent_to_network(self, session, **body): + def add_agent_to_network(self, session, network_id): + body = {'network_id': network_id} url = utils.urljoin(self.base_path, self.id, 'dhcp-networks') - resp = session.post(url, endpoint_filter=self.service, json=body) + resp = session.post(url, json=body) + return resp.json() + + def remove_agent_from_network(self, session, network_id): + body = {'network_id': network_id} + url = utils.urljoin( + self.base_path, self.id, 'dhcp-networks', network_id + ) + session.delete(url, json=body) + + def add_router_to_agent(self, session, router): + body = {'router_id': router} + url = utils.urljoin(self.base_path, self.id, 'l3-routers') + resp = session.post(url, json=body) return resp.json() - def remove_agent_from_network(self, session, **body): - network_id = body.get('network_id') - url = utils.urljoin(self.base_path, self.id, 'dhcp-networks', - network_id) - session.delete(url, endpoint_filter=self.service, json=body) + def remove_router_from_agent(self, session, router): + body = {'router_id': router} + url = utils.urljoin(self.base_path, self.id, 'l3-routers', router) + session.delete(url, json=body) + def get_bgp_speakers_hosted_by_dragent(self, session): + """List BGP speakers hosted by a Dynamic Routing Agent -class DHCPAgentHostingNetwork(resource.Resource): - resource_key = 'network' - resources_key = 'networks' - base_path = '/agents/%(agent_id)s/dhcp-networks' - resource_name = 'dhcp-network' - service = network_service.NetworkService() + :param session: The session to communicate through. 
+ :type session: :class:`~keystoneauth1.adapter.Adapter` + + :returns: A list of BgpSpeakers + :rtype: :class:`~openstack.network.v2.bgp_speaker.BgpSpeaker` + """ + url = utils.urljoin(self.base_path, self.id, 'bgp-drinstances') + resp = session.get(url) + exceptions.raise_from_response(resp) + self._body.attributes.update(resp.json()) + speaker_ids = [sp['id'] for sp in resp.json()['bgp_speakers']] + speakers = _speaker.BgpSpeaker.list(session=session) + return [sp for sp in speakers if sp.id in speaker_ids] + + +class NetworkHostingDHCPAgent(Agent): + resource_key = 'agent' + resources_key = 'agents' + resource_name = 'dhcp-agent' + base_path = '/networks/%(network_id)s/dhcp-agents' + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + + # NOTE: Doesn't support query yet. + + +class RouterL3Agent(Agent): + resource_key = 'agent' + resources_key = 'agents' + base_path = '/routers/%(router_id)s/l3-agents' + resource_name = 'l3-agent' # capabilities allow_create = False - allow_get = True - allow_update = False + allow_retrieve = True + allow_commit = False allow_delete = False allow_list = True diff --git a/openstack/network/v2/auto_allocated_topology.py b/openstack/network/v2/auto_allocated_topology.py index a5cddac4b8..69c52a7aad 100644 --- a/openstack/network/v2/auto_allocated_topology.py +++ b/openstack/network/v2/auto_allocated_topology.py @@ -10,20 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class AutoAllocatedTopology(resource.Resource): resource_name = 'auto_allocated_topology' resource_key = 'auto_allocated_topology' base_path = '/auto-allocated-topology' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # Capabilities allow_create = False - allow_get = True - allow_update = False + allow_fetch = True + allow_commit = False allow_delete = True allow_list = False @@ -36,7 +36,9 @@ class AutoAllocatedTopology(resource.Resource): #: Will return in error if resources have not been configured correctly #: To use this feature auto-allocated-topology, subnet_allocation, #: external-net and router extensions must be enabled and set up. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) class ValidateTopology(AutoAllocatedTopology): @@ -46,4 +48,4 @@ class ValidateTopology(AutoAllocatedTopology): #: Will return "Deployment error:" if the resources required have not #: been correctly set up. dry_run = resource.Body('dry_run') - project_id = resource.URI('project') + project = resource.URI('project') diff --git a/openstack/network/v2/availability_zone.py b/openstack/network/v2/availability_zone.py index dd55f17974..9879d53b48 100644 --- a/openstack/network/v2/availability_zone.py +++ b/openstack/network/v2/availability_zone.py @@ -10,27 +10,29 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as _resource +from openstack import resource as _resource class AvailabilityZone(_resource.Resource): resource_key = 'availability_zone' resources_key = 'availability_zones' base_path = '/availability_zones' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = False - allow_get = False - allow_update = False + allow_fetch = False + allow_commit = False allow_delete = False allow_list = True # NOTE: We don't support query by state yet because there is a mapping # at neutron side difficult to map. _query_mapping = _resource.QueryParameters( - name='availability_zone', resource='agent_type') + name='availability_zone', + resource='agent_type', + ) # Properties #: Name of the availability zone. diff --git a/openstack/network/v2/bgp_peer.py b/openstack/network/v2/bgp_peer.py new file mode 100644 index 0000000000..3304247323 --- /dev/null +++ b/openstack/network/v2/bgp_peer.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class BgpPeer(resource.Resource): + resource_key = 'bgp_peer' + resources_key = 'bgp_peers' + base_path = '/bgp-peers' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: The Id of the BGP Peer + id = resource.Body('id') + #: The BGP Peer's name. + name = resource.Body('name') + #: The ID of the project that owns the BGP Peer + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The authentication type for the BGP Peer, can be none or md5. + #: none by default. + auth_type = resource.Body('auth_type') + #: The remote Autonomous System number of the BGP Peer. + remote_as = resource.Body('remote_as') + #: The ip address of the Peer. + peer_ip = resource.Body('peer_ip') diff --git a/openstack/network/v2/bgp_speaker.py b/openstack/network/v2/bgp_speaker.py new file mode 100644 index 0000000000..1be9f00806 --- /dev/null +++ b/openstack/network/v2/bgp_speaker.py @@ -0,0 +1,172 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.network.v2 import agent as _agent +from openstack import resource +from openstack import utils + + +class BgpSpeaker(resource.Resource): + resource_key = 'bgp_speaker' + resources_key = 'bgp_speakers' + base_path = '/bgp-speakers' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: The Id of the BGP Speaker + id = resource.Body('id') + #: The BGP speaker's name. + name = resource.Body('name') + #: The ID of the project that owns the BGP Speaker. + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The IP version (4 or 6) of the BGP Speaker. + ip_version = resource.Body('ip_version') + #: Whether to enable or disable the advertisement of floating ip host + #: routes by the BGP Speaker. True by default. + advertise_floating_ip_host_routes = resource.Body( + 'advertise_floating_ip_host_routes' + ) + #: Whether to enable or disable the advertisement of tenant network + #: routes by the BGP Speaker. True by default. + advertise_tenant_networks = resource.Body('advertise_tenant_networks') + #: The local Autonomous System number of the BGP Speaker. + local_as = resource.Body('local_as') + #: The ID of the network to which the BGP Speaker is associated. + networks = resource.Body('networks') + + def _put(self, session, url, body): + resp = session.put(url, json=body) + exceptions.raise_from_response(resp) + return resp + + def add_bgp_peer(self, session, peer_id): + """Add BGP Peer to a BGP Speaker + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param peer_id: id of the peer to associate with the speaker. + + :returns: A dictionary as the API Reference describes it. 
+ + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'add_bgp_peer') + body = {'bgp_peer_id': peer_id} + resp = self._put(session, url, body) + return resp.json() + + def remove_bgp_peer(self, session, peer_id): + """Remove BGP Peer from a BGP Speaker + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param peer_id: The ID of the peer to disassociate from the speaker. + + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'remove_bgp_peer') + body = {'bgp_peer_id': peer_id} + self._put(session, url, body) + + def add_gateway_network(self, session, network_id): + """Add Network to a BGP Speaker + + :param: session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param network_id: The ID of the network to associate with the speaker + + :returns: A dictionary as the API Reference describes it. + """ + body = {'network_id': network_id} + url = utils.urljoin(self.base_path, self.id, 'add_gateway_network') + resp = session.put(url, json=body) + return resp.json() + + def remove_gateway_network(self, session, network_id): + """Delete Network from a BGP Speaker + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param network_id: The ID of the network to disassociate + from the speaker + """ + body = {'network_id': network_id} + url = utils.urljoin(self.base_path, self.id, 'remove_gateway_network') + session.put(url, json=body) + + def get_advertised_routes(self, session): + """List routes advertised by a BGP Speaker + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :returns: The response as a list of routes (cidr/nexthop pair + advertised by the BGP Speaker. 
+ + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'get_advertised_routes') + resp = session.get(url) + exceptions.raise_from_response(resp) + self._body.attributes.update(resp.json()) + return resp.json() + + def get_bgp_dragents(self, session): + """List Dynamic Routing Agents hosting a specific BGP Speaker + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :returns: The response as a list of dragents hosting a specific + BGP Speaker. + :rtype: :class:`~openstack.network.v2.agent.Agent` + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'bgp-dragents') + resp = session.get(url) + exceptions.raise_from_response(resp) + self._body.attributes.update(resp.json()) + agent_ids = [ag['id'] for ag in resp.json()['agents']] + agents = _agent.Agent.list(session=session) + return [ag for ag in agents if ag.id in agent_ids] + + def add_bgp_speaker_to_dragent(self, session, bgp_agent_id): + """Add BGP Speaker to a Dynamic Routing Agent + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param bgp_agent_id: The id of the dynamic routing agent to which + add the speaker. + """ + body = {'bgp_speaker_id': self.id} + url = utils.urljoin('agents', bgp_agent_id, 'bgp-drinstances') + session.post(url, json=body) + + def remove_bgp_speaker_from_dragent(self, session, bgp_agent_id): + """Delete BGP Speaker from a Dynamic Routing Agent + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param bgp_agent_id: The id of the dynamic routing agent from which + remove the speaker. 
+ """ + url = utils.urljoin('agents', bgp_agent_id, 'bgp-drinstances', self.id) + session.delete(url) diff --git a/openstack/network/v2/bgpvpn.py b/openstack/network/v2/bgpvpn.py new file mode 100644 index 0000000000..03478c8e5b --- /dev/null +++ b/openstack/network/v2/bgpvpn.py @@ -0,0 +1,67 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class BgpVpn(resource.Resource): + resource_key = 'bgpvpn' + resources_key = 'bgpvpns' + base_path = '/bgpvpn/bgpvpns' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'name', + 'project_id', + 'local_pref', + 'vni', + 'type', + 'networks', + 'routers', + 'ports', + # NOTE(seba): (route|import|export) targets only support exact matches + # and have therefore been left out + ) + + # Properties + #: The Id of the BGPVPN + id = resource.Body('id') + #: The BGPVPN's name. + name = resource.Body('name') + #: The ID of the project that owns the BGPVPN + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: List of route distinguisher strings. + route_distinguishers = resource.Body('route_distinguishers') + #: Route Targets that will be both imported and used for export. 
+ route_targets = resource.Body('route_targets') + #: Additional Route Targets that will be imported. + import_targets = resource.Body('import_targets') + #: Additional Route Targets that will be used for export. + export_targets = resource.Body('export_targets') + #: The default BGP LOCAL_PREF of routes that will be advertised to + #: the BGPVPN. + local_pref = resource.Body('local_pref') + #: The globally-assigned VXLAN vni for the BGP VPN. + vni = resource.Body('vni') + #: Selection of the type of VPN and the technology behind it. + #: Allowed values are l2 or l3. + type = resource.Body('type') diff --git a/openstack/network/v2/bgpvpn_network_association.py b/openstack/network/v2/bgpvpn_network_association.py new file mode 100644 index 0000000000..2a041f5785 --- /dev/null +++ b/openstack/network/v2/bgpvpn_network_association.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class BgpVpnNetworkAssociation(resource.Resource): + resource_key = 'network_association' + resources_key = 'network_associations' + base_path = '/bgpvpn/bgpvpns/%(bgpvpn_id)s/network_associations' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + + # Properties + #: The Id of the BGPVPN + id = resource.Body('id') + #: The ID of the BGPVPN who owns Network Association. 
+ bgpvpn_id = resource.URI('bgpvpn_id') + #: The ID of the project that owns the BGPVPN + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The ID of a Neutron network with which to associate the BGP VPN. + network_id = resource.Body('network_id') diff --git a/openstack/network/v2/bgpvpn_port_association.py b/openstack/network/v2/bgpvpn_port_association.py new file mode 100644 index 0000000000..9b37b3e5c3 --- /dev/null +++ b/openstack/network/v2/bgpvpn_port_association.py @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class BgpVpnPortAssociation(resource.Resource): + resource_key = 'port_association' + resources_key = 'port_associations' + base_path = '/bgpvpn/bgpvpns/%(bgpvpn_id)s/port_associations' + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: The Id of the BGPVPN + id = resource.Body('id') + #: The ID of the BGPVPN who owns Network Association. + bgpvpn_id = resource.URI('bgpvpn_id') + #: The ID of the project that owns the BGPVPN + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The ID of a Neutron Port with which to associate the BGP VPN. 
+ port_id = resource.Body('port_id') + #: Boolean flag controlling whether or not the fixed IPs of a port will be + #: advertised to the BGPVPN (default: true). + advertise_fixed_ips = resource.Body('advertise_fixed_ips') + #: List of routes, each route being a dict with at least a type key, + #: which can be prefix or bgpvpn. + #: For the prefix type, the IP prefix (v4 or v6) to advertise is specified + #: in the prefix key. + #: For the bgpvpn type, the bgpvpn_id key specifies the BGPVPN from which + #: routes will be readvertised + routes = resource.Body('routes') diff --git a/openstack/network/v2/bgpvpn_router_association.py b/openstack/network/v2/bgpvpn_router_association.py new file mode 100644 index 0000000000..bbc11a7c7f --- /dev/null +++ b/openstack/network/v2/bgpvpn_router_association.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class BgpVpnRouterAssociation(resource.Resource): + resource_key = 'router_association' + resources_key = 'router_associations' + base_path = '/bgpvpn/bgpvpns/%(bgpvpn_id)s/router_associations' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: The Id of the BGPVPN + id = resource.Body('id') + #: The ID of the BGPVPN who owns Network Association. 
+ bgpvpn_id = resource.URI('bgpvpn_id') + #: The ID of the project that owns the BGPVPN + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The ID of a Neutron router with which to associate the BGP VPN. + router_id = resource.Body('router_id') + #: Boolean flag controlling whether or not the routes specified in the + #: routes attribute of the router will be advertised to the BGPVPN + #: (default: true). + advertise_extra_routes = resource.Body('advertise_extra_routes') diff --git a/openstack/network/v2/default_security_group_rule.py b/openstack/network/v2/default_security_group_rule.py new file mode 100644 index 0000000000..0d40649ac4 --- /dev/null +++ b/openstack/network/v2/default_security_group_rule.py @@ -0,0 +1,89 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.network.v2 import _base +from openstack import resource + + +class DefaultSecurityGroupRule(_base.NetworkResource): + resource_key = 'default_security_group_rule' + resources_key = 'default_security_group_rules' + base_path = '/default-security-group-rules' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'id', + 'description', + 'remote_group_id', + 'remote_address_group_id', + 'direction', + 'protocol', + 'port_range_min', + 'port_range_max', + 'remote_ip_prefix', + 'used_in_default_sg', + 'used_in_non_default_sg', + 'sort_dir', + 'sort_key', + ether_type='ethertype', + ) + + # Properties + #: The default security group rule description. + description = resource.Body('description') + #: The remote security group ID to be associated with this security + #: group rule created from this template. + #: You can specify either ``remote_group_id`` or #: + #: ``remote_address_group_id`` or ``remote_ip_prefix``. + remote_group_id = resource.Body('remote_group_id') + #: The remote address group ID to be associated with this security + #: group rule created from that template. + #: You can specify either ``remote_group_id`` or + #: ``remote_address_group_id`` or ``remote_ip_prefix``. + remote_address_group_id = resource.Body('remote_address_group_id') + #: ``ingress`` or ``egress``: The direction in which the security group #: + #: rule will be applied. See 'direction' field in the security group rule + #: API. + direction = resource.Body('direction') + #: The protocol that is matched by the security group rule. + #: Valid values are ``null``, ``tcp``, ``udp``, and ``icmp``. + protocol = resource.Body('protocol') + #: The minimum port number in the range that is matched by the + #: security group rule. If the protocol is TCP or UDP, this value + #: must be less than or equal to the value of the port_range_max + #: attribute. 
If the protocol is ICMP, this value must be an ICMP type. + port_range_min = resource.Body('port_range_min', type=int) + #: The maximum port number in the range that is matched by the + #: security group rule. The port_range_min attribute constrains + #: the port_range_max attribute. If the protocol is ICMP, this + #: value must be an ICMP type. + port_range_max = resource.Body('port_range_max', type=int) + #: The remote IP prefix to be associated with this security group rule. + #: You can specify either ``remote_group_id`` or + #: ``remote_address_group_id`` or ``remote_ip_prefix``. + #: This attribute matches the specified IP prefix as the source or + #: destination IP address of the IP packet depending on direction. + remote_ip_prefix = resource.Body('remote_ip_prefix') + #: Must be IPv4 or IPv6, and addresses represented in CIDR must match + #: the ingress or egress rules. + ether_type = resource.Body('ethertype') + #: Indicate if this template be used to create security group rules in the + #: default security group created automatically for each project. + used_in_default_sg = resource.Body('used_in_default_sg', type=bool) + #: Indicate if this template be used to create security group rules in the + #: custom security groups created in the project by users. + used_in_non_default_sg = resource.Body('used_in_non_default_sg', type=bool) diff --git a/openstack/network/v2/extension.py b/openstack/network/v2/extension.py index 76c79c8e97..2e6736e5f2 100644 --- a/openstack/network/v2/extension.py +++ b/openstack/network/v2/extension.py @@ -10,18 +10,18 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class Extension(resource.Resource): resource_key = 'extension' resources_key = 'extensions' base_path = '/extensions' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities - allow_get = True + allow_fetch = True allow_list = True # NOTE: No query parameters supported @@ -32,7 +32,7 @@ class Extension(resource.Resource): #: Text describing what the extension does. description = resource.Body('description') #: Links pertaining to this extension. - links = resource.Body('links') + links = resource.Body('links', type=list, list_type=dict) #: The name of this extension. name = resource.Body('name') #: Timestamp when the extension was last updated. diff --git a/openstack/network/v2/firewall_group.py b/openstack/network/v2/firewall_group.py new file mode 100644 index 0000000000..4f16c1c354 --- /dev/null +++ b/openstack/network/v2/firewall_group.py @@ -0,0 +1,66 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class FirewallGroup(resource.Resource): + resource_key = 'firewall_group' + resources_key = 'firewall_groups' + base_path = '/fwaas/firewall_groups' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'egress_firewall_policy_id', + 'ingress_firewall_policy_id', + 'name', + 'shared', + 'status', + 'ports', + 'project_id', + ) + + # Properties + #: The administrative state of the firewall group, which is up (true) or + #: down (false). Default is true. + admin_state_up = resource.Body('admin_state_up') + #: The firewall group rule description. + description = resource.Body('description') + #: The ID of the egress firewall policy for the firewall group. + egress_firewall_policy_id = resource.Body('egress_firewall_policy_id') + #: The ID of the ingress firewall policy for the firewall group. + ingress_firewall_policy_id = resource.Body('ingress_firewall_policy_id') + #: The ID of the firewall group. + id = resource.Body('id') + #: The name of a firewall group + name = resource.Body('name') + #: A list of the IDs of the ports associated with the firewall group. + ports = resource.Body('ports') + #: The ID of the project that owns the resource. + project_id = resource.Body('project_id') + #: Indicates whether this firewall group is shared across all projects. + shared = resource.Body('shared') + #: The status of the firewall group. Valid values are ACTIVE, INACTIVE, + #: ERROR, PENDING_UPDATE, or PENDING_DELETE. + status = resource.Body('status') diff --git a/openstack/network/v2/firewall_policy.py b/openstack/network/v2/firewall_policy.py new file mode 100644 index 0000000000..4d760bc08b --- /dev/null +++ b/openstack/network/v2/firewall_policy.py @@ -0,0 +1,103 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.exceptions import HttpException +from openstack import resource +from openstack import utils + + +class FirewallPolicy(resource.Resource): + resource_key = 'firewall_policy' + resources_key = 'firewall_policies' + base_path = '/fwaas/firewall_policies' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'firewall_rules', + 'name', + 'project_id', + 'shared', + ) + + # Properties + #: Each time that the firewall policy or its associated rules are changed, + #: the API sets this attribute to false. To audit the policy, + #: explicitly set this attribute to true. + audited = resource.Body('audited') + #: The firewall group rule description. + description = resource.Body('description') + #: The ID of the firewall policy. + id = resource.Body('id') + #: A list of the IDs of the firewall rules associated with the + #: firewall policy. + firewall_rules = resource.Body('firewall_rules') + #: The name of a firewall policy + name = resource.Body('name') + #: The ID of the project that owns the resource. + project_id = resource.Body('project_id') + #: Set to true to make this firewall policy visible to other projects. 
+ shared = resource.Body('shared') + + def insert_rule(self, session, **body): + """Insert a firewall_rule into a firewall_policy in order. + + :param session: The session to communicate through. + :type session: :class:`~openstack.session.Session` + :param dict body: The body requested to be updated on the router + + :returns: The updated firewall policy + :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + + :raises: :class:`~openstack.exceptions.HttpException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'insert_rule') + return self._put_request(session, url, body) + + def remove_rule(self, session, **body): + """Remove a firewall_rule from a firewall_policy. + + :param session: The session to communicate through. + :type session: :class:`~openstack.session.Session` + :param dict body: The body requested to be updated on the router + + :returns: The updated firewall policy + :rtype: :class:`~openstack.network.v2.firewall_policy.FirewallPolicy` + + :raises: :class:`~openstack.exceptions.HttpException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'remove_rule') + return self._put_request(session, url, body) + + def _put_request(self, session, url, json_data): + resp = session.put(url, json=json_data) + data = resp.json() + if not resp.ok: + message = None + if 'NeutronError' in data: + message = data['NeutronError']['message'] + raise HttpException(message=message, response=resp) + + self._body.attributes.update(data) + self._update_location() + return self diff --git a/openstack/network/v2/firewall_rule.py b/openstack/network/v2/firewall_rule.py new file mode 100644 index 0000000000..04a84eadb9 --- /dev/null +++ b/openstack/network/v2/firewall_rule.py @@ -0,0 +1,83 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class FirewallRule(resource.Resource): + resource_key = 'firewall_rule' + resources_key = 'firewall_rules' + base_path = '/fwaas/firewall_rules' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'action', + 'description', + 'destination_ip_address', + 'name', + 'destination_port', + 'enabled', + 'ip_version', + 'project_id', + 'protocol', + 'shared', + 'source_ip_address', + 'source_port', + 'firewall_policy_id', + ) + + # Properties + #: The action that the API performs on traffic that matches the firewall + #: rule. Valid values are allow or deny. Default is deny. + action = resource.Body('action') + #: The description of the firewall rule + description = resource.Body('description') + #: The destination IPv4 or IPv6 address or CIDR for the firewall rule. + destination_ip_address = resource.Body('destination_ip_address') + #: The destination port or port range for the firewall rule. + destination_port = resource.Body('destination_port') + #: Facilitates selectively turning off rules without having to disassociate + #: the rule from the firewall policy + enabled = resource.Body('enabled') + #: The IP protocol version for the firewall rule. Valid values are 4 or 6. + ip_version = resource.Body('ip_version') + #: The name of the firewall rule. + name = resource.Body('name') + #: The ID of the project that owns the resource. 
+ project_id = resource.Body('project_id') + #: The IP protocol for the firewall rule. + protocol = resource.Body('protocol') + #: Indicates whether this firewall rule is shared across all projects. + shared = resource.Body('shared') + #: The source IPv4 or IPv6 address or CIDR for the firewall rule. + source_ip_address = resource.Body('source_ip_address') + #: The source port or port range for the firewall rule. + source_port = resource.Body('source_port') + #: Summary field of a FirewallRule, composed of the protocol, + #: source_ip_address:source_port, + #: destination_ip_address:destination_port and action. + summary = resource.Computed('summary', default='') + #: The ID of the firewall policy. + firewall_policy_id = resource.Body('firewall_policy_id') + #: The ID of the firewall rule. + id = resource.Body('id') diff --git a/openstack/network/v2/flavor.py b/openstack/network/v2/flavor.py index 32321378e0..364a4248e9 100644 --- a/openstack/network/v2/flavor.py +++ b/openstack/network/v2/flavor.py @@ -10,25 +10,32 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource +from openstack import utils class Flavor(resource.Resource): resource_key = 'flavor' resources_key = 'flavors' base_path = '/flavors' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'description', 'name', 'service_type', is_enabled='enabled') + 'description', + 'name', + 'service_type', + 'sort_key', + 'sort_dir', + is_enabled='enabled', + ) # properties #: description for the flavor @@ -41,3 +48,24 @@ class Flavor(resource.Resource): service_type = resource.Body('service_type') #: IDs of service profiles associated with this flavor service_profile_ids = resource.Body('service_profiles', type=list) + + def associate_flavor_with_service_profile( + self, session, service_profile_id=None + ): + flavor_id = self.id + url = utils.urljoin(self.base_path, flavor_id, 'service_profiles') + body = {"service_profile": {"id": service_profile_id}} + resp = session.post(url, json=body) + return resp.json() + + def disassociate_flavor_from_service_profile( + self, session, service_profile_id=None + ): + flavor_id = self.id + url = utils.urljoin( + self.base_path, flavor_id, 'service_profiles', service_profile_id + ) + session.delete( + url, + ) + return None diff --git a/openstack/network/v2/floating_ip.py b/openstack/network/v2/floating_ip.py index 630d103424..b48538277d 100644 --- a/openstack/network/v2/floating_ip.py +++ b/openstack/network/v2/floating_ip.py @@ -10,35 +10,52 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack.network.v2 import _base +from openstack import resource -class FloatingIP(resource.Resource): +class FloatingIP(_base.NetworkResource, _base.TagMixinNetwork): name_attribute = "floating_ip_address" resource_name = "floating ip" resource_key = 'floatingip' resources_key = 'floatingips' base_path = '/floatingips' - service = network_service.NetworkService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True + # For backward compatibility include tenant_id as query param _query_mapping = resource.QueryParameters( - 'description', 'fixed_ip_address', 'floating_ip_address', - 'floating_network_id', 'port_id', 'router_id', 'status', - project_id='tenant_id') + 'description', + 'fixed_ip_address', + 'floating_ip_address', + 'floating_network_id', + 'id', + 'port_id', + 'router_id', + 'status', + 'subnet_id', + 'project_id', + 'tenant_id', + 'sort_key', + 'sort_dir', + tenant_id='project_id', + **_base.TagMixinNetwork._tag_query_parameters, + ) # Properties #: Timestamp at which the floating IP was created. created_at = resource.Body('created_at') #: The floating IP description. description = resource.Body('description') + #: The DNS domain. + dns_domain = resource.Body('dns_domain') + #: The DNS name. + dns_name = resource.Body('dns_name') #: The fixed IP address associated with the floating IP. If you #: intend to associate the floating IP with a fixed IP at creation #: time, then you must indicate the identifier of the internal port. @@ -48,25 +65,38 @@ class FloatingIP(resource.Resource): fixed_ip_address = resource.Body('fixed_ip_address') #: The floating IP address. 
floating_ip_address = resource.Body('floating_ip_address') + #: Floating IP object doesn't have name attribute, set ip address to name + #: so that user could find floating IP by UUID or IP address using find_ip + name = floating_ip_address #: The ID of the network associated with the floating IP. floating_network_id = resource.Body('floating_network_id') + #: Read-only. The details of the port that this floating IP associates + #: with. Present if ``fip-port-details`` extension is loaded. + #: *Type: dict with keys: name, network_id, mac_address, admin_state_up, + #: status, device_id, device_owner* + port_details = resource.Body('port_details', type=dict) #: The port ID. port_id = resource.Body('port_id') + #: The ID of the QoS policy attached to the floating IP. + qos_policy_id = resource.Body('qos_policy_id') #: The ID of the project this floating IP is associated with. - project_id = resource.Body('tenant_id') - #: Revision number of the floating IP. *Type: int* - revision_number = resource.Body('revision_number', type=int) + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The ID of an associated router. router_id = resource.Body('router_id') #: The floating IP status. Value is ``ACTIVE`` or ``DOWN``. status = resource.Body('status') #: Timestamp at which the floating IP was last updated. updated_at = resource.Body('updated_at') + #: The Subnet ID associated with the floating IP. + subnet_id = resource.Body('subnet_id') @classmethod def find_available(cls, session): - info = cls.list(session, fields='id', port_id='') - try: - return next(info) - except StopIteration: - return None + # server-side filtering on empty values is not always supported. 
+ # TODO(mordred) Make this check for support for the server-side filter + for ip in cls.list(session): + if not ip.port_id: + return ip + return None diff --git a/openstack/network/v2/health_monitor.py b/openstack/network/v2/health_monitor.py index a0aea93285..bf853cfdfe 100644 --- a/openstack/network/v2/health_monitor.py +++ b/openstack/network/v2/health_monitor.py @@ -10,32 +10,37 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class HealthMonitor(resource.Resource): resource_key = 'healthmonitor' resources_key = 'healthmonitors' base_path = '/lbaas/healthmonitors' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'delay', 'expected_codes', 'http_method', 'max_retries', - 'timeout', 'type', 'url_path', + 'delay', + 'expected_codes', + 'http_method', + 'max_retries', + 'timeout', + 'type', + 'url_path', + 'project_id', is_admin_state_up='adminstate_up', - project_id='tenant_id', ) # Properties - #: The time, in milliseconds, between sending probes to members. + #: The time, in seconds, between sending probes to members. delay = resource.Body('delay') #: Expected HTTP codes for a passing HTTP(S) monitor. expected_codes = resource.Body('expected_codes') @@ -51,9 +56,13 @@ class HealthMonitor(resource.Resource): #: List of pools associated with this health monitor #: *Type: list of dicts which contain the pool IDs* pool_ids = resource.Body('pools', type=list) + #: The ID of the pool associated with this health monitor + pool_id = resource.Body('pool_id') #: The ID of the project this health monitor is associated with. 
- project_id = resource.Body('tenant_id') - #: The maximum number of milliseconds for a monitor to wait for a + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The maximum number of seconds for a monitor to wait for a #: connection to be established before it times out. This value must #: be less than the delay value. timeout = resource.Body('timeout') diff --git a/openstack/network/v2/l3_conntrack_helper.py b/openstack/network/v2/l3_conntrack_helper.py new file mode 100644 index 0000000000..9f142bfee8 --- /dev/null +++ b/openstack/network/v2/l3_conntrack_helper.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ConntrackHelper(resource.Resource): + resource_key = 'conntrack_helper' + resources_key = 'conntrack_helpers' + base_path = '/routers/%(router_id)s/conntrack_helpers' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: The ID of the Router who owns helper. + router_id = resource.URI('router_id') + #: The netfilter conntrack helper module. + helper = resource.Body('helper') + #: The network protocol for the netfilter conntrack target rule. + protocol = resource.Body('protocol') + #: The network port for the netfilter conntrack target rule. 
+ port = resource.Body('port') diff --git a/openstack/network/v2/listener.py b/openstack/network/v2/listener.py index 39bf8b1c3f..d46ff57886 100644 --- a/openstack/network/v2/listener.py +++ b/openstack/network/v2/listener.py @@ -10,27 +10,33 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class Listener(resource.Resource): resource_key = 'listener' resources_key = 'listeners' base_path = '/lbaas/listeners' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'connection_limit', 'default_pool_id', 'default_tls_container_ref', - 'description', 'name', 'project_id', 'protocol', 'protocol_port', - is_admin_state_up='admin_state_up' + 'connection_limit', + 'default_pool_id', + 'default_tls_container_ref', + 'description', + 'name', + 'project_id', + 'protocol', + 'protocol_port', + is_admin_state_up='admin_state_up', ) # Properties @@ -49,6 +55,8 @@ class Listener(resource.Resource): #: List of load balancers associated with this listener. #: *Type: list of dicts which contain the load balancer IDs* load_balancer_ids = resource.Body('loadbalancers') + #: The ID of the load balancer associated with this listener. + load_balancer_id = resource.Body('loadbalancer_id') #: Name of the listener name = resource.Body('name') #: The ID of the project this listener is associated with. 
diff --git a/openstack/network/v2/load_balancer.py b/openstack/network/v2/load_balancer.py index 60f6df7470..08f505b0d1 100644 --- a/openstack/network/v2/load_balancer.py +++ b/openstack/network/v2/load_balancer.py @@ -10,23 +10,35 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class LoadBalancer(resource.Resource): resource_key = 'loadbalancer' resources_key = 'loadbalancers' base_path = '/lbaas/loadbalancers' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True + _query_mapping = resource.QueryParameters( + 'description', + 'name', + 'project_id', + 'provider', + 'provisioning_status', + 'tenant_id', + 'vip_address', + 'vip_subnet_id', + is_admin_state_up='admin_state_up', + ) + # Properties #: Description for the load balancer. description = resource.Body('description') @@ -44,7 +56,9 @@ class LoadBalancer(resource.Resource): #: *Type: list of dicts which contain the pool IDs* pool_ids = resource.Body('pools', type=list) #: The ID of the project this load balancer is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The name of the provider. provider = resource.Body('provider') #: Status of load balancer provisioning, e.g. ACTIVE, INACTIVE. diff --git a/openstack/network/v2/local_ip.py b/openstack/network/v2/local_ip.py new file mode 100644 index 0000000000..35b61c5362 --- /dev/null +++ b/openstack/network/v2/local_ip.py @@ -0,0 +1,70 @@ +# Copyright 2021 Huawei, Inc. All rights reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from openstack import resource + + +class LocalIP(resource.Resource): + """Local IP extension.""" + + resource_name = "local ip" + resource_key = "local_ip" + resources_key = "local_ips" + base_path = "/local_ips" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + 'sort_key', + 'sort_dir', + 'name', + 'description', + 'project_id', + 'network_id', + 'local_port_id', + 'local_ip_address', + 'ip_mode', + ) + + # Properties + #: Timestamp at which the floating IP was created. + created_at = resource.Body('created_at') + #: The local ip description. + description = resource.Body('description') + #: The ID of the local ip. + id = resource.Body('id') + #: The local ip ip-mode. + ip_mode = resource.Body('ip_mode') + #: The Local IP address. + local_ip_address = resource.Body('local_ip_address') + #: The ID of the port that owns the local ip. + local_port_id = resource.Body('local_port_id') + #: The local ip name. + name = resource.Body('name') + #: The ID of the network that owns the local ip. + network_id = resource.Body('network_id') + #: The ID of the project that owns the local ip. + project_id = resource.Body('project_id') + #: The local ip revision number. 
+ revision_number = resource.Body('revision_number') + #: Timestamp at which the floating IP was last updated. + updated_at = resource.Body('updated_at') diff --git a/openstack/network/v2/local_ip_association.py b/openstack/network/v2/local_ip_association.py new file mode 100644 index 0000000000..17d09b513d --- /dev/null +++ b/openstack/network/v2/local_ip_association.py @@ -0,0 +1,53 @@ +# Copyright 2021 Huawei, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from openstack import resource + + +class LocalIPAssociation(resource.Resource): + """Local IP extension.""" + + resource_key = "port_association" + resources_key = "port_associations" + base_path = "/local_ips/%(local_ip_id)s/port_associations" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + 'fixed_port_id', + 'fixed_ip', + 'host', + 'sort_key', + 'sort_dir', + ) + + # Properties + #: The fixed port ID. + fixed_port_id = resource.Body('fixed_port_id') + #: The fixed IP. 
+ fixed_ip = resource.Body('fixed_ip') + #: Host + host = resource.Body('host') + #: The local ip address + local_ip_address = resource.Body('local_ip_address') + #: The ID of Local IP address + local_ip_id = resource.URI('local_ip_id') diff --git a/openstack/network/v2/metering_label.py b/openstack/network/v2/metering_label.py index 31220e4450..0ae1caaf32 100644 --- a/openstack/network/v2/metering_label.py +++ b/openstack/network/v2/metering_label.py @@ -10,27 +10,30 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class MeteringLabel(resource.Resource): resource_key = 'metering_label' resources_key = 'metering_labels' base_path = '/metering/metering-labels' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'description', 'name', + 'description', + 'name', + 'project_id', + 'sort_key', + 'sort_dir', is_shared='shared', - project_id='tenant_id' ) # Properties @@ -39,7 +42,9 @@ class MeteringLabel(resource.Resource): #: Name of the metering label. name = resource.Body('name') #: The ID of the project this metering label is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: Indicates whether this label is shared across all tenants. 
#: *Type: bool* is_shared = resource.Body('shared', type=bool) diff --git a/openstack/network/v2/metering_label_rule.py b/openstack/network/v2/metering_label_rule.py index 568d054bc5..3347ec2e07 100644 --- a/openstack/network/v2/metering_label_rule.py +++ b/openstack/network/v2/metering_label_rule.py @@ -10,26 +10,32 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class MeteringLabelRule(resource.Resource): resource_key = 'metering_label_rule' resources_key = 'metering_label_rules' base_path = '/metering/metering-label-rules' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'direction', 'metering_label_id', 'remote_ip_prefix', - project_id='tenant_id', + 'direction', + 'metering_label_id', + 'remote_ip_prefix', + 'source_ip_prefix', + 'destination_ip_prefix', + 'project_id', + 'sort_key', + 'sort_dir', ) # Properties @@ -44,6 +50,22 @@ class MeteringLabelRule(resource.Resource): #: The metering label ID to associate with this metering label rule. metering_label_id = resource.Body('metering_label_id') #: The ID of the project this metering label rule is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The remote IP prefix to be associated with this metering label rule. 
- remote_ip_prefix = resource.Body('remote_ip_prefix') + remote_ip_prefix = resource.Body( + 'remote_ip_prefix', + deprecated=True, + deprecation_reason="The use of 'remote_ip_prefix' in metering label " + "rules is deprecated and will be removed in future " + "releases. One should use instead, the " + "'source_ip_prefix' and/or 'destination_ip_prefix' " + "parameters. For more details, you can check the " + "spec: https://review.opendev.org/#/c/744702/.", + ) + + #: The source IP prefix to be associated with this metering label rule. + source_ip_prefix = resource.Body('source_ip_prefix') + #: The destination IP prefix to be associated with this metering label rule + destination_ip_prefix = resource.Body('destination_ip_prefix') diff --git a/openstack/network/v2/ndp_proxy.py b/openstack/network/v2/ndp_proxy.py new file mode 100644 index 0000000000..ae75927cf0 --- /dev/null +++ b/openstack/network/v2/ndp_proxy.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class NDPProxy(resource.Resource): + resource_name = "ndp proxy" + resource_key = 'ndp_proxy' + resources_key = 'ndp_proxies' + base_path = '/ndp_proxies' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + "sort_key", + "sort_dir", + 'name', + 'description', + 'project_id', + 'router_id', + 'port_id', + 'ip_address', + ) + + # Properties + #: Timestamp at which the NDP proxy was created. + created_at = resource.Body('created_at') + #: The description + description = resource.Body('description') + #: The ID of the NDP proxy. + id = resource.Body('id') + #: The internal IP address + ip_address = resource.Body('ip_address') + # The name of ndp proxy + name = resource.Body('name') + #: The ID of internal port + port_id = resource.Body('port_id') + #: The ID of the project that owns the NDP proxy. + project_id = resource.Body('project_id') + #: The NDP proxy revision number. + revision_number = resource.Body('revision_number') + #: The ID of Router + router_id = resource.Body('router_id') + #: Timestamp at which the NDP proxy was last updated. + updated_at = resource.Body('updated_at') diff --git a/openstack/network/v2/network.py b/openstack/network/v2/network.py index aa875875c4..537b643e54 100644 --- a/openstack/network/v2/network.py +++ b/openstack/network/v2/network.py @@ -10,34 +10,41 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack.network.v2 import _base +from openstack import resource -class Network(resource.Resource): +class Network(_base.NetworkResource, _base.TagMixinNetwork): resource_key = 'network' resources_key = 'networks' base_path = '/networks' - service = network_service.NetworkService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # NOTE: We don't support query on list or datetime fields yet _query_mapping = resource.QueryParameters( - 'description', 'name', 'project_id', 'status', + 'description', + 'name', + 'status', + 'project_id', + 'sort_key', + 'sort_dir', + 'id', ipv4_address_scope_id='ipv4_address_scope', ipv6_address_scope_id='ipv6_address_scope', is_admin_state_up='admin_state_up', is_port_security_enabled='port_security_enabled', + is_router_external='router:external', is_shared='shared', provider_network_type='provider:network_type', provider_physical_network='provider:physical_network', provider_segmentation_id='provider:segmentation_id', + **_base.TagMixinNetwork._tag_query_parameters, ) # Properties @@ -66,13 +73,14 @@ class Network(resource.Resource): #: The port security status, which is enabled ``True`` or disabled #: ``False``. *Type: bool* *Default: False* #: Available for multiple provider extensions. - is_port_security_enabled = resource.Body('port_security_enabled', - type=bool, - default=False) + is_port_security_enabled = resource.Body( + 'port_security_enabled', type=bool, default=False + ) #: Whether or not the router is external. #: *Type: bool* *Default: False* - is_router_external = resource.Body('router:external', type=bool, - default=False) + is_router_external = resource.Body( + 'router:external', type=bool, default=False + ) #: Indicates whether this network is shared across all tenants. 
#: By default, only administrative users can change this value. #: *Type: bool* @@ -96,8 +104,6 @@ class Network(resource.Resource): provider_segmentation_id = resource.Body('provider:segmentation_id') #: The ID of the QoS policy attached to the port. qos_policy_id = resource.Body('qos_policy_id') - #: Revision number of the network. *Type: int* - revision_number = resource.Body('revision_number', type=int) #: A list of provider segment objects. #: Available for multiple provider extensions. segments = resource.Body('segments') @@ -108,20 +114,23 @@ class Network(resource.Resource): subnet_ids = resource.Body('subnets', type=list) #: Timestamp when the network was last updated. updated_at = resource.Body('updated_at') + #: Indicates the VLAN transparency mode of the network + is_vlan_transparent = resource.Body('vlan_transparent', type=bool) + #: Indicates the VLAN QinQ mode of the network + is_vlan_qinq = resource.Body('vlan_qinq', type=bool) -class NetworkHostingDHCPAgent(resource.Resource): - resource_key = 'agent' - resources_key = 'agents' - resource_name = 'dhcp-agent' - base_path = '/networks/%(network_id)s/dhcp-agents' - service = network_service.NetworkService() +class DHCPAgentHostingNetwork(Network): + resource_key = 'network' + resources_key = 'networks' + base_path = '/agents/%(agent_id)s/dhcp-networks' + resource_name = 'dhcp-network' # capabilities allow_create = False - allow_get = True - allow_update = False + allow_fetch = True + allow_commit = False allow_delete = False allow_list = True - # NOTE: Doesn't support query yet. + # NOTE: No query parameter is supported diff --git a/openstack/network/v2/network_ip_availability.py b/openstack/network/v2/network_ip_availability.py index 132fbc94fd..18e305422e 100644 --- a/openstack/network/v2/network_ip_availability.py +++ b/openstack/network/v2/network_ip_availability.py @@ -10,8 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class NetworkIPAvailability(resource.Resource): @@ -19,18 +18,23 @@ class NetworkIPAvailability(resource.Resource): resources_key = 'network_ip_availabilities' base_path = '/network-ip-availabilities' name_attribute = 'network_name' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = False - allow_get = True - allow_update = False + allow_fetch = True + allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( - 'network_id', 'network_name', - project_id='tenant_id' + 'ip_version', + 'network_id', + 'network_name', + 'project_id', + 'sort_key', + 'sort_dir', ) # Properties @@ -42,7 +46,9 @@ class NetworkIPAvailability(resource.Resource): #: *Type: list* subnet_ip_availability = resource.Body('subnet_ip_availability', type=list) #: The ID of the project this network IP availability is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The total ips of a network. #: *Type: int* total_ips = resource.Body('total_ips', type=int) diff --git a/openstack/network/v2/network_segment_range.py b/openstack/network/v2/network_segment_range.py new file mode 100644 index 0000000000..55d08de399 --- /dev/null +++ b/openstack/network/v2/network_segment_range.py @@ -0,0 +1,82 @@ +# Copyright (c) 2018, Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class NetworkSegmentRange(resource.Resource): + resource_key = 'network_segment_range' + resources_key = 'network_segment_ranges' + base_path = '/network_segment_ranges' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'name', + 'default', + 'shared', + 'project_id', + 'network_type', + 'physical_network', + 'minimum', + 'maximum', + 'used', + 'available', + 'sort_key', + 'sort_dir', + ) + + # Properties + #: The network segment range name. + name = resource.Body('name') + #: The network segment range is loaded from the host configuration file. + #: *Type: bool* + default = resource.Body('default', type=bool) + #: The network segment range is shared with other projects. + #: *Type: bool* + shared = resource.Body('shared', type=bool) + #: The ID of the project associated with this network segment range. + project_id = resource.Body('project_id') + #: The type of network associated with this network segment range, such as + #: ``geneve``, ``gre``, ``vlan`` or ``vxlan``. + network_type = resource.Body('network_type') + #: The name of the physical network associated with this network segment + #: range. + physical_network = resource.Body('physical_network') + #: The minimum segmentation ID for this network segment range. 
The + #: network type defines the segmentation model, VLAN ID for ``vlan`` + #: network type and tunnel ID for ``geneve``, ``gre`` and ``vxlan`` + #: network types. + #: *Type: int* + minimum = resource.Body('minimum', type=int) + #: The maximum segmentation ID for this network segment range. The + #: network type defines the segmentation model, VLAN ID for ``vlan`` + #: network type and tunnel ID for ``geneve``, ``gre`` and ``vxlan`` + #: network types. + #: *Type: int* + maximum = resource.Body('maximum', type=int) + #: Mapping of which segmentation ID in the range is used by which tenant. + #: *Type: dict* + used = resource.Body('used', type=dict) + #: List of available segmentation IDs in this network segment range. + #: *Type: list* + available = resource.Body('available', type=list) diff --git a/openstack/network/v2/pool.py b/openstack/network/v2/pool.py index 27888d89ac..7d2ee31da2 100644 --- a/openstack/network/v2/pool.py +++ b/openstack/network/v2/pool.py @@ -10,34 +10,43 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class Pool(resource.Resource): resource_key = 'pool' resources_key = 'pools' base_path = '/lbaas/pools' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'description', 'lb_algorithm', 'name', - 'protocol', 'provider', 'subnet_id', 'virtual_ip_id', + 'description', + 'lb_algorithm', + 'name', + 'protocol', + 'provider', + 'subnet_id', + 'virtual_ip_id', + 'listener_id', + 'project_id', is_admin_state_up='admin_state_up', - project_id='tenant_id', + load_balancer_id='loadbalancer_id', ) # Properties #: Description for the pool. 
description = resource.Body('description') #: The ID of the associated health monitors. + health_monitor_id = resource.Body('healthmonitor_id') + #: The ID of the associated health monitors (LBaaS v1). health_monitor_ids = resource.Body('health_monitors', type=list) #: The statuses of the associated health monitors. health_monitor_status = resource.Body('health_monitor_status', type=list) @@ -51,16 +60,22 @@ class Pool(resource.Resource): #: List of associated listeners. #: *Type: list of dicts which contain the listener IDs* listener_ids = resource.Body('listeners', type=list) + #: ID of listener associated with this pool + listener_id = resource.Body('listener_id') #: List of associated load balancers. #: *Type: list of dicts which contain the load balancer IDs* load_balancer_ids = resource.Body('loadbalancers', type=list) + #: ID of load balancer associated with this pool + load_balancer_id = resource.Body('loadbalancer_id') #: List of members that belong to the pool. #: *Type: list of dicts which contain the member IDs* member_ids = resource.Body('members', type=list) #: Pool name. Does not have to be unique. name = resource.Body('name') #: The ID of the project this pool is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The protocol of the pool, which is TCP, HTTP, or HTTPS. protocol = resource.Body('protocol') #: The provider name of the load balancer service. diff --git a/openstack/network/v2/pool_member.py b/openstack/network/v2/pool_member.py index 2d8dba0588..02f01e6e5c 100644 --- a/openstack/network/v2/pool_member.py +++ b/openstack/network/v2/pool_member.py @@ -10,27 +10,31 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class PoolMember(resource.Resource): resource_key = 'member' resources_key = 'members' base_path = '/lbaas/pools/%(pool_id)s/members' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'address', 'name', 'protocol_port', 'subnet_id', 'weight', + 'address', + 'name', + 'protocol_port', + 'subnet_id', + 'weight', + 'project_id', is_admin_state_up='admin_state_up', - project_id='tenant_id', ) # Properties @@ -44,7 +48,9 @@ class PoolMember(resource.Resource): #: Name of the pool member. name = resource.Body('name') #: The ID of the project this pool member is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The port on which the application is hosted. protocol_port = resource.Body('protocol_port', type=int) #: Subnet ID in which to access this pool member. diff --git a/openstack/network/v2/port.py b/openstack/network/v2/port.py index 63ef801a43..e68f28be13 100644 --- a/openstack/network/v2/port.py +++ b/openstack/network/v2/port.py @@ -10,34 +10,56 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack.network.v2 import _base +from openstack import resource -class Port(resource.Resource): +class Port(_base.NetworkResource, _base.TagMixinNetwork): resource_key = 'port' resources_key = 'ports' base_path = '/ports' - service = network_service.NetworkService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # NOTE: we skip query on list or datetime fields for now _query_mapping = resource.QueryParameters( - 'description', 'device_id', 'device_owner', 'ip_address', - 'mac_address', 'name', 'network_id', 'status', 'subnet_id', + 'binding:host_id', + 'binding:profile', + 'binding:vif_details', + 'binding:vif_type', + 'binding:vnic_type', + 'description', + 'device_id', + 'device_owner', + 'fields', + 'fixed_ips', + 'id', + 'ip_address', + 'mac_address', + 'name', + 'network_id', + 'status', + 'subnet_id', + 'project_id', + 'security_groups', + 'sort_key', + 'sort_dir', + # For backward compatibility include tenant_id as query param + tenant_id='project_id', is_admin_state_up='admin_state_up', is_port_security_enabled='port_security_enabled', - project_id='tenant_id', + security_group_ids='security_groups', + **_base.TagMixinNetwork._tag_query_parameters, ) # Properties - #: Allowed address pairs. + #: Allowed address pairs list. Dictionary key ``ip_address`` is required + #: and key ``mac_address`` is optional. allowed_address_pairs = resource.Body('allowed_address_pairs', type=list) #: The ID of the host where the port is allocated. In some cases, #: different implementations can run on different hosts. @@ -67,55 +89,81 @@ class Port(resource.Resource): binding_vnic_type = resource.Body('binding:vnic_type') #: Timestamp when the port was created. created_at = resource.Body('created_at') + #: Underlying data plane status of this port. 
+ data_plane_status = resource.Body('data_plane_status') #: The port description. description = resource.Body('description') #: Device ID of this port. device_id = resource.Body('device_id') #: Device owner of this port (e.g. ``network:dhcp``). device_owner = resource.Body('device_owner') + #: Device profile of this port, refers to Cyborg device-profiles: + # https://docs.openstack.org/api-ref/accelerator/v2/index.html# + # device-profiles. + device_profile = resource.Body('device_profile') #: DNS assignment for the port. dns_assignment = resource.Body('dns_assignment') + #: DNS domain assigned to the port. + dns_domain = resource.Body('dns_domain') #: DNS name for the port. dns_name = resource.Body('dns_name') #: Extra DHCP options. extra_dhcp_opts = resource.Body('extra_dhcp_opts', type=list) - #: IP addresses of an allowed address pair. - ip_address = resource.Body('ip_address') #: IP addresses for the port. Includes the IP address and subnet ID. fixed_ips = resource.Body('fixed_ips', type=list) + #: The type of hardware offload this port will request when attached to the + # network backend. + hardware_offload_type = resource.Body('hardware_offload_type') + #: Read-only. The ip_allocation indicates when ports use deferred, + # immediate or no IP allocation. + ip_allocation = resource.Body('ip_allocation') #: The administrative state of the port, which is up ``True`` or #: down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The port security status, which is enabled ``True`` or disabled #: ``False``. *Type: bool* *Default: False* - is_port_security_enabled = resource.Body('port_security_enabled', - type=bool, default=False) + is_port_security_enabled = resource.Body( + 'port_security_enabled', type=bool, default=False + ) #: The MAC address of an allowed address pair. mac_address = resource.Body('mac_address') #: The port name. name = resource.Body('name') #: The ID of the attached network. 
network_id = resource.Body('network_id') + #: The NUMA affinity policy defined for this port. + numa_affinity_policy = resource.Body('numa_affinity_policy') #: The ID of the project who owns the network. Only administrative #: users can specify a project ID other than their own. - project_id = resource.Body('tenant_id') - #: The extra DHCP option name. - option_name = resource.Body('opt_name') - #: The extra DHCP option value. - option_value = resource.Body('opt_value') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: Whether to propagate uplink status of the port. *Type: bool* + propagate_uplink_status = resource.Body( + 'propagate_uplink_status', type=bool + ) + #: Read-only. The ID of the QoS policy attached to the network where the + # port is bound. + qos_network_policy_id = resource.Body('qos_network_policy_id') #: The ID of the QoS policy attached to the port. qos_policy_id = resource.Body('qos_policy_id') - #: Revision number of the port. *Type: int* - revision_number = resource.Body('revision_number', type=int) + #: Read-only. The port-resource-request exposes Placement resources + # (i.e.: minimum-bandwidth) and traits (i.e.: vnic-type, physnet) + # requested by a port to Nova and Placement. + resource_request = resource.Body('resource_request', type=dict) #: The IDs of any attached security groups. #: *Type: list of strs of the security group IDs* security_group_ids = resource.Body('security_groups', type=list) #: The port status. Value is ``ACTIVE`` or ``DOWN``. status = resource.Body('status') - #: The ID of the subnet. If you specify only a subnet UUID, OpenStack - #: networking allocates an available IP from that subnet to the port. - #: If you specify both a subnet ID and an IP address, OpenStack networking - #: tries to allocate the address to the port. - subnet_id = resource.Body('subnet_id') + #: Read-only. 
The trunk referring to this parent port and its subports. + #: Present for trunk parent ports if ``trunk-details`` extension is loaded. + #: *Type: dict with keys: trunk_id, sub_ports. + #: sub_ports is a list of dicts with keys: + #: port_id, segmentation_type, segmentation_id, mac_address* + trunk_details = resource.Body('trunk_details', type=dict) + #: Status of the trusted VIF setting, this value is added to the + #: binding:profile field and passed to services which need it, like Nova + trusted = resource.Body('trusted', type=bool) #: Timestamp when the port was last updated. updated_at = resource.Body('updated_at') diff --git a/openstack/network/v2/port_binding.py b/openstack/network/v2/port_binding.py new file mode 100644 index 0000000000..2b339fbf55 --- /dev/null +++ b/openstack/network/v2/port_binding.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class PortBinding(resource.Resource): + name_attribute = "bindings" + resource_name = "binding" + resource_key = 'binding' + resources_key = 'bindings' + base_path = '/ports/%(port_id)s/bindings' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = False + allow_commit = True + allow_delete = True + allow_list = True + + requires_id = False + + # Properties + #: The port ID of the binding + port_id = resource.URI('port_id') + #: The hostname of the system the agent is running on. + host = resource.Body('host') + #: A dictionary that enables the application running on + # the specific host to pass and receive vif port information + # specific to the networking back-end. + profile = resource.Body('profile', type=dict) + #: A dictionary which contains additional information on the + # port. The following fields are defined: port_filter and + # ovs_hybrid_plug, both are booleans. + vif_details = resource.Body('vif_details', type=dict) + #: The type of which mechanism is used for the port. + # Currently the following values are supported: ovs, bridge, + # macvtap, hw_veb, hostdev_physical, vhostuser, distributed and + # other. + vif_type = resource.Body('vif_type') + #: The type of vNIC which this port should be attached to. + # The valid values are normal, macvtap, direct, baremetal, + # direct-physical, virtio-forwarder, smart-nic and remote-managed. 
+ vnic_type = resource.Body('vnic_type') + + def activate_port_binding(self, session, **attrs): + host = attrs['host'] + url = utils.urljoin( + '/ports', self.port_id, 'bindings', host, 'activate' + ) + resp = session.put(url, json={'binding': attrs}) + exceptions.raise_from_response(resp) + self._body.attributes.update(resp.json()) + return self + + def delete_port_binding(self, session, host): + url = utils.urljoin('/ports', self.port_id, 'bindings', host) + resp = session.delete(url) + exceptions.raise_from_response(resp) diff --git a/openstack/network/v2/port_forwarding.py b/openstack/network/v2/port_forwarding.py new file mode 100644 index 0000000000..1e651f76e0 --- /dev/null +++ b/openstack/network/v2/port_forwarding.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class PortForwarding(resource.Resource): + name_attribute = "floating_ip_port_forwarding" + resource_name = "port forwarding" + resource_key = 'port_forwarding' + resources_key = 'port_forwardings' + base_path = '/floatingips/%(floatingip_id)s/port_forwardings' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'internal_port_id', + 'external_port', + 'protocol', + 'sort_key', + 'sort_dir', + ) + + # Properties + #: The ID of Floating IP address + floatingip_id = resource.URI('floatingip_id') + #: The ID of internal port + internal_port_id = resource.Body('internal_port_id') + #: The internal IP address + internal_ip_address = resource.Body('internal_ip_address') + #: The internal TCP/UDP/other port number + internal_port = resource.Body('internal_port', type=int) + #: The external TCP/UDP/other port number + external_port = resource.Body('external_port', type=int) + #: The protocol + protocol = resource.Body('protocol') + #: The description + description = resource.Body('description') diff --git a/openstack/network/v2/qos_bandwidth_limit_rule.py b/openstack/network/v2/qos_bandwidth_limit_rule.py index 62f915648a..33e24b69d7 100644 --- a/openstack/network/v2/qos_bandwidth_limit_rule.py +++ b/openstack/network/v2/qos_bandwidth_limit_rule.py @@ -10,20 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class QoSBandwidthLimitRule(resource.Resource): resource_key = 'bandwidth_limit_rule' resources_key = 'bandwidth_limit_rules' base_path = '/qos/policies/%(qos_policy_id)s/bandwidth_limit_rules' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True @@ -34,6 +34,5 @@ class QoSBandwidthLimitRule(resource.Resource): max_kbps = resource.Body('max_kbps') #: Maximum burst bandwidth in kbps. max_burst_kbps = resource.Body('max_burst_kbps') - # NOTE(ralonsoh): to be implemented in bug 1560961 #: Traffic direction from the tenant point of view ('egress', 'ingress'). - # direction = resource.prop('direction') + direction = resource.Body('direction') diff --git a/openstack/network/v2/qos_dscp_marking_rule.py b/openstack/network/v2/qos_dscp_marking_rule.py index c01a6e5483..ac89a1afb0 100644 --- a/openstack/network/v2/qos_dscp_marking_rule.py +++ b/openstack/network/v2/qos_dscp_marking_rule.py @@ -10,20 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class QoSDSCPMarkingRule(resource.Resource): resource_key = 'dscp_marking_rule' resources_key = 'dscp_marking_rules' base_path = '/qos/policies/%(qos_policy_id)s/dscp_marking_rules' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True diff --git a/openstack/network/v2/qos_minimum_bandwidth_rule.py b/openstack/network/v2/qos_minimum_bandwidth_rule.py index 09577dc97b..bd22fd01aa 100644 --- a/openstack/network/v2/qos_minimum_bandwidth_rule.py +++ b/openstack/network/v2/qos_minimum_bandwidth_rule.py @@ -10,20 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class QoSMinimumBandwidthRule(resource.Resource): resource_key = 'minimum_bandwidth_rule' resources_key = 'minimum_bandwidth_rules' base_path = '/qos/policies/%(qos_policy_id)s/minimum_bandwidth_rules' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True diff --git a/openstack/network/v2/qos_minimum_packet_rate_rule.py b/openstack/network/v2/qos_minimum_packet_rate_rule.py new file mode 100644 index 0000000000..4b727ad5d3 --- /dev/null +++ b/openstack/network/v2/qos_minimum_packet_rate_rule.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack import resource + + +class QoSMinimumPacketRateRule(resource.Resource): + resource_key = 'minimum_packet_rate_rule' + resources_key = 'minimum_packet_rate_rules' + base_path = '/qos/policies/%(qos_policy_id)s/minimum_packet_rate_rules' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: Traffic direction from the tenant point of view. Valid values: ('any', + #: 'egress', 'ingress') + direction = resource.Body('direction') + #: Minimum packet rate in kpps. + min_kpps = resource.Body('min_kpps') + #: The ID of the QoS policy who owns rule. + qos_policy_id = resource.URI('qos_policy_id') diff --git a/openstack/network/v2/qos_packet_rate_limit_rule.py b/openstack/network/v2/qos_packet_rate_limit_rule.py new file mode 100644 index 0000000000..7bbad38ee2 --- /dev/null +++ b/openstack/network/v2/qos_packet_rate_limit_rule.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class QoSPacketRateLimitRule(resource.Resource): + resource_key = 'packet_rate_limit_rule' + resources_key = resource_key + 's' + base_path = '/qos/policies/%(qos_policy_id)s/' + resources_key + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + #: The ID of the QoS policy who owns rule. + qos_policy_id = resource.URI('qos_policy_id') + #: Maximum packet rate in kpps. + max_kpps = resource.Body('max_kpps') + #: Maximum burst packet rate in kpps. + max_burst_kpps = resource.Body('max_burst_kpps') + #: Traffic direction from the tenant point of view ('egress', 'ingress', + # 'any'). + direction = resource.Body('direction') diff --git a/openstack/network/v2/qos_policy.py b/openstack/network/v2/qos_policy.py index 68a6295e45..bc96fdbb1c 100644 --- a/openstack/network/v2/qos_policy.py +++ b/openstack/network/v2/qos_policy.py @@ -9,28 +9,35 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License.
+from openstack.common import tag +from openstack import resource +from openstack import utils -from openstack.network import network_service -from openstack import resource2 as resource - -class QoSPolicy(resource.Resource): +class QoSPolicy(resource.Resource, tag.TagMixin): resource_key = 'policy' resources_key = 'policies' base_path = '/qos/policies' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'name', 'description', - project_id='tenant_id', - is_shared='shared' + 'name', + 'description', + 'id', + 'is_default', + 'project_id', + 'sort_key', + 'sort_dir', + is_shared='shared', + **tag.TagMixin._tag_query_parameters, ) # Properties @@ -38,11 +45,23 @@ class QoSPolicy(resource.Resource): name = resource.Body('name') #: The ID of the project who owns the network. Only administrative #: users can specify a project ID other than their own. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The QoS policy description. description = resource.Body('description') + #: Indicates whether this QoS policy is the default policy for this + #: project. + #: *Type: bool* + is_default = resource.Body('is_default', type=bool) #: Indicates whether this QoS policy is shared across all projects. #: *Type: bool* is_shared = resource.Body('shared', type=bool) #: List of QoS rules applied to this QoS policy. 
rules = resource.Body('rules') + + def set_tags(self, session, tags): + url = utils.urljoin('/policies', self.id, 'tags') + session.put(url, json={'tags': tags}) + self._body.attributes.update({'tags': tags}) + return self diff --git a/openstack/network/v2/qos_rule_type.py b/openstack/network/v2/qos_rule_type.py index 2f33f59b8e..7aff99e7dc 100644 --- a/openstack/network/v2/qos_rule_type.py +++ b/openstack/network/v2/qos_rule_type.py @@ -10,25 +10,32 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class QoSRuleType(resource.Resource): resource_key = 'rule_type' resources_key = 'rule_types' base_path = '/qos/rule-types' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = False - allow_get = False - allow_update = False + allow_fetch = True + allow_commit = False allow_delete = False allow_list = True - _query_mapping = resource.QueryParameters('type') + _query_mapping = resource.QueryParameters( + 'type', + 'drivers', + 'all_rules', + 'all_supported', + ) # Properties #: QoS rule type name. - type = resource.Body('type') + type = resource.Body('type', alternate_id=True) + #: List of QoS backend drivers supporting this QoS rule type + drivers = resource.Body('drivers') diff --git a/openstack/network/v2/quota.py b/openstack/network/v2/quota.py index 8620fbaac3..e4a4c55cb3 100644 --- a/openstack/network/v2/quota.py +++ b/openstack/network/v2/quota.py @@ -10,23 +10,25 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class Quota(resource.Resource): resource_key = 'quota' resources_key = 'quotas' base_path = '/quotas' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # Properties + #: Flag to check the quota usage before setting the new limit. *Type: bool* + check_limit = resource.Body('check_limit', type=bool) #: The maximum amount of floating IPs you can have. *Type: int* floating_ips = resource.Body('floatingip', type=int) #: The maximum amount of health monitors you can create. *Type: int* @@ -44,7 +46,7 @@ class Quota(resource.Resource): #: The maximum amount of ports you can create. *Type: int* ports = resource.Body('port', type=int) #: The ID of the project these quota values are for. - project_id = resource.Body('tenant_id') + project_id = resource.Body('tenant_id', alternate_id=True) #: The maximum amount of RBAC policies you can create. *Type: int* rbac_policies = resource.Body('rbac_policy', type=int) #: The maximum amount of routers you can create. *Type: int* @@ -58,16 +60,78 @@ class Quota(resource.Resource): #: The maximum amount of security groups you can create. 
*Type: int* security_groups = resource.Body('security_group', type=int) + def _prepare_request( + self, + requires_id=True, + prepend_key=False, + patch=False, + base_path=None, + *args, + **kwargs, + ): + _request = super()._prepare_request(requires_id, prepend_key) + if self.resource_key in _request.body: + _body = _request.body[self.resource_key] + else: + _body = _request.body + if 'id' in _body: + del _body['id'] + return _request + class QuotaDefault(Quota): base_path = '/quotas/%(project)s/default' # capabilities allow_retrieve = True - allow_update = False + allow_commit = False allow_delete = False allow_list = False # Properties #: The ID of the project. project = resource.URI('project') + + +class QuotaDetails(Quota): + base_path = '/quotas/%(project)s/details' + + # capabilities + allow_retrieve = True + allow_commit = False + allow_delete = False + allow_list = False + + # Properties + #: The ID of the project. + project = resource.URI('project') + #: The maximum amount of floating IPs you can have. *Type: dict* + floating_ips = resource.Body('floatingip', type=dict) + #: The maximum amount of health monitors you can create. *Type: dict* + health_monitors = resource.Body('healthmonitor', type=dict) + #: The maximum amount of listeners you can create. *Type: dict* + listeners = resource.Body('listener', type=dict) + #: The maximum amount of load balancers you can create. *Type: dict* + load_balancers = resource.Body('loadbalancer', type=dict) + #: The maximum amount of L7 policies you can create. *Type: dict* + l7_policies = resource.Body('l7policy', type=dict) + #: The maximum amount of networks you can create. *Type: dict* + networks = resource.Body('network', type=dict) + #: The maximum amount of pools you can create. *Type: dict* + pools = resource.Body('pool', type=dict) + #: The maximum amount of ports you can create. *Type: dict* + ports = resource.Body('port', type=dict) + #: The ID of the project these quota values are for. 
+ project_id = resource.Body('project_id', alternate_id=True) + #: The maximum amount of RBAC policies you can create. *Type: dict* + rbac_policies = resource.Body('rbac_policy', type=dict) + #: The maximum amount of routers you can create. *Type: int* + routers = resource.Body('router', type=dict) + #: The maximum amount of subnets you can create. *Type: dict* + subnets = resource.Body('subnet', type=dict) + #: The maximum amount of subnet pools you can create. *Type: dict* + subnet_pools = resource.Body('subnetpool', type=dict) + #: The maximum amount of security group rules you can create. *Type: dict* + security_group_rules = resource.Body('security_group_rule', type=dict) + #: The maximum amount of security groups you can create. *Type: dict* + security_groups = resource.Body('security_group', type=dict) diff --git a/openstack/network/v2/rbac_policy.py b/openstack/network/v2/rbac_policy.py index 16c3022ae6..00ac276929 100644 --- a/openstack/network/v2/rbac_policy.py +++ b/openstack/network/v2/rbac_policy.py @@ -10,26 +10,30 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class RBACPolicy(resource.Resource): resource_key = 'rbac_policy' resources_key = 'rbac_policies' base_path = '/rbac-policies' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'action', 'object_id', 'object_type', 'project_id', + 'action', + 'object_id', + 'object_type', + 'project_id', 'target_project_id', + target_project_id='target_tenant', ) # Properties @@ -38,7 +42,9 @@ class RBACPolicy(resource.Resource): #: The ID of the project this RBAC will be enforced. 
target_project_id = resource.Body('target_tenant') #: The owner project ID. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: Type of the object that this RBAC policy affects. object_type = resource.Body('object_type') #: Action for the RBAC policy. diff --git a/openstack/network/v2/router.py b/openstack/network/v2/router.py index 5043ce13a3..9a9eb3d2aa 100644 --- a/openstack/network/v2/router.py +++ b/openstack/network/v2/router.py @@ -10,38 +10,46 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import exceptions +from openstack.network.v2 import _base +from openstack import resource from openstack import utils -class Router(resource.Resource): +class Router(_base.NetworkResource, _base.TagMixinNetwork): resource_key = 'router' resources_key = 'routers' base_path = '/routers' - service = network_service.NetworkService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # NOTE: We don't support query on datetime, list or dict fields _query_mapping = resource.QueryParameters( - 'description', 'flavor_id', 'name', 'status', + 'description', + 'flavor_id', + 'id', + 'name', + 'status', + 'project_id', + 'sort_key', + 'sort_dir', is_admin_state_up='admin_state_up', is_distributed='distributed', is_ha='ha', - project_id='tenant_id', + **_base.TagMixinNetwork._tag_query_parameters, ) # Properties #: Availability zone hints to use when scheduling the router. 
#: *Type: list of availability zone names* - availability_zone_hints = resource.Body('availability_zone_hints', - type=list) + availability_zone_hints = resource.Body( + 'availability_zone_hints', type=list + ) #: Availability zones for the router. #: *Type: list of availability zone names* availability_zones = resource.Body('availability_zones', type=list) @@ -49,6 +57,8 @@ class Router(resource.Resource): created_at = resource.Body('created_at') #: The router description. description = resource.Body('description') + #: The ndp proxy state of the router + enable_ndp_proxy = resource.Body('enable_ndp_proxy', type=bool) #: The ``network_id``, for the external gateway. *Type: dict* external_gateway_info = resource.Body('external_gateway_info', type=dict) #: The ID of the flavor. @@ -57,15 +67,17 @@ class Router(resource.Resource): #: or down ``False``. *Type: bool* is_admin_state_up = resource.Body('admin_state_up', type=bool) #: The distributed state of the router, which is distributed ``True`` - #: or not ``False``. *Type: bool* *Default: False* - is_distributed = resource.Body('distributed', type=bool, default=False) + #: or not ``False``. *Type: bool* + is_distributed = resource.Body('distributed', type=bool) #: The highly-available state of the router, which is highly available - #: ``True`` or not ``False``. *Type: bool* *Default: False* - is_ha = resource.Body('ha', type=bool, default=False) + #: ``True`` or not ``False``. *Type: bool* + is_ha = resource.Body('ha', type=bool) #: The router name. name = resource.Body('name') #: The ID of the project this router is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: Revision number of the router. *Type: int* revision_number = resource.Body('revision', type=int) #: The extra routes configuration for the router. 
@@ -75,56 +87,158 @@ class Router(resource.Resource): #: Timestamp when the router was created. updated_at = resource.Body('updated_at') + def _put(self, session, url, body): + resp = session.put(url, json=body) + exceptions.raise_from_response(resp) + return resp + def add_interface(self, session, **body): """Add an internal interface to a logical router. :param session: The session to communicate through. - :type session: :class:`~openstack.session.Session` - :param dict body : The body requested to be updated on the outer + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. + + :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'add_router_interface') - resp = session.put(url, endpoint_filter=self.service, json=body) + resp = self._put(session, url, body) return resp.json() def remove_interface(self, session, **body): """Remove an internal interface from a logical router. :param session: The session to communicate through. - :type session: :class:`~openstack.session.Session` - :param dict body : The body requested to be updated on the outer + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. + + :raises: :class:`~openstack.exceptions.SDKException` on error. """ url = utils.urljoin(self.base_path, self.id, 'remove_router_interface') - resp = session.put(url, endpoint_filter=self.service, json=body) + resp = self._put(session, url, body) return resp.json() + def add_extra_routes(self, session, body): + """Add extra routes to a logical router. + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The request body as documented in the api-ref. 
+ + :returns: The response as a Router object with the added extra routes. + + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'add_extraroutes') + resp = self._put(session, url, body) + self._translate_response(resp) + return self + + def remove_extra_routes(self, session, body): + """Remove extra routes from a logical router. + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The request body as documented in the api-ref. + + :returns: The response as a Router object with the extra routes left. + + :raises: :class:`~openstack.exceptions.SDKException` on error. + """ + url = utils.urljoin(self.base_path, self.id, 'remove_extraroutes') + resp = self._put(session, url, body) + self._translate_response(resp) + return self + def add_gateway(self, session, **body): """Add an external gateway to a logical router. :param session: The session to communicate through. - :type session: :class:`~openstack.session.Session` - :param dict body : The body requested to be updated on the outer + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. """ - url = utils.urljoin(self.base_path, self.id, - 'add_gateway_router') - resp = session.put(url, endpoint_filter=self.service, json=body) + url = utils.urljoin(self.base_path, self.id, 'add_gateway_router') + resp = session.put(url, json=body) return resp.json() def remove_gateway(self, session, **body): """Remove an external gateway from a logical router. :param session: The session to communicate through. 
- :type session: :class:`~openstack.session.Session` - :param dict body : The body requested to be updated on the outer + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The body requested to be updated on the router :returns: The body of the response as a dictionary. """ - url = utils.urljoin(self.base_path, self.id, - 'remove_gateway_router') - resp = session.put(url, endpoint_filter=self.service, json=body) + url = utils.urljoin(self.base_path, self.id, 'remove_gateway_router') + resp = session.put(url, json=body) return resp.json() + + def add_external_gateways(self, session, body): + """Add external gateways to a router. + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The body requested to be updated on the router + + :returns: The body of the response as a dictionary. + """ + url = utils.urljoin(self.base_path, self.id, 'add_external_gateways') + resp = session.put(url, json=body) + self._translate_response(resp) + return self + + def update_external_gateways(self, session, body): + """Update external gateways of a router. + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The body requested to be updated on the router + + :returns: The body of the response as a dictionary. + """ + url = utils.urljoin( + self.base_path, self.id, 'update_external_gateways' + ) + resp = session.put(url, json=body) + self._translate_response(resp) + return self + + def remove_external_gateways(self, session, body): + """Remove external gateways from a router. + + :param session: The session to communicate through. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param dict body: The body requested to be updated on the router + + :returns: The body of the response as a dictionary. 
+ """ + url = utils.urljoin( + self.base_path, self.id, 'remove_external_gateways' + ) + resp = session.put(url, json=body) + self._translate_response(resp) + return self + + +class L3AgentRouter(Router): + resource_key = 'router' + resources_key = 'routers' + base_path = '/agents/%(agent_id)s/l3-routers' + resource_name = 'l3-router' + + # capabilities + allow_create = False + allow_retrieve = True + allow_commit = False + allow_delete = False + allow_list = True + + +# NOTE: No query parameter is supported diff --git a/openstack/network/v2/security_group.py b/openstack/network/v2/security_group.py index 2b7e0ff5f4..268b9f3dcc 100644 --- a/openstack/network/v2/security_group.py +++ b/openstack/network/v2/security_group.py @@ -10,26 +10,35 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack.network.v2 import _base +from openstack import resource -class SecurityGroup(resource.Resource): +class SecurityGroup(_base.NetworkResource, _base.TagMixinNetwork): resource_key = 'security_group' resources_key = 'security_groups' base_path = '/security-groups' - service = network_service.NetworkService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'description', 'name', - project_id='tenant_id', + 'description', + 'fields', + 'id', + 'name', + 'stateful', + 'project_id', + 'tenant_id', + 'revision_number', + 'sort_dir', + 'sort_key', + is_shared='shared', + **_base.TagMixinNetwork._tag_query_parameters, ) # Properties @@ -39,13 +48,18 @@ class SecurityGroup(resource.Resource): description = resource.Body('description') #: The security group name. name = resource.Body('name') + #: Whether the security group is stateful or not. 
+ stateful = resource.Body('stateful') #: The ID of the project this security group is associated with. - project_id = resource.Body('tenant_id') - #: Revision number of the security group. *Type: int* - revision_number = resource.Body('revision_number', type=int) + project_id = resource.Body('project_id') #: A list of #: :class:`~openstack.network.v2.security_group_rule.SecurityGroupRule` #: objects. *Type: list* security_group_rules = resource.Body('security_group_rules', type=list) + #: The ID of the project this security group is associated with. + tenant_id = resource.Body('tenant_id', deprecated=True) #: Timestamp when the security group was last updated. updated_at = resource.Body('updated_at') + #: Indicates whether this Security Group is shared across all projects. + #: *Type: bool* + is_shared = resource.Body('shared', type=bool) diff --git a/openstack/network/v2/security_group_rule.py b/openstack/network/v2/security_group_rule.py index 6ea5e84a57..ba65c737c3 100644 --- a/openstack/network/v2/security_group_rule.py +++ b/openstack/network/v2/security_group_rule.py @@ -10,29 +10,40 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack.network.v2 import _base +from openstack import resource -class SecurityGroupRule(resource.Resource): +class SecurityGroupRule(_base.NetworkResource, _base.TagMixinNetwork): resource_key = 'security_group_rule' resources_key = 'security_group_rules' base_path = '/security-group-rules' - service = network_service.NetworkService() # capabilities allow_create = True - allow_get = True - allow_update = False + allow_fetch = True + allow_commit = False allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'description', 'direction', 'protocol', - 'remote_group_id', 'security_group_id', + 'description', + 'direction', + 'id', + 'protocol', + 'remote_group_id', + 'security_group_id', + 'remote_address_group_id', + 'port_range_max', + 'port_range_min', + 'remote_ip_prefix', + 'revision_number', + 'project_id', + 'tenant_id', + 'sort_dir', + 'sort_key', ether_type='ethertype', - project_id='tenant_id', - + **_base.TagMixinNetwork._tag_query_parameters, ) # Properties @@ -59,22 +70,38 @@ class SecurityGroupRule(resource.Resource): #: attribute. If the protocol is ICMP, this value must be an ICMP type. port_range_min = resource.Body('port_range_min', type=int) #: The ID of the project this security group rule is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id') #: The protocol that is matched by the security group rule. #: Valid values are ``null``, ``tcp``, ``udp``, and ``icmp``. protocol = resource.Body('protocol') #: The remote security group ID to be associated with this security #: group rule. You can specify either ``remote_group_id`` or - #: ``remote_ip_prefix`` in the request body. + #: ``remote_address_group_id`` or ``remote_ip_prefix``. remote_group_id = resource.Body('remote_group_id') + #: The remote address group ID to be associated with this security + #: group rule. 
You can specify either ``remote_group_id`` or + #: ``remote_address_group_id`` or ``remote_ip_prefix``. + remote_address_group_id = resource.Body('remote_address_group_id') #: The remote IP prefix to be associated with this security group rule. - #: You can specify either ``remote_group_id`` or ``remote_ip_prefix`` - #: in the request body. This attribute matches the specified IP prefix - #: as the source IP address of the IP packet. + #: You can specify either ``remote_group_id`` or + #: ``remote_address_group_id`` or ``remote_ip_prefix``. + #: This attribute matches the specified IP prefix as the source or + #: destination IP address of the IP packet depending on direction. remote_ip_prefix = resource.Body('remote_ip_prefix') - #: Revision number of the security group rule. *Type: int* - revision_number = resource.Body('revision_number', type=int) #: The security group ID to associate with this security group rule. security_group_id = resource.Body('security_group_id') + #: The ID of the project this security group rule is associated with. + tenant_id = resource.Body('tenant_id', deprecated=True) #: Timestamp when the security group rule was last updated. updated_at = resource.Body('updated_at') + + def _prepare_request(self, *args, **kwargs): + _request = super()._prepare_request(*args, **kwargs) + # Old versions of Neutron do not handle being passed a + # remote_address_group_id and raise an error. Remove it from + # the body if it is blank. + if not self.remote_address_group_id: + if 'security_group_rule' in _request.body: + _rule = _request.body['security_group_rule'] + _rule.pop('remote_address_group_id', None) + return _request diff --git a/openstack/network/v2/segment.py b/openstack/network/v2/segment.py index 8d211ecbf4..f0c1f09d45 100644 --- a/openstack/network/v2/segment.py +++ b/openstack/network/v2/segment.py @@ -10,26 +10,32 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class Segment(resource.Resource): resource_key = 'segment' resources_key = 'segments' base_path = '/segments' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'description', 'name', 'network_id', 'network_type', - 'physical_network', 'segmentation_id', + 'description', + 'name', + 'network_id', + 'network_type', + 'physical_network', + 'segmentation_id', + 'sort_key', + 'sort_dir', ) # Properties diff --git a/openstack/network/v2/service_profile.py b/openstack/network/v2/service_profile.py index 0af0a340f9..b6502164f7 100644 --- a/openstack/network/v2/service_profile.py +++ b/openstack/network/v2/service_profile.py @@ -10,27 +10,28 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class ServiceProfile(resource.Resource): resource_key = 'service_profile' resources_key = 'service_profiles' base_path = '/service_profiles' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'description', 'driver', + 'description', + 'driver', + 'project_id', is_enabled='enabled', - project_id='tenant_id' ) # Properties #: Description of the service flavor profile. 
@@ -42,4 +43,6 @@ class ServiceProfile(resource.Resource): #: Metainformation of the service flavor profile meta_info = resource.Body('metainfo') #: The owner project ID - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) diff --git a/openstack/network/v2/service_provider.py b/openstack/network/v2/service_provider.py index 0f66803036..ef2e4b0049 100644 --- a/openstack/network/v2/service_provider.py +++ b/openstack/network/v2/service_provider.py @@ -10,25 +10,26 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource class ServiceProvider(resource.Resource): resources_key = 'service_providers' base_path = '/service-providers' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # Capabilities allow_create = False - allow_get = False - allow_update = False + allow_fetch = False + allow_commit = False allow_delete = False allow_list = True _query_mapping = resource.QueryParameters( - 'service_type', 'name', - is_default='default' + 'service_type', + 'name', + is_default='default', ) # Properties diff --git a/openstack/network/v2/sfc_flow_classifier.py b/openstack/network/v2/sfc_flow_classifier.py new file mode 100644 index 0000000000..527a372202 --- /dev/null +++ b/openstack/network/v2/sfc_flow_classifier.py @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class SfcFlowClassifier(resource.Resource): + resource_key = 'flow_classifier' + resources_key = 'flow_classifiers' + base_path = '/sfc/flow_classifiers' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'name', + 'project_id', + 'tenant_id', + 'ethertype', + 'protocol', + 'source_port_range_min', + 'source_port_range_max', + 'destination_port_range_min', + 'destination_port_range_max', + 'logical_source_port', + 'logical_destination_port', + ) + + # Properties + #: Human-readable description for the resource. + description = resource.Body('description') + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + #: Must be IPv4 or IPv6, and addresses represented in CIDR must match + # the ingress or egress rules. + ethertype = resource.Body('ethertype') + #: The IP protocol can be represented by a string, an integer, or null. + #: Valid values: any (0), ah (51), dccp (33), egp (8), esp (50), gre (47), + #: icmp (1), icmpv6 (58), igmp (2), ipip (4), ipv6-encap (41), + #: ipv6-frag (44), ipv6-icmp (58), ipv6-nonxt (59), ipv6-opts (60), + #: ipv6-route (43), ospf (89), pgm (113), rsvp (46), sctp (132), tcp (6), + #: udp (17), udplite (136), vrrp (112). + protocol = resource.Body('protocol') + #: Minimum source protocol port. + source_port_range_min = resource.Body('source_port_range_min', type=int) + #: Maximum source protocol port. 
+ source_port_range_max = resource.Body('source_port_range_max', type=int) + #: Minimum destination protocol port. + destination_port_range_min = resource.Body( + 'destination_port_range_min', type=int + ) + #: Maximum destination protocol port. + destination_port_range_max = resource.Body( + 'destination_port_range_max', type=int + ) + #: The source IP prefix. + source_ip_prefix = resource.Body('source_ip_prefix') + #: The destination IP prefix. + destination_ip_prefix = resource.Body('destination_ip_prefix') + #: The UUID of the source logical port. + logical_source_port = resource.Body('logical_source_port') + #: The UUID of the destination logical port. + logical_destination_port = resource.Body('logical_destination_port') + #: A dictionary of L7 parameters, in the form of + #: logical_source_network: uuid, logical_destination_network: uuid. + l7_parameters = resource.Body('l7_parameters', type=dict) + #: Summary field of a Flow Classifier, composed of the + #: protocol, source protocol port, destination protocol port, + #: logical_source_port, logical_destination_port and + #: l7_parameters + summary = resource.Computed('summary', default='') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) diff --git a/openstack/network/v2/sfc_port_chain.py b/openstack/network/v2/sfc_port_chain.py new file mode 100644 index 0000000000..bf89148d23 --- /dev/null +++ b/openstack/network/v2/sfc_port_chain.py @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class SfcPortChain(resource.Resource): + resource_key = 'port_chain' + resources_key = 'port_chains' + base_path = '/sfc/port_chains' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'name', + 'project_id', + 'tenant_id', + ) + + # Properties + #: Human-readable description for the resource. + description = resource.Body('description') + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + #: List of port-pair-group UUIDs. + port_pair_groups = resource.Body('port_pair_groups', type=list) + #: List of flow-classifier UUIDs. + flow_classifiers = resource.Body('flow_classifiers', type=list) + #: A dictionary of chain parameters, correlation values can be + #: mpls and nsh, symmetric can be True or False. + chain_parameters = resource.Body('chain_parameters', type=dict) + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) diff --git a/openstack/network/v2/sfc_port_pair.py b/openstack/network/v2/sfc_port_pair.py new file mode 100644 index 0000000000..bbe4acceee --- /dev/null +++ b/openstack/network/v2/sfc_port_pair.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class SfcPortPair(resource.Resource): + resource_key = 'port_pair' + resources_key = 'port_pairs' + base_path = '/sfc/port_pairs' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'name', + 'egress', + 'ingress', + 'project_id', + 'tenant_id', + ) + + # Properties + #: Human-readable description for the resource. + description = resource.Body('description') + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + #: The UUID of the ingress Neutron port. + ingress = resource.Body('ingress') + #: The UUID of the egress Neutron port. + egress = resource.Body('egress') + #: A dictionary of service function parameters, correlation values can be + #: mpls and nsh, weight which can be an int. + service_function_parameters = resource.Body( + 'service_function_parameters', type=dict + ) + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) diff --git a/openstack/network/v2/sfc_port_pair_group.py b/openstack/network/v2/sfc_port_pair_group.py new file mode 100644 index 0000000000..f53c2afbc3 --- /dev/null +++ b/openstack/network/v2/sfc_port_pair_group.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class SfcPortPairGroup(resource.Resource): + resource_key = 'port_pair_group' + resources_key = 'port_pair_groups' + base_path = '/sfc/port_pair_groups' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'name', + 'project_id', + 'tenant_id', + ) + + # Properties + #: Human-readable description for the resource. + description = resource.Body('description') + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + #: List of port-pair UUIDs. + port_pairs = resource.Body('port_pairs', type=list) + #: Dictionary of port pair group parameters, in the form of + #: lb_fields: list of regex (eth|ip|tcp|udp)_(src|dst)), + #: ppg_n_tuple_mapping: ingress_n_tuple or egress_n_tuple. + #: The ingress or egress tuple is a dict with the following keys: + #: source_ip_prefix, destination_ip_prefix, source_port_range_min, + #: source_port_range_max, destination_port_range_min, + #: destination_port_range_max. + port_pair_group_parameters = resource.Body( + 'port_pair_group_parameters', type=dict + ) + #: True if passive Tap service functions support is enabled, + #: default is False. + is_tap_enabled = resource.Body('tap_enabled', type=bool) + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) diff --git a/openstack/network/v2/sfc_service_graph.py b/openstack/network/v2/sfc_service_graph.py new file mode 100644 index 0000000000..fe76db5bb1 --- /dev/null +++ b/openstack/network/v2/sfc_service_graph.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class SfcServiceGraph(resource.Resource): + resource_key = 'service_graph' + resources_key = 'service_graphs' + base_path = '/sfc/service_graphs' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'name', + 'project_id', + 'tenant_id', + ) + + # Properties + #: Human-readable description for the resource. + description = resource.Body('description') + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + #: A dictionary where the key is the source port chain and the + #: value is a list of destination port chains. + port_chains = resource.Body('port_chains') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) diff --git a/openstack/network/v2/subnet.py b/openstack/network/v2/subnet.py index e52229dcc1..6e67202512 100644 --- a/openstack/network/v2/subnet.py +++ b/openstack/network/v2/subnet.py @@ -10,31 +10,42 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack.network.v2 import _base +from openstack import resource -class Subnet(resource.Resource): +class Subnet(_base.NetworkResource, _base.TagMixinNetwork): resource_key = 'subnet' resources_key = 'subnets' base_path = '/subnets' - service = network_service.NetworkService() # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True # NOTE: Query on list or datetime fields are currently not supported. _query_mapping = resource.QueryParameters( - 'cidr', 'description', 'gateway_ip', 'ip_version', - 'ipv6_address_mode', 'ipv6_ra_mode', 'name', 'network_id', + 'cidr', + 'description', + 'gateway_ip', + 'id', + 'ip_version', + 'ipv6_address_mode', + 'ipv6_ra_mode', + 'name', + 'network_id', 'segment_id', + 'dns_publish_fixed_ip', + 'project_id', + 'sort_key', + 'sort_dir', is_dhcp_enabled='enable_dhcp', - project_id='tenant_id', subnet_pool_id='subnetpool_id', + use_default_subnet_pool='use_default_subnetpool', + **_base.TagMixinNetwork._tag_query_parameters, ) # Properties @@ -49,6 +60,8 @@ class Subnet(resource.Resource): description = resource.Body('description') #: A list of DNS nameservers. dns_nameservers = resource.Body('dns_nameservers', type=list) + #: Whether to publish DNS records for fixed IPs + dns_publish_fixed_ip = resource.Body('dns_publish_fixed_ip', type=bool) #: The gateway IP address. gateway_ip = resource.Body('gateway_ip') #: A list of host routes. @@ -57,7 +70,7 @@ class Subnet(resource.Resource): #: *Type: int* ip_version = resource.Body('ip_version', type=int) #: The IPv6 address modes which are 'dhcpv6-stateful', 'dhcpv6-stateless' - #: or 'slacc'. + #: or 'slaac'. ipv6_address_mode = resource.Body('ipv6_address_mode') #: The IPv6 router advertisements modes which can be 'slaac', #: 'dhcpv6-stateful', 'dhcpv6-stateless'. 
@@ -69,10 +82,12 @@ class Subnet(resource.Resource): name = resource.Body('name') #: The ID of the attached network. network_id = resource.Body('network_id') + #: The prefix length to use for subnet allocation from a subnet pool + prefix_length = resource.Body('prefixlen') #: The ID of the project this subnet is associated with. - project_id = resource.Body('tenant_id') - #: Revision number of the subnet. *Type: int* - revision_number = resource.Body('revision_number', type=int) + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The ID of the segment this subnet is associated with. segment_id = resource.Body('segment_id') #: Service types for this subnet @@ -81,3 +96,7 @@ class Subnet(resource.Resource): subnet_pool_id = resource.Body('subnetpool_id') #: Timestamp when the subnet was last updated. updated_at = resource.Body('updated_at') + #: Whether to use the default subnet pool to obtain a CIDR. + use_default_subnet_pool = resource.Body( + 'use_default_subnetpool', type=bool + ) diff --git a/openstack/network/v2/subnet_pool.py b/openstack/network/v2/subnet_pool.py index 22b9f60a60..f872480468 100644 --- a/openstack/network/v2/subnet_pool.py +++ b/openstack/network/v2/subnet_pool.py @@ -10,28 +10,35 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack.network.v2 import _base +from openstack import resource -class SubnetPool(resource.Resource): +class SubnetPool(resource.Resource, _base.TagMixinNetwork): resource_key = 'subnetpool' resources_key = 'subnetpools' base_path = '/subnetpools' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True _query_mapping = resource.QueryParameters( - 'address_scope_id', 'description', 'ip_version', 'is_default', + 'address_scope_id', + 'description', + 'ip_version', + 'is_default', 'name', + 'project_id', + 'sort_key', + 'sort_dir', is_shared='shared', - project_id='tenant_id', + **_base.TagMixinNetwork._tag_query_parameters, ) # Properties @@ -68,7 +75,9 @@ class SubnetPool(resource.Resource): #: The subnet pool name. name = resource.Body('name') #: The ID of the project that owns the subnet pool. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: A list of subnet prefixes that are assigned to the subnet pool. #: The adjacent prefixes are merged and treated as a single prefix. #: *Type: list* diff --git a/openstack/network/v2/tap_flow.py b/openstack/network/v2/tap_flow.py new file mode 100644 index 0000000000..e892d69fad --- /dev/null +++ b/openstack/network/v2/tap_flow.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class TapFlow(resource.Resource): + """Tap Flow""" + + resource_key = 'tap_flow' + resources_key = 'tap_flows' + base_path = '/taas/tap_flows' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + "sort_key", + "sort_dir", + 'name', + 'project_id', + ) + + # Properties + #: The ID of the tap flow. + id = resource.Body('id') + #: The tap flow's name. + name = resource.Body('name') + #: The tap flow's description. + description = resource.Body('description') + #: The ID of the project that owns the tap flow. + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The id of the tap_service with which the tap flow is associated + tap_service_id = resource.Body('tap_service_id') + #: The direction of the tap flow. + direction = resource.Body('direction') + #: The status for the tap flow. + status = resource.Body('status') + #: The id of the port the tap flow is associated with + source_port = resource.Body('source_port') diff --git a/openstack/network/v2/tap_mirror.py b/openstack/network/v2/tap_mirror.py new file mode 100644 index 0000000000..562c91039a --- /dev/null +++ b/openstack/network/v2/tap_mirror.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class TapMirror(resource.Resource): + """Tap Mirror""" + + resource_key = 'tap_mirror' + resources_key = 'tap_mirrors' + base_path = '/taas/tap_mirrors' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + "sort_key", "sort_dir", 'name', 'project_id' + ) + + # Properties + #: The ID of the Tap Mirror. + id = resource.Body('id') + #: The Tap Mirror name. + name = resource.Body('name') + #: The Tap Mirror description. + description = resource.Body('description') + #: The ID of the project that owns the Tap Mirror. + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The id of the port the Tap Mirror is associated with + port_id = resource.Body('port_id') + #: The status for the tap service. 
+ directions = resource.Body('directions') + #: The destination IP address of the Tap Mirror + remote_ip = resource.Body('remote_ip') + #: The type of the Tap Mirror, it can be gre or erspanv1 + mirror_type = resource.Body('mirror_type') diff --git a/openstack/network/v2/tap_service.py b/openstack/network/v2/tap_service.py new file mode 100644 index 0000000000..ecb2825fbc --- /dev/null +++ b/openstack/network/v2/tap_service.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class TapService(resource.Resource): + """Tap Service""" + + resource_key = 'tap_service' + resources_key = 'tap_services' + base_path = '/taas/tap_services' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _allow_unknown_attrs_in_body = True + + _query_mapping = resource.QueryParameters( + "sort_key", "sort_dir", 'name', 'project_id' + ) + + # Properties + #: The ID of the tap service. + id = resource.Body('id') + #: The tap service name. + name = resource.Body('name') + #: The tap service description. + description = resource.Body('description') + #: The ID of the project that owns the tap service. + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). 
+ tenant_id = resource.Body('tenant_id', deprecated=True) + #: The id of the port the tap service is associated with + port_id = resource.Body('port_id') + #: The status for the tap service. + status = resource.Body('status') diff --git a/openstack/network/v2/trunk.py b/openstack/network/v2/trunk.py new file mode 100644 index 0000000000..c67e5686fa --- /dev/null +++ b/openstack/network/v2/trunk.py @@ -0,0 +1,83 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack.common import tag +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Trunk(resource.Resource, tag.TagMixin): + resource_key = 'trunk' + resources_key = 'trunks' + base_path = '/trunks' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'name', + 'description', + 'port_id', + 'status', + 'sub_ports', + 'project_id', + is_admin_state_up='admin_state_up', + **tag.TagMixin._tag_query_parameters, + ) + + # Properties + #: Trunk name. + name = resource.Body('name') + #: The ID of the project who owns the trunk. Only administrative + #: users can specify a project ID other than their own. + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The trunk description. 
+ description = resource.Body('description') + #: The administrative state of the port, which is up ``True`` or + #: down ``False``. *Type: bool* + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: The ID of the trunk's parent port + port_id = resource.Body('port_id') + #: The status for the trunk. Possible values are ACTIVE, DOWN, BUILD, + #: DEGRADED, and ERROR. + status = resource.Body('status') + #: A list of ports associated with the trunk. + sub_ports = resource.Body('sub_ports', type=list) + + def add_subports(self, session, subports): + url = utils.urljoin('/trunks', self.id, 'add_subports') + resp = session.put(url, json={'sub_ports': subports}) + exceptions.raise_from_response(resp) + self._body.attributes.update(resp.json()) + return self + + def delete_subports(self, session, subports): + url = utils.urljoin('/trunks', self.id, 'remove_subports') + resp = session.put(url, json={'sub_ports': subports}) + exceptions.raise_from_response(resp) + self._body.attributes.update(resp.json()) + return self + + def get_subports(self, session): + url = utils.urljoin('/trunks', self.id, 'get_subports') + resp = session.get(url) + exceptions.raise_from_response(resp) + self._body.attributes.update(resp.json()) + return resp.json() diff --git a/openstack/network/v2/vpn_endpoint_group.py b/openstack/network/v2/vpn_endpoint_group.py new file mode 100644 index 0000000000..063f9e301e --- /dev/null +++ b/openstack/network/v2/vpn_endpoint_group.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class VpnEndpointGroup(resource.Resource): + resource_key = 'endpoint_group' + resources_key = 'endpoint_groups' + base_path = '/vpn/endpoint-groups' + + _allow_unknown_attrs_in_body = True + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'description', + 'name', + 'project_id', + 'tenant_id', + type='endpoint_type', + ) + + # Properties + #: Human-readable description for the resource. + description = resource.Body('description') + #: List of endpoints of the same type, for the endpoint group. + #: The values will depend on type. + endpoints = resource.Body('endpoints', type=list) + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) + #: The type of the endpoints in the group. A valid value is subnet, cidr, + #: network, router, or vlan. Only subnet and cidr are supported at this + #: moment. + type = resource.Body('type') diff --git a/openstack/network/v2/vpn_ike_policy.py b/openstack/network/v2/vpn_ike_policy.py new file mode 100644 index 0000000000..db1ff6f7ff --- /dev/null +++ b/openstack/network/v2/vpn_ike_policy.py @@ -0,0 +1,76 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class VpnIkePolicy(resource.Resource): + """VPN IKE policy extension.""" + + resource_key = 'ikepolicy' + resources_key = 'ikepolicies' + base_path = '/vpn/ikepolicies' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'auth_algorithm', + 'description', + 'encryption_algorithm', + 'ike_version', + 'name', + 'pfs', + 'project_id', + 'phase1_negotiation_mode', + ) + + # Properties + #: The authentication hash algorithm. Valid values are sha1, + # sha256, sha384, sha512. The default is sha1. + auth_algorithm = resource.Body('auth_algorithm') + #: A human-readable description for the resource. + # Default is an empty string. + description = resource.Body('description') + #: The encryption algorithm. A valid value is 3des, aes-128, + # aes-192, aes-256, and so on. Default is aes-128. + encryption_algorithm = resource.Body('encryption_algorithm') + #: The IKE version. A valid value is v1 or v2. Default is v1. + ike_version = resource.Body('ike_version') + #: The lifetime of the security association. The lifetime consists + # of a unit and integer value. You can omit either the unit or value + # portion of the lifetime. Default unit is seconds and + # default value is 3600. + lifetime = resource.Body('lifetime', type=dict) + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + #: Perfect forward secrecy (PFS). A valid value is Group2, + # Group5, Group14, and so on. Default is Group5. + pfs = resource.Body('pfs') + #: The ID of the project. + project_id = resource.Body('project_id') + #: The IKE mode. A valid value is main, which is the default. 
+ phase1_negotiation_mode = resource.Body('phase1_negotiation_mode') + #: The units for the lifetime of the security association. + # The lifetime consists of a unit and integer value. + # You can omit either the unit or value portion of the lifetime. + # Default unit is seconds and default value is 3600. + units = resource.Body('units') + #: The lifetime value, as a positive integer. The lifetime + # consists of a unit and integer value. + # You can omit either the unit or value portion of the lifetime. + # Default unit is seconds and default value is 3600. + value = resource.Body('value', type=int) diff --git a/openstack/network/v2/vpn_ipsec_policy.py b/openstack/network/v2/vpn_ipsec_policy.py new file mode 100644 index 0000000000..bc171e267c --- /dev/null +++ b/openstack/network/v2/vpn_ipsec_policy.py @@ -0,0 +1,77 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class VpnIpsecPolicy(resource.Resource): + resource_key = 'ipsecpolicy' + resources_key = 'ipsecpolicies' + base_path = '/vpn/ipsecpolicies' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'auth_algorithm', + 'description', + 'encapsulation_mode', + 'encryption_algorithm', + 'name', + 'pfs', + 'project_id', + 'phase1_negotiation_mode', + 'transform_protocol', + ) + + # Properties + #: The authentication hash algorithm. 
Valid values are sha1, + # sha256, sha384, sha512. The default is sha1. + auth_algorithm = resource.Body('auth_algorithm') + #: A human-readable description for the resource. + # Default is an empty string. + description = resource.Body('description') + #: The encapsulation mode. A valid value is tunnel or transport + encapsulation_mode = resource.Body('encapsulation_mode') + #: The encryption algorithm. A valid value is 3des, aes-128, + # aes-192, aes-256, and so on. Default is aes-128. + encryption_algorithm = resource.Body('encryption_algorithm') + #: The lifetime of the security association. The lifetime consists + # of a unit and integer value. You can omit either the unit or value + # portion of the lifetime. Default unit is seconds and + # default value is 3600. + lifetime = resource.Body('lifetime', type=dict) + #: Human-readable name of the resource. Default is an empty string. + name = resource.Body('name') + #: Perfect forward secrecy (PFS). A valid value is Group2, + # Group5, Group14, and so on. Default is Group5. + pfs = resource.Body('pfs') + #: The ID of the project. + project_id = resource.Body('project_id') + #: The IKE mode. A valid value is main, which is the default. + phase1_negotiation_mode = resource.Body('phase1_negotiation_mode') + #: The transform protocol. A valid value is ESP, AH, or AH- ESP. + transform_protocol = resource.Body('transform_protocol') + #: The units for the lifetime of the security association. + # The lifetime consists of a unit and integer value. + # You can omit either the unit or value portion of the lifetime. + # Default unit is seconds and default value is 3600. + units = resource.Body('units') + #: The lifetime value, as a positive integer. The lifetime + # consists of a unit and integer value. + # You can omit either the unit or value portion of the lifetime. + # Default unit is seconds and default value is 3600. 
+ value = resource.Body('value', type=int) diff --git a/openstack/network/v2/vpn_ipsec_site_connection.py b/openstack/network/v2/vpn_ipsec_site_connection.py new file mode 100644 index 0000000000..ed2bc31e8c --- /dev/null +++ b/openstack/network/v2/vpn_ipsec_site_connection.py @@ -0,0 +1,128 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class VpnIPSecSiteConnection(resource.Resource): + resource_key = 'ipsec_site_connection' + resources_key = 'ipsec_site_connections' + base_path = '/vpn/ipsec-site-connections' + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + _query_mapping = resource.QueryParameters( + 'auth_mode', + 'description', + 'ikepolicy_id', + 'ipsecpolicy_id', + 'initiator', + 'local_ep_group_id', + 'peer_address', + 'local_id', + 'mtu', + 'name', + 'peer_id', + 'project_id', + 'psk', + 'peer_ep_group_id', + 'route_mode', + 'vpnservice_id', + 'status', + is_admin_state_up='admin_state_up', + ) + + # Properties + #: The dead peer detection (DPD) action. + # A valid value is clear, hold, restart, + # disabled, or restart-by-peer. Default value is hold. + action = resource.Body('action') + #: The authentication mode. A valid value + # is psk, which is the default. + auth_mode = resource.Body('auth_mode') + #: A human-readable description for the resource. + # Default is an empty string. 
+ description = resource.Body('description') + #: A dictionary with dead peer detection (DPD) protocol controls. + dpd = resource.Body('dpd', type=dict) + #: The administrative state of the resource, + # which is up (true) or down (false). + is_admin_state_up = resource.Body('admin_state_up', type=bool) + #: The ID of the IKE policy. + ikepolicy_id = resource.Body('ikepolicy_id') + #: Indicates whether this VPN can only respond + # to connections or both respond + # to and initiate connections. A valid value is + # response- only or bi-directional. Default is bi-directional. + initiator = resource.Body('initiator') + #: The ID of the IPsec policy. + ipsecpolicy_id = resource.Body('ipsecpolicy_id') + #: The dead peer detection (DPD) interval, in seconds. + # A valid value is a positive integer. Default is 30. + interval = resource.Body('interval', type=int) + #: The ID for the endpoint group that contains + # private subnets for the local side of the connection. + # Yo must specify this parameter with the + # peer_ep_group_id parameter unless in backward- compatible + # mode where peer_cidrs is provided with + # a subnet_id for the VPN service. + local_ep_group_id = resource.Body('local_ep_group_id') + #: The peer gateway public IPv4 or IPv6 address or FQDN. + peer_address = resource.Body('peer_address') + #: An ID to be used instead of the external IP address for + # a virtual router used in traffic between + # instances on different networks in east-west traffic. + # Most often, local ID would be domain + # name, email address, etc. If this is not configured + # then the external IP address will be used as the ID. + local_id = resource.Body('local_id') + #: The maximum transmission unit (MTU) + # value to address fragmentation. Minimum value + # is 68 for IPv4, and 1280 for IPv6. + mtu = resource.Body('mtu', type=int) + #: Human-readable name of the resource. Default is an empty string. 
+ name = resource.Body('name') + #: The peer router identity for authentication. + # A valid value is an IPv4 address, IPv6 address, e-mail address, + # key ID, or FQDN. Typically, this value matches + # the peer_address value. + peer_id = resource.Body('peer_id') + #: (Deprecated) Unique list of valid peer private + # CIDRs in the form < net_address > / < prefix > . + peer_cidrs = resource.Body('peer_cidrs', type=list) + #: The ID of the project. + project_id = resource.Body('tenant_id') + #: The pre-shared key. A valid value is any string. + psk = resource.Body('psk') + #: The ID for the endpoint group that contains + # private CIDRs in the form < net_address > / < prefix > + # for the peer side of the connection. You must + # specify this parameter with the local_ep_group_id + # parameter unless in backward-compatible mode + # where peer_cidrs is provided with a subnet_id for the VPN service. + peer_ep_group_id = resource.Body('peer_ep_group_id') + #: The route mode. A valid value is static, which is the default. + route_mode = resource.Body('route_mode') + #: The site connection status + status = resource.Body('status') + #: The dead peer detection (DPD) timeout + # in seconds. A valid value is a + # positive integer that is greater + # than the DPD interval value. Default is 120. + timeout = resource.Body('timeout', type=int) + #: The ID of the VPN service. + vpnservice_id = resource.Body('vpnservice_id') diff --git a/openstack/network/v2/vpn_service.py b/openstack/network/v2/vpn_service.py index 55556b4255..c03c8a6f0c 100644 --- a/openstack/network/v2/vpn_service.py +++ b/openstack/network/v2/vpn_service.py @@ -10,25 +10,35 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service -from openstack import resource2 as resource +from openstack import resource -# NOTE: The VPN service is unmaintained, need to consider remove it - -class VPNService(resource.Resource): +class VpnService(resource.Resource): resource_key = 'vpnservice' resources_key = 'vpnservices' base_path = '/vpn/vpnservices' - service = network_service.NetworkService() + + _allow_unknown_attrs_in_body = True # capabilities allow_create = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True + _query_mapping = resource.QueryParameters( + 'description', + 'external_v4_ip', + 'external_v6_ip', + 'name', + 'router_id', + 'project_id', + 'tenant_id', + 'subnet_id', + is_admin_state_up='admin_state_up', + ) + # Properties #: Human-readable description for the vpnservice. description = resource.Body('description') @@ -44,7 +54,9 @@ class VPNService(resource.Resource): #: ID of the router into which the VPN service is inserted. router_id = resource.Body('router_id') #: The ID of the project this vpnservice is associated with. - project_id = resource.Body('tenant_id') + project_id = resource.Body('project_id', alias='tenant_id') + #: Tenant_id (deprecated attribute). + tenant_id = resource.Body('tenant_id', deprecated=True) #: The vpnservice status. status = resource.Body('status') #: The ID of the subnet on which the tenant wants the vpnservice. diff --git a/openstack/network/version.py b/openstack/network/version.py index 5f19628109..805ce6a345 100644 --- a/openstack/network/version.py +++ b/openstack/network/version.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.network import network_service from openstack import resource @@ -18,13 +17,10 @@ class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' - service = network_service.NetworkService( - version=network_service.NetworkService.UNVERSIONED - ) # capabilities allow_list = True # Properties - links = resource.prop('links') - status = resource.prop('status') + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/object_store/object_store_service.py b/openstack/object_store/object_store_service.py index 1f58b97cfd..1a66d70ac6 100644 --- a/openstack/object_store/object_store_service.py +++ b/openstack/object_store/object_store_service.py @@ -10,15 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack import service_filter +from openstack.object_store.v1 import _proxy +from openstack import service_description -class ObjectStoreService(service_filter.ServiceFilter): +class ObjectStoreService(service_description.ServiceDescription[_proxy.Proxy]): """The object store service.""" - valid_versions = [service_filter.ValidVersion('v1')] - - def __init__(self, version=None): - """Create an object store service.""" - super(ObjectStoreService, self).__init__(service_type='object-store', - version=version) + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/object_store/v1/_base.py b/openstack/object_store/v1/_base.py index 17df935552..e237892545 100644 --- a/openstack/object_store/v1/_base.py +++ b/openstack/object_store/v1/_base.py @@ -11,76 +11,100 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.object_store import object_store_service +import typing as ty + +from openstack import exceptions from openstack import resource class BaseResource(resource.Resource): - service = object_store_service.ObjectStoreService() + commit_method = 'POST' + create_method = 'PUT' #: Metadata stored for this resource. *Type: dict* - metadata = dict() - - _custom_metadata_prefix = None - _system_metadata = dict() + metadata: dict[str, ty.Any] = {} + + _custom_metadata_prefix: str + _system_metadata: dict[str, ty.Any] = {} + _last_headers: dict[str, ty.Any] = {} + + def __init__(self, metadata=None, **attrs): + """Process and save metadata known at creation stage""" + super().__init__(**attrs) + if metadata is not None: + for k, v in metadata.items(): + if not k.lower().startswith( + self._custom_metadata_prefix.lower() + ): + self.metadata[self._custom_metadata_prefix + k] = v + else: + self.metadata[k] = v + + def _prepare_request(self, *args, **kwargs): + request = super()._prepare_request(*args, **kwargs) + request.headers.update(self._calculate_headers(self.metadata)) + return request def _calculate_headers(self, metadata): - headers = dict() + headers = {} for key in metadata: - if key in self._system_metadata: + if key in self._system_metadata.keys(): header = self._system_metadata[key] + elif key in self._system_metadata.values(): + header = key else: - header = self._custom_metadata_prefix + key - headers[header] = metadata[key] + if key.startswith(self._custom_metadata_prefix): + header = key + else: + header = self._custom_metadata_prefix + key + headers[header] = str(metadata[key]) return headers - def set_metadata(self, session, metadata): - url = self._get_url(self, self.id) - session.post(url, endpoint_filter=self.service, - headers=self._calculate_headers(metadata)) + def set_metadata(self, session, metadata, refresh=True): + request = self._prepare_request() + response = session.post( + request.url, headers=self._calculate_headers(metadata) + ) 
+ self._translate_response(response, has_body=False) + if refresh: + response = session.head(request.url) + self._translate_response(response, has_body=False) + return self def delete_metadata(self, session, keys): - url = self._get_url(self, self.id) + request = self._prepare_request() headers = {key: '' for key in keys} - session.post(url, endpoint_filter=self.service, - headers=self._calculate_headers(headers)) + response = session.post( + request.url, headers=self._calculate_headers(headers) + ) + exceptions.raise_from_response( + response, error_message="Error deleting metadata keys" + ) + return self - def _set_metadata(self): + def _set_metadata(self, headers): self.metadata = dict() - headers = self.get_headers() for header in headers: - if header.startswith(self._custom_metadata_prefix): - key = header[len(self._custom_metadata_prefix):].lower() + # RADOS and other stuff in front may actually lowcase headers + if header.lower().startswith(self._custom_metadata_prefix.lower()): + key = header[len(self._custom_metadata_prefix) :].lower() self.metadata[key] = headers[header] - def get(self, session, include_headers=False, args=None): - super(BaseResource, self).get(session, include_headers, args) - self._set_metadata() - return self - - def head(self, session): - super(BaseResource, self).head(session) - self._set_metadata() - return self - - @classmethod - def update_by_id(cls, session, resource_id, attrs, path_args=None): - """Update a Resource with the given attributes. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource_id: This resource's identifier, if needed by - the request. The default is ``None``. - :param dict attrs: The attributes to be sent in the body - of the request. - :param dict path_args: This parameter is sent by the base - class but is ignored for this method. - - :return: A ``dict`` representing the response headers. 
- """ - url = cls._get_url(None, resource_id) - headers = attrs.get(resource.HEADERS, dict()) - headers['Accept'] = '' - return session.post(url, endpoint_filter=cls.service, - headers=headers).headers + def _translate_response( + self, + response, + has_body=None, + error_message=None, + *, + resource_response_key=None, + ): + # Save headers of the last operation for potential use (get_object of + # cloud layer). + # This must happen before invoking parent _translate_response, cause it + # pops known headers. + self._last_headers = response.headers.copy() + super()._translate_response( + response, has_body=has_body, error_message=error_message + ) + self._set_metadata(response.headers) diff --git a/openstack/object_store/v1/_proxy.py b/openstack/object_store/v1/_proxy.py index a38171fce9..0824e5d24a 100644 --- a/openstack/object_store/v1/_proxy.py +++ b/openstack/object_store/v1/_proxy.py @@ -10,13 +10,101 @@ # License for the specific language governing permissions and limitations # under the License. 
+from calendar import timegm +import collections +import functools +from hashlib import sha1 +import hmac +import json +import os +import time +import typing as ty +from urllib import parse + +from openstack import _log +from openstack.cloud import _utils +from openstack import exceptions from openstack.object_store.v1 import account as _account from openstack.object_store.v1 import container as _container +from openstack.object_store.v1 import info as _info from openstack.object_store.v1 import obj as _obj from openstack import proxy +from openstack import resource +from openstack import utils + +DEFAULT_OBJECT_SEGMENT_SIZE = 1073741824 # 1GB +DEFAULT_MAX_FILE_SIZE = (5 * 1024 * 1024 * 1024 + 2) / 2 +EXPIRES_ISO8601_FORMAT = '%Y-%m-%dT%H:%M:%SZ' +SHORT_EXPIRES_ISO8601_FORMAT = '%Y-%m-%d' + + +def _get_expiration(expiration): + return int(time.time() + expiration) + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + _resource_registry = { + "account": _account.Account, + "container": _container.Container, + "info": _info.Info, + "object": _obj.Object, + } + + skip_discovery = True + + Account = _account.Account + Container = _container.Container + Object = _obj.Object + log = _log.setup_logging('openstack') -class Proxy(proxy.BaseProxy): + @functools.lru_cache(maxsize=256) + def _extract_name(self, url, service_type=None, project_id=None): + url_path = parse.urlparse(url).path.strip() + # Remove / from the beginning to keep the list indexes of interesting + # things consistent + if url_path.startswith('/'): + url_path = url_path[1:] + + # Split url into parts and exclude potential project_id in some urls + url_parts = [ + x + for x in url_path.split('/') + if ( + x != project_id + and ( + not project_id + or (project_id and x != 'AUTH_' + project_id) + ) + ) + ] + # Strip leading version piece so that + # GET /v1/AUTH_xxx + # returns ['AUTH_xxx'] + if ( + url_parts[0] + and url_parts[0][0] == 'v' + and url_parts[0][1] + and 
url_parts[0][1].isdigit() + ): + url_parts = url_parts[1:] + + # Strip out anything that's empty or None + parts = [part for part in url_parts if part] + + # Getting the root of an endpoint is doing version discovery + if not parts: + return ['account'] + + if len(parts) == 1: + if 'endpoints' in parts: + return ['endpoints'] + else: + return ['container'] + else: + return ['object'] def get_account_metadata(self): """Get metadata for this account. @@ -29,13 +117,12 @@ def get_account_metadata(self): def set_account_metadata(self, **metadata): """Set metadata for this account. - :param kwargs metadata: Key/value pairs to be set as metadata - on the container. Custom metadata can be set. - Custom metadata are keys and values defined - by the user. + :param kwargs metadata: Key/value pairs to be set as metadata on the + container. Custom metadata can be set. Custom metadata are keys and + values defined by the user. """ account = self._get_resource(_account.Account, None) - account.set_metadata(self.session, metadata) + account.set_metadata(self, metadata) def delete_account_metadata(self, keys): """Delete metadata for this account. @@ -43,96 +130,96 @@ def delete_account_metadata(self, keys): :param keys: The keys of metadata to be deleted. """ account = self._get_resource(_account.Account, None) - account.delete_metadata(self.session, keys) + account.delete_metadata(self, keys) def containers(self, **query): """Obtain Container objects for this account. :param kwargs query: Optional query parameters to be sent to limit - the resources being returned. + the resources being returned. :rtype: A generator of :class:`~openstack.object_store.v1.container.Container` objects. 
""" - return _container.Container.list(self.session, **query) + return self._list(_container.Container, paginated=True, **query) - def create_container(self, **attrs): + def create_container(self, name, **attrs): """Create a new container from attributes + :param container: Name of the container to create. :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.object_store.v1.container.Container`, - comprised of the properties on the Container class. + a :class:`~openstack.object_store.v1.container.Container`, + comprised of the properties on the Container class. :returns: The results of container creation :rtype: :class:`~openstack.object_store.v1.container.Container` """ - return self._create(_container.Container, **attrs) + return self._create(_container.Container, name=name, **attrs) def delete_container(self, container, ignore_missing=True): """Delete a container :param container: The value can be either the name of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. + :class:`~openstack.object_store.v1.container.Container` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the container does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent server. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the container does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + server. :returns: ``None`` """ - self._delete(_container.Container, container, - ignore_missing=ignore_missing) + self._delete( + _container.Container, container, ignore_missing=ignore_missing + ) def get_container_metadata(self, container): """Get metadata for a container :param container: The value can be the name of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. 
+ :class:`~openstack.object_store.v1.container.Container` instance. :returns: One :class:`~openstack.object_store.v1.container.Container` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ return self._head(_container.Container, container) - def set_container_metadata(self, container, **metadata): + def set_container_metadata(self, container, refresh=True, **metadata): """Set metadata for a container. :param container: The value can be the name of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. - :param kwargs metadata: Key/value pairs to be set as metadata - on the container. Both custom and system - metadata can be set. Custom metadata are keys - and values defined by the user. System - metadata are keys defined by the Object Store - and values defined by the user. The system - metadata keys are: - - - `content_type` - - `is_content_type_detected` - - `versions_location` - - `read_ACL` - - `write_ACL` - - `sync_to` - - `sync_key` + :class:`~openstack.object_store.v1.container.Container` + instance. + :param refresh: Flag to trigger refresh of container object re-fetch. + :param kwargs metadata: Key/value pairs to be set as metadata on the + container. Both custom and system metadata can be set. Custom + metadata are keys and values defined by the user. System metadata + are keys defined by the Object Store and values defined by the + user. The system metadata keys are: + + - `content_type` + - `is_content_type_detected` + - `versions_location` + - `read_ACL` + - `write_ACL` + - `sync_to` + - `sync_key` """ res = self._get_resource(_container.Container, container) - res.set_metadata(self.session, metadata) + res.set_metadata(self, metadata, refresh=refresh) + return res def delete_container_metadata(self, container, keys): """Delete metadata for a container. 
:param container: The value can be the ID of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. + :class:`~openstack.object_store.v1.container.Container` instance. :param keys: The keys of metadata to be deleted. """ res = self._get_resource(_container.Container, container) - res.delete_metadata(self.session, keys) + res.delete_metadata(self, keys) + return res def objects(self, container, **query): """Return a generator that yields the Container's objects. @@ -141,92 +228,260 @@ def objects(self, container, **query): that you want to retrieve objects from. :type container: :class:`~openstack.object_store.v1.container.Container` - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :rtype: A generator of :class:`~openstack.object_store.v1.obj.Object` objects. """ - container = _container.Container.from_id(container) + container = self._get_container_name(container=container) - objs = _obj.Object.list(self.session, - path_args={"container": container.name}, - **query) - for obj in objs: - obj.container = container.name + for obj in self._list( + _obj.Object, + container=container, + paginated=True, + format='json', + **query, + ): + obj.container = container yield obj - def _get_container_name(self, obj, container): - if isinstance(obj, _obj.Object): + def _get_container_name(self, obj=None, container=None): + if obj is not None: + obj = self._get_resource(_obj.Object, obj) if obj.container is not None: return obj.container if container is not None: - container = _container.Container.from_id(container) + container = self._get_resource(_container.Container, container) return container.name raise ValueError("container must be specified") - def get_object(self, obj, container=None): + def get_object( + self, + obj, + container=None, + resp_chunk_size=1024, + outfile=None, + 
remember_content=False, + ): """Get the data associated with an object :param obj: The value can be the name of an object or a - :class:`~openstack.object_store.v1.obj.Object` instance. + :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the name of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. - - :returns: The contents of the object. Use the - :func:`~get_object_metadata` - method if you want an object resource. - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :class:`~openstack.object_store.v1.container.Container` instance. + :param int resp_chunk_size: chunk size of data to read. Only used if + the results are being written to a file or stream is True. + (optional, defaults to 1k) + :param outfile: Write the object to a file instead of returning the + contents. If this option is given, body in the return tuple will be + None. outfile can either be a file path given as a string, or a + File like object. + :param bool remember_content: Flag whether object data should be saved + as `data` property of the Object. When left as `false` and + `outfile` is not defined data will not be saved and need to be + fetched separately. + + :returns: Instance of the + :class:`~openstack.object_store.v1.obj.Object` objects. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" - # TODO(briancurtin): call this download_object and make sure it's - # just returning the raw data, like download_image does - container_name = self._get_container_name(obj, container) + container_name = self._get_container_name(obj=obj, container=container) + + _object = self._get_resource( + _obj.Object, obj, container=container_name + ) + request = _object._prepare_request() + + get_stream = outfile is not None - return self._get(_obj.Object, obj, - path_args={"container": container_name}) + response = self.get( + request.url, headers=request.headers, stream=get_stream + ) + exceptions.raise_from_response(response) + _object._translate_response(response, has_body=False) - def download_object(self, obj, container=None, path=None): - """Download the data contained inside an object to disk. + if outfile: + if isinstance(outfile, str): + outfile_handle = open(outfile, 'wb') + else: + outfile_handle = outfile + for chunk in response.iter_content( + resp_chunk_size, decode_unicode=False + ): + outfile_handle.write(chunk) + if isinstance(outfile, str): + outfile_handle.close() + else: + outfile_handle.flush() + elif remember_content: + _object.data = response.text + + return _object + + def download_object(self, obj, container=None, **attrs): + """Download the data contained inside an object. :param obj: The value can be the name of an object or a - :class:`~openstack.object_store.v1.obj.Object` instance. + :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the name of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. - :param path str: Location to write the object contents. + :class:`~openstack.object_store.v1.container.Container` instance. - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
""" - # TODO(briancurtin): download_object should really have the behavior - # of get_object, and this writing to a file should not exist. - # TODO(briancurtin): This method should probably offload the get - # operation into another thread or something of that nature. - with open(path, "w") as out: - out.write(self.get_object(obj, container)) + container_name = self._get_container_name(obj=obj, container=container) + obj = self._get_resource( + _obj.Object, obj, container=container_name, **attrs + ) + return obj.download(self) - def upload_object(self, **attrs): - """Upload a new object from attributes + def stream_object(self, obj, container=None, chunk_size=1024, **attrs): + """Stream the data contained inside an object. - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.object_store.v1.obj.Object`, - comprised of the properties on the Object class. - **Required**: A `container` argument must be specified, - which is either the ID of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. - - :returns: The results of object creation - :rtype: :class:`~openstack.object_store.v1.container.Container` + :param obj: The value can be the name of an object or a + :class:`~openstack.object_store.v1.obj.Object` instance. + :param container: The value can be the name of a container or a + :class:`~openstack.object_store.v1.container.Container` instance. + + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. 
+ :returns: An iterator that iterates over chunk_size bytes + """ + container_name = self._get_container_name(obj=obj, container=container) + obj = self._get_resource( + _obj.Object, obj, container=container_name, **attrs + ) + return obj.stream(self, chunk_size=chunk_size) + + def create_object( + self, + container, + name, + filename=None, + md5=None, + sha256=None, + segment_size=None, + use_slo=True, + metadata=None, + generate_checksums=None, + data=None, + **headers, + ): + """Create a file object. + + Automatically uses large-object segments if needed. + + :param container: The name of the container to store the file in. + This container will be created if it does not exist already. + :param name: Name for the object within the container. + :param filename: The path to the local file whose contents will be + uploaded. Mutually exclusive with data. + :param data: The content to upload to the object. Mutually exclusive + with filename. + :param md5: A hexadecimal md5 of the file. (Optional), if it is known + and can be passed here, it will save repeating the expensive md5 + process. It is assumed to be accurate. + :param sha256: A hexadecimal sha256 of the file. (Optional) See md5. + :param segment_size: Break the uploaded object into segments of this + many bytes. (Optional) SDK will attempt to discover the maximum + value for this from the server if it is not specified, or will use + a reasonable default. + :param headers: These will be passed through to the object creation + API as HTTP Headers. + :param use_slo: If the object is large enough to need to be a Large + Object, use a static rather than dynamic object. Static Objects + will delete segment objects when the manifest object is deleted. + (optional, defaults to True) + :param generate_checksums: Whether to generate checksums on the client + side that get added to headers for later prevention of double + uploads of identical data. 
(optional, defaults to True) + :param metadata: This dict will get changed into headers that set + metadata of the object + + :raises: ``:class:`~openstack.exceptions.SDKException``` on operation + error. """ - container = attrs.pop("container", None) - container_name = self._get_container_name(None, container) + if data is not None and filename: + raise ValueError( + "Both filename and data given. Please choose one." + ) + if data is not None and not name: + raise ValueError("name is a required parameter when data is given") + if data is not None and generate_checksums: + raise ValueError( + "checksums cannot be generated with data parameter" + ) + if generate_checksums is None: + if data is not None: + generate_checksums = False + else: + generate_checksums = True - return self._create(_obj.Object, - path_args={"container": container_name}, **attrs) + if not metadata: + metadata = {} + + if not filename and data is None: + filename = name + + if generate_checksums and (md5 is None or sha256 is None): + (md5, sha256) = utils._get_file_hashes(filename) + if md5: + metadata[self._connection._OBJECT_MD5_KEY] = md5 + if sha256: + metadata[self._connection._OBJECT_SHA256_KEY] = sha256 + + container_name = self._get_container_name(container=container) + endpoint = f'{container_name}/{name}' + + if data is not None: + self.log.debug( + "swift uploading data to %(endpoint)s", {'endpoint': endpoint} + ) + return self._create( + _obj.Object, + container=container_name, + name=name, + data=data, + metadata=metadata, + **headers, + ) + + # segment_size gets used as a step value in a range call, so needs + # to be an int + if segment_size: + segment_size = int(segment_size) + segment_size = self.get_object_segment_size(segment_size) + file_size = os.path.getsize(filename) + + if self.is_object_stale(container_name, name, filename, md5, sha256): + self._connection.log.debug( + "swift uploading %(filename)s to %(endpoint)s", + {'filename': filename, 'endpoint': endpoint}, + ) + 
+ if metadata is not None: + # Rely on the class headers calculation for requested metadata + meta_headers = _obj.Object()._calculate_headers(metadata) + headers.update(meta_headers) + + if file_size <= segment_size: + self._upload_object(endpoint, filename, headers) + + else: + self._upload_large_object( + endpoint, + filename, + headers, + file_size, + segment_size, + use_slo, + ) + + # Backwards compat + upload_object = create_object def copy_object(self): """Copy an object.""" @@ -236,41 +491,40 @@ def delete_object(self, obj, ignore_missing=True, container=None): """Delete an object :param obj: The value can be either the name of an object or a - :class:`~openstack.object_store.v1.container.Container` - instance. + :class:`~openstack.object_store.v1.container.Container` instance. :param container: The value can be the ID of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. + :class:`~openstack.object_store.v1.container.Container` instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the object does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent server. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the object does not exist. When set to ``True``, no exception + will be set when attempting to delete a nonexistent server. :returns: ``None`` """ container_name = self._get_container_name(obj, container) - self._delete(_obj.Object, obj, ignore_missing=ignore_missing, - path_args={"container": container_name}) + self._delete( + _obj.Object, + obj, + ignore_missing=ignore_missing, + container=container_name, + ) def get_object_metadata(self, obj, container=None): """Get metadata for an object. :param obj: The value can be the name of an object or a - :class:`~openstack.object_store.v1.obj.Object` instance. + :class:`~openstack.object_store.v1.obj.Object` instance. 
:param container: The value can be the ID of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. + :class:`~openstack.object_store.v1.container.Container` instance. :returns: One :class:`~openstack.object_store.v1.obj.Object` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ container_name = self._get_container_name(obj, container) - return self._head(_obj.Object, obj, - path_args={"container": container_name}) + return self._head(_obj.Object, obj, container=container_name) def set_object_metadata(self, obj, container=None, **metadata): """Set metadata for an object. @@ -278,41 +532,745 @@ def set_object_metadata(self, obj, container=None, **metadata): Note: This method will do an extra HEAD call. :param obj: The value can be the name of an object or a - :class:`~openstack.object_store.v1.obj.Object` instance. + :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the name of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. + :class:`~openstack.object_store.v1.container.Container` instance. :param kwargs metadata: Key/value pairs to be set as metadata - on the container. Both custom and system - metadata can be set. Custom metadata are keys - and values defined by the user. System - metadata are keys defined by the Object Store - and values defined by the user. The system - metadata keys are: - - - `content_type` - - `content_encoding` - - `content_disposition` - - `delete_after` - - `delete_at` - - `is_content_type_detected` + on the container. Both custom and system metadata can be set. + Custom metadata are keys and values defined by the user. System + metadata are keys defined by the Object Store and values defined by + the user. 
The system metadata keys are: + + - `content_type` + - `content_encoding` + - `content_disposition` + - `delete_after` + - `delete_at` + - `is_content_type_detected` """ container_name = self._get_container_name(obj, container) - res = self._get_resource(_obj.Object, obj, - path_args={"container": container_name}) - res.set_metadata(self.session, metadata) + res = self._get_resource(_obj.Object, obj, container=container_name) + res.set_metadata(self, metadata) + return res def delete_object_metadata(self, obj, container=None, keys=None): """Delete metadata for an object. :param obj: The value can be the name of an object or a - :class:`~openstack.object_store.v1.obj.Object` instance. + :class:`~openstack.object_store.v1.obj.Object` instance. :param container: The value can be the ID of a container or a - :class:`~openstack.object_store.v1.container.Container` - instance. + :class:`~openstack.object_store.v1.container.Container` instance. :param keys: The keys of metadata to be deleted. """ container_name = self._get_container_name(obj, container) - res = self._get_resource(_obj.Object, obj, - path_args={"container": container_name}) - res.delete_metadata(self.session, keys) + res = self._get_resource(_obj.Object, obj, container=container_name) + res.delete_metadata(self, keys) + return res + + def is_object_stale( + self, container, name, filename, file_md5=None, file_sha256=None + ): + """Check to see if an object matches the hashes of a file. + + :param container: Name of the container. + :param name: Name of the object. + :param filename: Path to the file. + :param file_md5: Pre-calculated md5 of the file contents. Defaults to + None which means calculate locally. + :param file_sha256: Pre-calculated sha256 of the file contents. + Defaults to None which means calculate locally. 
+ """ + try: + metadata = self.get_object_metadata(name, container).metadata + except exceptions.NotFoundException: + self._connection.log.debug( + f"swift stale check, no object: {container}/{name}" + ) + return True + + if not (file_md5 or file_sha256): + (file_md5, file_sha256) = utils._get_file_hashes(filename) + md5_key = metadata.get( + self._connection._OBJECT_MD5_KEY, + metadata.get(self._connection._SHADE_OBJECT_MD5_KEY, ''), + ) + sha256_key = metadata.get( + self._connection._OBJECT_SHA256_KEY, + metadata.get(self._connection._SHADE_OBJECT_SHA256_KEY, ''), + ) + up_to_date = utils._hashes_up_to_date( + md5=file_md5, + sha256=file_sha256, + md5_key=md5_key, + sha256_key=sha256_key, + ) + + if not up_to_date: + self._connection.log.debug( + "swift checksum mismatch: " + "%(filename)s!=%(container)s/%(name)s", + {'filename': filename, 'container': container, 'name': name}, + ) + return True + + self._connection.log.debug( + "swift object up to date: %(container)s/%(name)s", + {'container': container, 'name': name}, + ) + return False + + def _upload_large_object( + self, endpoint, filename, headers, file_size, segment_size, use_slo + ): + # If the object is big, we need to break it up into segments that + # are no larger than segment_size, upload each of them individually + # and then upload a manifest object. The segments can be uploaded in + # parallel, so we'll use the async feature of the TaskManager. + + segment_futures = [] + segment_results = [] + retry_results = [] + retry_futures = [] + manifest = [] + + # Get an OrderedDict with keys being the swift location for the + # segment, the value a FileSegment file-like object that is a + # slice of the data for the segment. 
+ segments = self._get_file_segments( + endpoint, filename, file_size, segment_size + ) + + # Schedule the segments for upload + for name, segment in segments.items(): + # Async call to put - schedules execution and returns a future + segment_future = self._connection._pool_executor.submit( + self.put, name, headers=headers, data=segment, raise_exc=False + ) + segment_futures.append(segment_future) + # TODO(mordred) Collect etags from results to add to this manifest + # dict. Then sort the list of dicts by path. + manifest.append( + dict( + # While Object Storage usually expects the name to be + # urlencoded in most requests, the SLO manifest requires + # plain object names instead. + path=f'/{parse.unquote(name)}', + size_bytes=segment.length, + ) + ) + + # Try once and collect failed results to retry + segment_results, retry_results = self._connection._wait_for_futures( + segment_futures, raise_on_error=False + ) + + self._add_etag_to_manifest(segment_results, manifest) + + for result in retry_results: + # Grab the FileSegment for the failed upload so we can retry + name = self._object_name_from_url(result.url) + segment = segments[name] + segment.seek(0) + # Async call to put - schedules execution and returns a future + segment_future = self._connection._pool_executor.submit( + self.put, name, headers=headers, data=segment + ) + # TODO(mordred) Collect etags from results to add to this manifest + # dict. Then sort the list of dicts by path. 
+ retry_futures.append(segment_future) + + # If any segments fail the second time, just throw the error + segment_results, retry_results = self._connection._wait_for_futures( + retry_futures, raise_on_error=True + ) + + self._add_etag_to_manifest(segment_results, manifest) + + try: + if use_slo: + return self._finish_large_object_slo( + endpoint, headers, manifest + ) + else: + return self._finish_large_object_dlo(endpoint, headers) + except Exception: + try: + segment_prefix = endpoint.split('/')[-1] + self.log.debug( + "Failed to upload large object manifest for %s. " + "Removing segment uploads.", + segment_prefix, + ) + self._delete_autocreated_image_objects( + segment_prefix=segment_prefix + ) + except Exception: + self.log.exception( + "Failed to cleanup image objects for %s:", segment_prefix + ) + raise + + def _finish_large_object_slo(self, endpoint, headers, manifest): + # TODO(mordred) send an etag of the manifest, which is the md5sum + # of the concatenation of the etags of the results + headers = headers.copy() + retries = 3 + while True: + try: + return exceptions.raise_from_response( + self.put( + endpoint, + params={'multipart-manifest': 'put'}, + headers=headers, + data=json.dumps(manifest), + ) + ) + except Exception: + retries -= 1 + if retries == 0: + raise + + def _finish_large_object_dlo(self, endpoint, headers): + headers = headers.copy() + headers['X-Object-Manifest'] = endpoint + retries = 3 + while True: + try: + return exceptions.raise_from_response( + self.put(endpoint, headers=headers) + ) + except Exception: + retries -= 1 + if retries == 0: + raise + + def _upload_object(self, endpoint, filename, headers): + with open(filename, 'rb') as dt: + return self.put(endpoint, headers=headers, data=dt) + + def _get_file_segments(self, endpoint, filename, file_size, segment_size): + # Use an ordered dict here so that testing can replicate things + segments = collections.OrderedDict() + for index, offset in enumerate(range(0, file_size, 
segment_size)): + remaining = file_size - (index * segment_size) + segment = _utils.FileSegment( + filename, + offset, + segment_size if segment_size < remaining else remaining, + ) + name = f'{endpoint}/{index:0>6}' + segments[name] = segment + return segments + + def get_object_segment_size(self, segment_size): + """Get a segment size that will work given capabilities""" + if segment_size is None: + segment_size = DEFAULT_OBJECT_SEGMENT_SIZE + min_segment_size = 0 + try: + # caps = self.get_object_capabilities() + caps = self.get_info() + except ( + exceptions.NotFoundException, + exceptions.PreconditionFailedException, + ): + server_max_file_size = DEFAULT_MAX_FILE_SIZE + self._connection.log.info( + "Swift capabilities not supported. " + "Using default max file size." + ) + except exceptions.SDKException: + raise + else: + server_max_file_size = caps.swift.get('max_file_size', 0) + min_segment_size = caps.slo.get('min_segment_size', 0) + + if segment_size > server_max_file_size: + return server_max_file_size + if segment_size < min_segment_size: + return min_segment_size + return segment_size + + def _object_name_from_url(self, url): + '''Get container_name/object_name from the full URL called. + + Remove the Swift endpoint from the front of the URL, and remove + the leaving / that will leave behind.''' + endpoint = self.get_endpoint() + object_name = url.replace(endpoint, '') + if object_name.startswith('/'): + object_name = object_name[1:] + return object_name + + def _add_etag_to_manifest(self, segment_results, manifest): + for result in segment_results: + if 'Etag' not in result.headers: + continue + name = self._object_name_from_url(result.url) + for entry in manifest: + if entry['path'] == f'/{parse.unquote(name)}': + entry['etag'] = result.headers['Etag'] + + def get_info(self): + """Get infomation about the object-storage service + + The object-storage service publishes a set of capabilities that + include metadata about maximum values and thresholds. 
+ """ + return self._get(_info.Info) + + def set_account_temp_url_key(self, key, secondary=False): + """Set the temporary URL key for the account. + + :param key: Text of the key to use. + :param bool secondary: Whether this should set the secondary key. + (defaults to False) + """ + account = self._get_resource(_account.Account, None) + account.set_temp_url_key(self, key, secondary) + + def set_container_temp_url_key(self, container, key, secondary=False): + """Set the temporary URL key for a container. + + :param container: The value can be the name of a container or a + :class:`~openstack.object_store.v1.container.Container` instance. + :param key: Text of the key to use. + :param bool secondary: Whether this should set the secondary key. + (defaults to False) + """ + res = self._get_resource(_container.Container, container) + res.set_temp_url_key(self, key, secondary) + + def get_temp_url_key(self, container=None): + """Get the best temporary url key for a given container. + + Will first try to return Temp-URL-Key-2 then Temp-URL-Key for the + container, and if neither exist, will attempt to return Temp-URL-Key-2 + then Temp-URL-Key for the account. If neither exist, will return None. + + :param container: The value can be the name of a container or a + :class:`~openstack.object_store.v1.container.Container` instance. 
+ """ + temp_url_key = None + if container: + container_meta = self.get_container_metadata(container) + temp_url_key = ( + container_meta.meta_temp_url_key_2 + or container_meta.meta_temp_url_key + ) + if not temp_url_key: + account_meta = self.get_account_metadata() + temp_url_key = ( + account_meta.meta_temp_url_key_2 + or account_meta.meta_temp_url_key + ) + if temp_url_key and not isinstance(temp_url_key, bytes): + temp_url_key = temp_url_key.encode('utf8') + return temp_url_key + + def _check_temp_url_key(self, container=None, temp_url_key=None): + if temp_url_key: + if not isinstance(temp_url_key, bytes): + temp_url_key = temp_url_key.encode('utf8') + else: + temp_url_key = self.get_temp_url_key(container) + if not temp_url_key: + raise exceptions.SDKException( + 'temp_url_key was not given, nor was a temporary url key ' + 'found for the account or the container.' + ) + return temp_url_key + + def generate_form_signature( + self, + container, + object_prefix, + redirect_url, + max_file_size, + max_upload_count, + timeout, + temp_url_key=None, + ): + """Generate a signature for a FormPost upload. + + :param container: The value can be the name of a container or a + :class:`~openstack.object_store.v1.container.Container` instance. + :param object_prefix: Prefix to apply to limit all object names + created using this signature. + :param redirect_url: The URL to redirect the browser to after the + uploads have completed. + :param max_file_size: The maximum file size per file uploaded. + :param max_upload_count: The maximum number of uploaded files allowed. + :param timeout: The number of seconds from now to allow the form + post to begin. + :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the account. + Optional, if omitted, the key will be fetched from the container + or the account. + """ + max_file_size = int(max_file_size) + if max_file_size < 1: + raise exceptions.SDKException( + 'Please use a positive max_file_size value.' 
+ ) + max_upload_count = int(max_upload_count) + if max_upload_count < 1: + raise exceptions.SDKException( + 'Please use a positive max_upload_count value.' + ) + if timeout < 1: + raise exceptions.SDKException( + 'Please use a positive value.' + ) + expires = _get_expiration(timeout) + + temp_url_key = self._check_temp_url_key( + container=container, temp_url_key=temp_url_key + ) + + res = self._get_resource(_container.Container, container) + endpoint = parse.urlparse(self.get_endpoint()) + if isinstance(endpoint.path, bytes): + # To keep mypy happy: the output type will be the same as the input + # type + path = endpoint.path.decode() + else: + path = endpoint.path + path = '/'.join([path, res.name, object_prefix]) + + data = f'{path}\n{redirect_url}\n{max_file_size}\n{max_upload_count}\n{expires}' # noqa: E501 + sig = hmac.new(temp_url_key, data.encode(), sha1).hexdigest() + + return (expires, sig) + + def generate_temp_url( + self, + path, + seconds, + method, + absolute=False, + prefix=False, + iso8601=False, + ip_range=None, + temp_url_key=None, + ): + """Generates a temporary URL that gives unauthenticated access to the + Swift object. + + :param path: The full path to the Swift object or prefix if + a prefix-based temporary URL should be generated. Example: + /v1/AUTH_account/c/o or /v1/AUTH_account/c/prefix. + :param seconds: time in seconds or ISO 8601 timestamp. + If absolute is False and this is the string representation of an + integer, then this specifies the amount of time in seconds for + which the temporary URL will be valid. If absolute is True then + this specifies an absolute time at which the temporary URL will + expire. + :param method: A HTTP method, typically either GET or PUT, to allow + for this temporary URL. + :param absolute: if True then the seconds parameter is interpreted as a + Unix timestamp, if seconds represents an integer. + :param prefix: if True then a prefix-based temporary URL will be + generated. 
+ :param iso8601: if True, a URL containing an ISO 8601 UTC timestamp + instead of a UNIX timestamp will be created. + :param ip_range: if a valid ip range, restricts the temporary URL to + the range of ips. + :param temp_url_key: The X-Account-Meta-Temp-URL-Key for the account. + Optional, if omitted, the key will be fetched from the container or + the account. + :raises ValueError: if timestamp or path is not in valid format. + :return: the path portion of a temporary URL + """ + try: + try: + timestamp = float(seconds) + except ValueError: + formats = ( + EXPIRES_ISO8601_FORMAT, + EXPIRES_ISO8601_FORMAT[:-1], + SHORT_EXPIRES_ISO8601_FORMAT, + ) + for f in formats: + try: + t = time.strptime(seconds, f) + except ValueError: + continue + + if f == EXPIRES_ISO8601_FORMAT: + timestamp = timegm(t) + else: + # Use local time if UTC designator is missing. + timestamp = int(time.mktime(t)) + + absolute = True + break + else: + raise ValueError() + else: + if not timestamp.is_integer(): + raise ValueError() + timestamp = int(timestamp) + if timestamp < 0: + raise ValueError() + except ValueError: + raise ValueError( + 'time must either be a whole number ' + 'or in specific ISO 8601 format.' + ) + + if isinstance(path, bytes): + try: + path_for_body = path.decode('utf-8') + except UnicodeDecodeError: + raise ValueError('path must be representable as UTF-8') + else: + path_for_body = path + + parts = path_for_body.split('/', 4) + if ( + len(parts) != 5 + or parts[0] + or not all(parts[1 : (4 if prefix else 5)]) + ): + if prefix: + raise ValueError('path must at least contain /v1/a/c/') + else: + raise ValueError( + 'path must be full path to an object e.g. 
/v1/a/c/o' + ) + + standard_methods = ['GET', 'PUT', 'HEAD', 'POST', 'DELETE'] + if method.upper() not in standard_methods: + self.log.warning( + 'Non default HTTP method %s for tempurl ' + 'specified, possibly an error', + method.upper(), + ) + + expiration: float | int + if not absolute: + expiration = _get_expiration(timestamp) + else: + expiration = timestamp + + hmac_parts = [ + method.upper(), + str(expiration), + ('prefix:' if prefix else '') + path_for_body, + ] + + if ip_range: + if isinstance(ip_range, bytes): + try: + ip_range = ip_range.decode('utf-8') + except UnicodeDecodeError: + raise ValueError('ip_range must be representable as UTF-8') + hmac_parts.insert(0, f"ip={ip_range}") + + hmac_body = '\n'.join(hmac_parts) + + temp_url_key = self._check_temp_url_key(temp_url_key=temp_url_key) + + sig = hmac.new( + temp_url_key, hmac_body.encode('utf-8'), sha1 + ).hexdigest() + + if iso8601: + exp = time.strftime( + EXPIRES_ISO8601_FORMAT, time.gmtime(expiration) + ) + else: + exp = str(expiration) + + temp_url = f'{path_for_body}?temp_url_sig={sig}&temp_url_expires={exp}' + + if ip_range: + temp_url += f'&temp_url_ip_range={ip_range}' + + if prefix: + temp_url += f'&temp_url_prefix={parts[4]}' + # Have return type match path from caller + if isinstance(path, bytes): + return temp_url.encode('utf-8') + else: + return temp_url + + def _delete_autocreated_image_objects( + self, container=None, segment_prefix=None + ): + """Delete all objects autocreated for image uploads. + + This method should generally not be needed, as shade should clean up + the objects it uses for object-based image creation. If something goes + wrong and it is found that there are leaked objects, this method can be + used to delete any objects that shade has created on the user's behalf + in service of image uploads. + + :param str container: Name of the container. Defaults to 'images'. + :param str segment_prefix: Prefix for the image segment names to + delete. 
If not given, all image upload segments present are + deleted. + :returns: True if deletion was succesful, else False. + """ + if container is None: + container = self._connection._OBJECT_AUTOCREATE_CONTAINER + # This method only makes sense on clouds that use tasks + if not self._connection.image_api_use_tasks: + return False + + deleted = False + for obj in self.objects(container, prefix=segment_prefix): + meta = self.get_object_metadata(obj).metadata + if meta.get(self._connection._OBJECT_AUTOCREATE_KEY) == 'true': + self.delete_object(obj, ignore_missing=True) + deleted = True + return deleted + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. 
+ :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+ """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + # ========== Project Cleanup ========== + def _get_cleanup_dependencies(self): + return {'object_store': {'before': []}} + + def _service_cleanup( + self, + dry_run=True, + client_status_queue=None, + identified_resources=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + if self.should_skip_resource_cleanup( + "container", skip_resources + ) or self.should_skip_resource_cleanup("object", skip_resources): + return + + is_bulk_delete_supported = False + bulk_delete_max_per_request = 1 + try: + caps = self.get_info() + except exceptions.SDKException: + pass + else: + bulk_delete = caps.get("bulk_delete") + if bulk_delete is not None: + is_bulk_delete_supported = True + bulk_delete_max_per_request = bulk_delete.get( + "max_deletes_per_request", 10000 + ) + + elements = [] + for cont in self.containers(): + # Iterate over objects inside container + objects_remaining = False + for obj in self.objects(cont): + need_delete = self._service_cleanup_del_res( + self.delete_object, + obj, + dry_run=True, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + if need_delete: + if dry_run: + continue + elif is_bulk_delete_supported: + elements.append(f"{cont.name}/{obj.name}") + if len(elements) >= bulk_delete_max_per_request: + self._bulk_delete(elements) + elements.clear() + else: + self.delete_object(obj, cont) + else: + objects_remaining = True + + if len(elements) > 0: + self._bulk_delete(elements) + elements.clear() + + # Eventually delete container itself + if not objects_remaining: + self._service_cleanup_del_res( + self.delete_container, + cont, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + + def _bulk_delete(self, 
elements): + data = "\n".join([parse.quote(x) for x in elements]) + self.delete( + "?bulk-delete", + data=data, + headers={ + 'Content-Type': 'text/plain', + 'Accept': 'application/json', + }, + ) diff --git a/openstack/object_store/v1/account.py b/openstack/object_store/v1/account.py index 8857882d24..e4d17db9e4 100644 --- a/openstack/object_store/v1/account.py +++ b/openstack/object_store/v1/account.py @@ -20,23 +20,43 @@ class Account(_base.BaseResource): base_path = "/" - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_head = True #: The total number of bytes that are stored in Object Storage for #: the account. - account_bytes_used = resource.header("x-account-bytes-used", type=int) + account_bytes_used = resource.Header("x-account-bytes-used", type=int) #: The number of containers. - account_container_count = resource.header("x-account-container-count", - type=int) + account_container_count = resource.Header( + "x-account-container-count", type=int + ) #: The number of objects in the account. - account_object_count = resource.header("x-account-object-count", type=int) + account_object_count = resource.Header("x-account-object-count", type=int) #: The secret key value for temporary URLs. If not set, #: this header is not returned by this operation. - meta_temp_url_key = resource.header("x-account-meta-temp-url-key") + meta_temp_url_key = resource.Header("x-account-meta-temp-url-key") #: A second secret key value for temporary URLs. If not set, #: this header is not returned by this operation. - meta_temp_url_key_2 = resource.header("x-account-meta-temp-url-key-2") + meta_temp_url_key_2 = resource.Header("x-account-meta-temp-url-key-2") #: The timestamp of the transaction. - timestamp = resource.header("x-timestamp") + timestamp = resource.Header("x-timestamp") + + has_body = False + requires_id = False + + def set_temp_url_key(self, proxy, key, secondary=False): + """Set the temporary url key for the account. 
+ + :param proxy: The proxy to use for making this request. + :type proxy: :class:`~openstack.proxy.Proxy` + :param key: + Text of the key to use. + :param bool secondary: + Whether this should set the secondary key. (defaults to False) + """ + header = 'Temp-URL-Key' + if secondary: + header += '-2' + + return self.set_metadata(proxy, {header: key}) diff --git a/openstack/object_store/v1/container.py b/openstack/object_store/v1/container.py index 678846d6e8..e709f30349 100644 --- a/openstack/object_store/v1/container.py +++ b/openstack/object_store/v1/container.py @@ -21,38 +21,45 @@ class Container(_base.BaseResource): "content_type": "content-type", "is_content_type_detected": "x-detect-content-type", "versions_location": "x-versions-location", + "history_location": "x-history-location", "read_ACL": "x-container-read", "write_ACL": "x-container-write", "sync_to": "x-container-sync-to", - "sync_key": "x-container-sync-key" + "sync_key": "x-container-sync-key", } base_path = "/" - id_attribute = "name" + pagination_key = 'X-Account-Container-Count' allow_create = True - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True allow_head = True + _query_mapping = resource.QueryParameters('prefix', 'format') + # Container body data (when id=None) #: The name of the container. - name = resource.prop("name") + name = resource.Body("name", alternate_id=True, alias='id') #: The number of objects in the container. - count = resource.prop("count") + count = resource.Body("count", type=int, alias='object_count') #: The total number of bytes that are stored in Object Storage #: for the container. - bytes = resource.prop("bytes") + bytes = resource.Body("bytes", type=int, alias='bytes_used') # Container metadata (when id=name) #: The number of objects. 
- object_count = resource.header("x-container-object-count", type=int) + object_count = resource.Header( + "x-container-object-count", type=int, alias='count' + ) #: The count of bytes used in total. - bytes_used = resource.header("x-container-bytes-used", type=int) + bytes_used = resource.Header( + "x-container-bytes-used", type=int, alias='bytes' + ) #: The timestamp of the transaction. - timestamp = resource.header("x-timestamp") + timestamp = resource.Header("x-timestamp") # Request headers (when id=None) #: If set to True, Object Storage queries all replicas to return the @@ -60,66 +67,98 @@ class Container(_base.BaseResource): #: faster after it finds one valid replica. Because setting this #: header to True is more expensive for the back end, use it only #: when it is absolutely needed. *Type: bool* - is_newest = resource.header("x-newest", type=bool) + is_newest = resource.Header("x-newest", type=bool) # Request headers (when id=name) #: The ACL that grants read access. If not set, this header is not #: returned by this operation. - read_ACL = resource.header("x-container-read") + read_ACL = resource.Header("x-container-read") #: The ACL that grants write access. If not set, this header is not #: returned by this operation. - write_ACL = resource.header("x-container-write") + write_ACL = resource.Header("x-container-write") #: The destination for container synchronization. If not set, #: this header is not returned by this operation. - sync_to = resource.header("x-container-sync-to") + sync_to = resource.Header("x-container-sync-to") #: The secret key for container synchronization. If not set, #: this header is not returned by this operation. - sync_key = resource.header("x-container-sync-key") + sync_key = resource.Header("x-container-sync-key") #: Enables versioning on this container. The value is the name #: of another container. You must UTF-8-encode and then URL-encode #: the name before you include it in the header. 
To disable #: versioning, set the header to an empty string. - versions_location = resource.header("x-versions-location") + versions_location = resource.Header("x-versions-location") + #: Enables versioning on the container. + history_location = resource.Header("x-history-location") #: The MIME type of the list of names. - content_type = resource.header("content-type") + content_type = resource.Header("content-type") #: If set to true, Object Storage guesses the content type based #: on the file extension and ignores the value sent in the #: Content-Type header, if present. *Type: bool* - is_content_type_detected = resource.header("x-detect-content-type", - type=bool) + is_content_type_detected = resource.Header( + "x-detect-content-type", type=bool + ) + + #: Storage policy used by the container. + #: It is not possible to change policy of an existing container + storage_policy = resource.Header("x-storage-policy") + # TODO(mordred) Shouldn't if-none-match be handled more systemically? #: In combination with Expect: 100-Continue, specify an #: "If-None-Match: \*" header to query whether the server already #: has a copy of the object before any data is sent. - if_none_match = resource.header("if-none-match") + if_none_match = resource.Header("if-none-match") + #: The secret key value for temporary URLs. If not set, + #: this header is not returned by this operation. + meta_temp_url_key = resource.Header("x-container-meta-temp-url-key") + #: A second secret key value for temporary URLs. If not set, + #: this header is not returned by this operation. + meta_temp_url_key_2 = resource.Header("x-container-meta-temp-url-key-2") @classmethod - def create_by_id(cls, session, attrs, resource_id=None): - """Create a Resource from its attributes. + def new(cls, **kwargs): + # Container uses name as id. Proxy._get_resource calls + # Resource.new(id=name) but then we need to do container.name + # It's the same thing for Container - make it be the same. 
+ name = kwargs.pop('id', None) + if name: + kwargs.setdefault('name', name) + return cls(_synchronized=False, **kwargs) + + def create(self, session, prepend_key=True, base_path=None, **kwargs): + """Create a remote resource based on this instance. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param dict attrs: The attributes to be sent in the body - of the request. - :param resource_id: This resource's identifier, if needed by - the request. The default is ``None``. - - :return: A ``dict`` representing the response headers. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation + request. Default to True. + + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_create` is not set to ``True``. """ - url = cls._get_url(None, resource_id) - headers = attrs.get(resource.HEADERS, dict()) - headers['Accept'] = '' - return session.put(url, endpoint_filter=cls.service, - headers=headers).headers - - def create(self, session): - """Create a Resource from this instance. + request = self._prepare_request( + requires_id=True, prepend_key=prepend_key, base_path=base_path + ) + response = session.put(request.url, headers=request.headers) - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + self._translate_response(response, has_body=False) + return self - :return: This instance. + def set_temp_url_key(self, proxy, key, secondary=False): + """Set the temporary url key for a container. + + :param proxy: The proxy to use for making this request. + :type proxy: :class:`~openstack.proxy.Proxy` + :param container: + The value can be the name of a container or a + :class:`~openstack.object_store.v1.container.Container` instance. 
+ :param key: + Text of the key to use. + :param bool secondary: + Whether this should set the second key. (defaults to False) """ - resp = self.create_by_id(session, self._attrs, self.id) - self.set_headers(resp) - self._reset_dirty() - return self + header = 'Temp-URL-Key' + if secondary: + header += '-2' + + return self.set_metadata(proxy, {header: key}) diff --git a/openstack/object_store/v1/info.py b/openstack/object_store/v1/info.py new file mode 100644 index 0000000000..5b874067fb --- /dev/null +++ b/openstack/object_store/v1/info.py @@ -0,0 +1,93 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may + +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import re +import urllib.parse + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Info(resource.Resource): + base_path = "/info" + + allow_fetch = True + + _query_mapping = resource.QueryParameters( + 'swiftinfo_sig', 'swiftinfo_expires' + ) + + # Properties + bulk_delete = resource.Body("bulk_delete", type=dict) + swift = resource.Body("swift", type=dict) + slo = resource.Body("slo", type=dict) + staticweb = resource.Body("staticweb", type=dict) + tempurl = resource.Body("tempurl", type=dict) + + # The endpoint in the catalog has version and project-id in it + # To get capabilities, we have to disassemble and reassemble the URL + # to append 'info' + # This logic is taken from swiftclient + def _get_info_url(self, url): + URI_PATTERN_VERSION = re.compile(r'\/v\d+\.?\d*(\/.*)?') + scheme, netloc, path, params, query, fragment = urllib.parse.urlparse( + url + ) + if URI_PATTERN_VERSION.search(path): + path = URI_PATTERN_VERSION.sub('/info', path) + else: + path = utils.urljoin(path, 'info') + + return urllib.parse.urlunparse( + (scheme, netloc, path, params, query, fragment) + ) + + def fetch( + self, + session, + requires_id=False, + base_path=None, + error_message=None, + skip_cache=False, + **kwargs, + ): + """Get a remote resource based on this instance. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param boolean requires_id: A boolean indicating whether resource ID + should be part of the requested URI. + :param str base_path: Base part of the URI for fetching resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :param str error_message: An Error message to be returned if + requested object does not exist. + :return: This :class:`Resource` instance. + :raises: :exc:`~openstack.exceptions.MethodNotSupported` if + :data:`Resource.allow_fetch` is not set to ``True``. 
+ :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. + """ + if not self.allow_fetch: + raise exceptions.MethodNotSupported(self, "fetch") + + session = self._get_session(session) + info_url = self._get_info_url(session.get_endpoint()) + + microversion = self._get_microversion(session) + response = session.get(info_url, microversion=microversion) + + self.microversion = microversion + self._translate_response(response, error_message=error_message) + return self diff --git a/openstack/object_store/v1/obj.py b/openstack/object_store/v1/obj.py index 1b93e8e84e..9b51641c0c 100644 --- a/openstack/object_store/v1/obj.py +++ b/openstack/object_store/v1/obj.py @@ -13,7 +13,7 @@ import copy -from openstack.object_store import object_store_service +from openstack import exceptions from openstack.object_store.v1 import _base from openstack import resource @@ -21,37 +21,59 @@ class Object(_base.BaseResource): _custom_metadata_prefix = "X-Object-Meta-" _system_metadata = { + "accept_ranges": "accept-ranges", "content_disposition": "content-disposition", "content_encoding": "content-encoding", "content_type": "content-type", "delete_after": "x-delete-after", "delete_at": "x-delete-at", "is_content_type_detected": "x-detect-content-type", + "manifest": "x-object-manifest", + # Rax hack - the need CORS as different header + "access_control_allow_origin": "access-control-allow-origin", } base_path = "/%(container)s" - service = object_store_service.ObjectStoreService() - id_attribute = "name" + pagination_key = 'X-Container-Object-Count' allow_create = True - allow_retrieve = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True allow_list = True allow_head = True + _query_mapping = resource.QueryParameters( + 'prefix', + 'format', + 'temp_url_sig', + 'temp_url_expires', + 'filename', + 'multipart_manifest', + 'symlink', + multipart_manifest='multipart-manifest', + ) + # Data to be passed during a POST call 
to create an object on the server. + # TODO(mordred) Make a base class BaseDataResource that can be used here + # and with glance images that has standard overrides for dealing with + # binary data. data = None # URL parameters #: The unique name for the container. - container = resource.prop("container") + container = resource.URI("container") #: The unique name for the object. - name = resource.prop("name") + name = resource.Body("name", alternate_id=True) # Object details - hash = resource.prop("hash") - bytes = resource.prop("bytes") + # Make these private because they should only matter in the case where + # we have a Body with no headers (like if someone programmatically is + # creating an Object) + _hash = resource.Body("hash") + _bytes = resource.Body("bytes", type=int) + _last_modified = resource.Body("last_modified") + _content_type = resource.Body("content_type") # Headers for HEAD and GET requests #: If set to True, Object Storage queries all replicas to return @@ -59,46 +81,52 @@ class Object(_base.BaseResource): #: responds faster after it finds one valid replica. Because #: setting this header to True is more expensive for the back end, #: use it only when it is absolutely needed. *Type: bool* - is_newest = resource.header("x-newest", type=bool) + is_newest = resource.Header("x-newest", type=bool) #: TODO(briancurtin) there's a lot of content here... - range = resource.header("range", type=dict) + range = resource.Header("range", type=dict) #: See http://www.ietf.org/rfc/rfc2616.txt. - if_match = resource.header("if-match", type=dict) + if_match = resource.Header("if-match", type=list) #: In combination with Expect: 100-Continue, specify an #: "If-None-Match: \*" header to query whether the server already #: has a copy of the object before any data is sent. - if_none_match = resource.header("if-none-match", type=dict) + if_none_match = resource.Header("if-none-match", type=list) #: See http://www.ietf.org/rfc/rfc2616.txt. 
- if_modified_since = resource.header("if-modified-since", type=dict) + if_modified_since = resource.Header("if-modified-since", type=str) #: See http://www.ietf.org/rfc/rfc2616.txt. - if_unmodified_since = resource.header("if-unmodified-since", type=dict) + if_unmodified_since = resource.Header("if-unmodified-since", type=str) # Query parameters #: Used with temporary URLs to sign the request. For more #: information about temporary URLs, see OpenStack Object Storage #: API v1 Reference. - signature = resource.header("signature") + signature = resource.Header("signature") #: Used with temporary URLs to specify the expiry time of the #: signature. For more information about temporary URLs, see #: OpenStack Object Storage API v1 Reference. - expires_at = resource.header("expires") + expires_at = resource.Header("expires") + #: If present, this is a dynamic large object manifest object. + #: The value is the container and object name prefix of the segment + #: objects in the form container/prefix. + manifest = resource.Header("x-object-manifest") #: If you include the multipart-manifest=get query parameter and #: the object is a large object, the object contents are not #: returned. Instead, the manifest is returned in the #: X-Object-Manifest response header for dynamic large objects #: or in the response body for static large objects. - multipart_manifest = resource.header("multipart-manifest") + multipart_manifest = resource.Header("multipart-manifest") # Response headers from HEAD and GET #: HEAD operations do not return content. However, in this #: operation the value in the Content-Length header is not the #: size of the response body. Instead it contains the size of #: the object, in bytes. - content_length = resource.header("content-length") + content_length = resource.Header( + "content-length", type=int, alias='_bytes' + ) #: The MIME type of the object. 
- content_type = resource.header("content-type") + content_type = resource.Header("content-type", alias="_content_type") #: The type of ranges that the object accepts. - accept_ranges = resource.header("accept-ranges") + accept_ranges = resource.Header("accept-ranges") #: For objects smaller than 5 GB, this value is the MD5 checksum #: of the object content. The value is not quoted. #: For manifest objects, this value is the MD5 checksum of the @@ -110,47 +138,51 @@ class Object(_base.BaseResource): #: the response body as it is received and compare this value #: with the one in the ETag header. If they differ, the content #: was corrupted, so retry the operation. - etag = resource.header("etag") + etag = resource.Header("etag", alias='_hash') #: Set to True if this object is a static large object manifest object. #: *Type: bool* - is_static_large_object = resource.header("x-static-large-object", - type=bool) + is_static_large_object = resource.Header( + "x-static-large-object", type=bool + ) #: If set, the value of the Content-Encoding metadata. #: If not set, this header is not returned by this operation. - content_encoding = resource.header("content-encoding") + content_encoding = resource.Header("content-encoding") #: If set, specifies the override behavior for the browser. #: For example, this header might specify that the browser use #: a download program to save this file rather than show the file, #: which is the default. #: If not set, this header is not returned by this operation. - content_disposition = resource.header("content-disposition") + content_disposition = resource.Header("content-disposition") #: Specifies the number of seconds after which the object is #: removed. Internally, the Object Storage system stores this #: value in the X-Delete-At metadata item. 
- delete_after = resource.header("x-delete-after", type=int) + delete_after = resource.Header("x-delete-after", type=int) #: If set, the time when the object will be deleted by the system #: in the format of a UNIX Epoch timestamp. #: If not set, this header is not returned by this operation. - delete_at = resource.header("x-delete-at") + delete_at = resource.Header("x-delete-at") #: If set, to this is a dynamic large object manifest object. #: The value is the container and object name prefix of the #: segment objects in the form container/prefix. - object_manifest = resource.header("x-object-manifest") + object_manifest = resource.Header("x-object-manifest") #: The timestamp of the transaction. - timestamp = resource.header("x-timestamp") + timestamp = resource.Header("x-timestamp") #: The date and time that the object was created or the last #: time that the metadata was changed. - last_modified_at = resource.header("last_modified", alias="last-modified") + last_modified_at = resource.Header( + "last-modified", alias='_last_modified', aka='updated_at' + ) # Headers for PUT and POST requests #: Set to chunked to enable chunked transfer encoding. If used, #: do not set the Content-Length header to a non-zero value. - transfer_encoding = resource.header("transfer-encoding") + transfer_encoding = resource.Header("transfer-encoding") #: If set to true, Object Storage guesses the content type based #: on the file extension and ignores the value sent in the #: Content-Type header, if present. *Type: bool* - is_content_type_detected = resource.header("x-detect-content-type", - type=bool) + is_content_type_detected = resource.Header( + "x-detect-content-type", type=bool + ) #: If set, this is the name of an object used to create the new #: object by copying the X-Copy-From object. The value is in form #: {container}/{object}. You must UTF-8-encode and then URL-encode @@ -158,76 +190,166 @@ class Object(_base.BaseResource): #: in the header. 
#: Using PUT with X-Copy-From has the same effect as using the #: COPY operation to copy an object. - copy_from = resource.header("x-copy-from") + copy_from = resource.Header("x-copy-from") + #: If present, this is a symlink object. The value is the relative path + #: of the target object in the format <container>/<object>. + symlink_target = resource.Header("x-symlink-target") + #: If present, and X-Symlink-Target is present, then this is a + #: cross-account symlink to an object in the account specified in the + #: value. + symlink_target_account = resource.Header("x-symlink-target-account") + + #: CORS for RAX (deviating from standard) + access_control_allow_origin = resource.Header( + "access-control-allow-origin" + ) + + has_body = False + + def __init__(self, data=None, **attrs): + super().__init__(**attrs) + self.data = data # The Object Store treats the metadata for its resources inconsistently so # Object.set_metadata must override the BaseResource.set_metadata to # account for it. - def set_metadata(self, session, metadata): + def set_metadata(self, session, metadata, refresh=True): # Filter out items with empty values so the create metadata behaviour # is the same as account and container - filtered_metadata = \ - {key: value for key, value in metadata.iteritems() if value} + filtered_metadata = { + key: value for key, value in metadata.items() if value + } + + # Update from remote if we only have locally created information + if not self.last_modified_at: + self.head(session) # Get a copy of the original metadata so it doesn't get erased on POST # and update it with the new metadata values.
- obj = self.head(session) - metadata2 = copy.deepcopy(obj.metadata) - metadata2.update(filtered_metadata) + metadata = copy.deepcopy(self.metadata) + metadata.update(filtered_metadata) # Include any original system metadata so it doesn't get erased on POST for key in self._system_metadata: - value = getattr(obj, key) - if value and key not in metadata2: - metadata2[key] = value + value = getattr(self, key) + if value and key not in metadata: + metadata[key] = value + + request = self._prepare_request() + headers = self._calculate_headers(metadata) + response = session.post(request.url, headers=headers) + self._translate_response(response, has_body=False) + self.metadata.update(metadata) - super(Object, self).set_metadata(session, metadata2) + return self # The Object Store treats the metadata for its resources inconsistently so # Object.delete_metadata must override the BaseResource.delete_metadata to # account for it. def delete_metadata(self, session, keys): - # Get a copy of the original metadata so it doesn't get erased on POST - # and update it with the new metadata values. - obj = self.head(session) - metadata = copy.deepcopy(obj.metadata) + if not keys: + return + # If we have an empty object, update it from the remote side so that + # we have a copy of the original metadata. Deleting metadata requires + # POSTing and overwriting all of the metadata. If we already have + # metadata locally, assume this is an existing object. + if not self.metadata: + self.head(session) + + metadata = copy.deepcopy(self.metadata) # Include any original system metadata so it doesn't get erased on POST for key in self._system_metadata: - value = getattr(obj, key) + value = getattr(self, key) if value: metadata[key] = value - # Remove the metadata + # Remove the requested metadata keys + # TODO(mordred) Why don't we just look at self._header_mapping() + # instead of having system_metadata? 
+ deleted = False + attr_keys_to_delete = set() for key in keys: if key == 'delete_after': - del(metadata['delete_at']) + del metadata['delete_at'] else: - del(metadata[key]) - - url = self._get_url(self, self.id) - session.post(url, endpoint_filter=self.service, - headers=self._calculate_headers(metadata)) - - def get(self, session, include_headers=False, args=None): - url = self._get_url(self, self.id) - headers = {'Accept': 'bytes'} - resp = session.get(url, endpoint_filter=self.service, headers=headers) - resp = resp.content - self._set_metadata() - return resp - - def create(self, session): - url = self._get_url(self, self.id) - - headers = self.get_headers() - headers['Accept'] = '' - if self.data is not None: - resp = session.put(url, endpoint_filter=self.service, - data=self.data, - headers=headers).headers - else: - resp = session.post(url, endpoint_filter=self.service, data=None, - headers=headers).headers - self.set_headers(resp) + if key in metadata: + del metadata[key] + # Delete the attribute from the local copy of the object. + # Metadata that doesn't have Component attributes is + # handled by self.metadata being reset when we run + # self.head + if hasattr(self, key): + attr_keys_to_delete.add(key) + deleted = True + + # Nothing to delete, skip the POST + if not deleted: + return self + + request = self._prepare_request() + response = session.post( + request.url, headers=self._calculate_headers(metadata) + ) + exceptions.raise_from_response( + response, error_message="Error deleting metadata keys" + ) + + # Only delete from local object if the remote delete was successful + for key in attr_keys_to_delete: + delattr(self, key) + + # Just update ourselves from remote again. 
+ return self.head(session) + + def _download(self, session, error_message=None, stream=False): + request = self._prepare_request() + + response = session.get( + request.url, headers=request.headers, stream=stream + ) + exceptions.raise_from_response(response, error_message=error_message) + return response + + def download(self, session, error_message=None): + response = self._download(session, error_message=error_message) + return response.content + + def stream(self, session, error_message=None, chunk_size=1024): + response = self._download( + session, error_message=error_message, stream=True + ) + return response.iter_content(chunk_size, decode_unicode=False) + + def create(self, session, prepend_key=True, base_path=None, **kwargs): + request = self._prepare_request(base_path=base_path) + + response = session.put( + request.url, data=self.data, headers=request.headers + ) + self._translate_response(response, has_body=False) return self + + def _raw_delete(self, session, microversion=None, **kwargs): + if not self.allow_delete: + raise exceptions.MethodNotSupported(self, 'delete') + + request = self._prepare_request(**kwargs) + session = self._get_session(session) + if microversion is None: + microversion = self._get_microversion(session) + + if self.is_static_large_object is None: + # Fetch metadata to determine SLO flag + self.head(session) + + headers = {} + + if self.is_static_large_object: + headers['multipart-manifest'] = 'delete' + + return session.delete( + request.url, + headers=headers, + microversion=microversion, + ) diff --git a/openstack/orchestration/orchestration_service.py b/openstack/orchestration/orchestration_service.py index 0333ff74f8..4ef5cc0027 100644 --- a/openstack/orchestration/orchestration_service.py +++ b/openstack/orchestration/orchestration_service.py @@ -10,18 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack import service_filter +from openstack.orchestration.v1 import _proxy +from openstack import service_description -class OrchestrationService(service_filter.ServiceFilter): +class OrchestrationService( + service_description.ServiceDescription[_proxy.Proxy] +): """The orchestration service.""" - valid_versions = [service_filter.ValidVersion('v1')] - - def __init__(self, version=None): - """Create an orchestration service.""" - super(OrchestrationService, self).__init__( - service_type='orchestration', - version=version, - requires_project_id=True, - ) + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/tests/unit/cluster/v1/__init__.py b/openstack/orchestration/util/__init__.py similarity index 100% rename from openstack/tests/unit/cluster/v1/__init__.py rename to openstack/orchestration/util/__init__.py diff --git a/openstack/orchestration/util/environment_format.py b/openstack/orchestration/util/environment_format.py new file mode 100644 index 0000000000..8d14df442c --- /dev/null +++ b/openstack/orchestration/util/environment_format.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import yaml + +from openstack.orchestration.util import template_format + + +SECTIONS = ( + PARAMETER_DEFAULTS, + PARAMETERS, + RESOURCE_REGISTRY, + ENCRYPTED_PARAM_NAMES, + EVENT_SINKS, + PARAMETER_MERGE_STRATEGIES, +) = ( + 'parameter_defaults', + 'parameters', + 'resource_registry', + 'encrypted_param_names', + 'event_sinks', + 'parameter_merge_strategies', +) + + +def parse(env_str): + """Takes a string and returns a dict containing the parsed structure. + + This includes determination of whether the string is using the + YAML format. + """ + try: + env = yaml.load(env_str, Loader=template_format.yaml_loader) # noqa: S506 + except yaml.YAMLError: + # NOTE(prazumovsky): we need to return more informative error for + # user, so use SafeLoader, which return error message with template + # snippet where error has been occurred. + try: + env = yaml.load(env_str, Loader=yaml.SafeLoader) + except yaml.YAMLError as yea: + raise ValueError(yea) + else: + if env is None: + env = {} + elif not isinstance(env, dict): + raise ValueError( + 'The environment is not a valid YAML mapping data type.' + ) + + for param in env: + if param not in SECTIONS: + raise ValueError(f'environment has wrong section "{param}"') + + return env diff --git a/openstack/orchestration/util/event_utils.py b/openstack/orchestration/util/event_utils.py new file mode 100644 index 0000000000..d5f86905fa --- /dev/null +++ b/openstack/orchestration/util/event_utils.py @@ -0,0 +1,117 @@ +# Copyright 2015 Red Hat Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections +import time + +from openstack.cloud import meta +from openstack import exceptions + + +# TODO(stephenfin): Convert to use real resources +def get_events(cloud, stack_id, event_args, marker=None, limit=None): + # TODO(mordred) FIX THIS ONCE assert_calls CAN HANDLE QUERY STRINGS + params = collections.OrderedDict() + for k in sorted(event_args.keys()): + params[k] = event_args[k] + + if marker: + event_args['marker'] = marker + if limit: + event_args['limit'] = limit + + response = cloud.orchestration.get( + f'/stacks/{stack_id}/events', + params=params, + ) + exceptions.raise_from_response(response) + + # Show which stack the event comes from (for nested events) + events = meta.get_and_munchify('events', response.json()) + for e in events: + e['stack_name'] = stack_id.split("/")[0] + return events + + +def poll_for_events( + cloud, stack_name, action=None, poll_period=5, marker=None +): + """Continuously poll events and logs for performed action on stack.""" + + def stop_check_action(a): + stop_status = (f'{action}_FAILED', f'{action}_COMPLETE') + return a in stop_status + + def stop_check_no_action(a): + return a.endswith('_COMPLETE') or a.endswith('_FAILED') + + if action: + stop_check = stop_check_action + else: + stop_check = stop_check_no_action + + no_event_polls = 0 + msg_template = "\n Stack %(name)s %(status)s \n" + + def is_stack_event(event): + if ( + event.get('resource_name', '') != stack_name + and event.get('physical_resource_id', '') != stack_name + ): + return False + + phys_id = event.get('physical_resource_id', '') + links = { + link.get('rel'): link.get('href') + for link in event.get('links', []) + } + stack_id = links.get('stack', phys_id).rsplit('/', 1)[-1] + return stack_id == phys_id + + while True: + events = get_events( + cloud, + stack_id=stack_name, + event_args={'sort_dir': 'asc', 'marker': marker}, + ) + + if 
len(events) == 0: + no_event_polls += 1 + else: + no_event_polls = 0 + # set marker to last event that was received. + marker = getattr(events[-1], 'id', None) + + for event in events: + # check if stack event was also received + if is_stack_event(event): + stack_status = getattr(event, 'resource_status', '') + msg = msg_template % dict( + name=stack_name, status=stack_status + ) + if stop_check(stack_status): + return stack_status, msg + + if no_event_polls >= 2: + # after 2 polls with no events, fall back to a stack get + stack = cloud.get_stack(stack_name, resolve_outputs=False) + if stack: + stack_status = stack['stack_status'] + msg = msg_template % dict(name=stack_name, status=stack_status) + if stop_check(stack_status): + return stack_status, msg + # go back to event polling again + no_event_polls = 0 + + time.sleep(poll_period) diff --git a/openstack/orchestration/util/template_format.py b/openstack/orchestration/util/template_format.py new file mode 100644 index 0000000000..b08715bfea --- /dev/null +++ b/openstack/orchestration/util/template_format.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import json + +import yaml + +if hasattr(yaml, 'CSafeLoader'): + yaml_loader = yaml.CSafeLoader +else: + yaml_loader = yaml.SafeLoader # type: ignore + + +class HeatYamlLoader(yaml_loader): + pass + + +def _construct_yaml_str(self, node): + # Override the default string handling function + # to always return unicode objects + return self.construct_scalar(node) + + +HeatYamlLoader.add_constructor('tag:yaml.org,2002:str', _construct_yaml_str) +# Unquoted dates like 2013-05-23 in yaml files get loaded as objects of type +# datetime.data which causes problems in API layer when being processed by +# openstack.common.jsonutils. Therefore, make unicode string out of timestamps +# until jsonutils can handle dates. +HeatYamlLoader.add_constructor( + 'tag:yaml.org,2002:timestamp', _construct_yaml_str +) + + +def parse(tmpl_str): + """Takes a string and returns a dict containing the parsed structure. + + This includes determination of whether the string is using the + JSON or YAML format. + """ + # strip any whitespace before the check + tmpl_str = tmpl_str.strip() + if tmpl_str.startswith('{'): + tpl = json.loads(tmpl_str) + else: + try: + tpl = yaml.load(tmpl_str, Loader=HeatYamlLoader) # noqa: S506 + except yaml.YAMLError: + # NOTE(prazumovsky): we need to return more informative error for + # user, so use SafeLoader, which return error message with template + # snippet where error has been occurred. 
+ try: + tpl = yaml.load(tmpl_str, Loader=yaml.SafeLoader) + except yaml.YAMLError as yea: + raise ValueError(yea) + else: + if tpl is None: + tpl = {} + # Looking for supported version keys in the loaded template + if not ( + 'HeatTemplateFormatVersion' in tpl + or 'heat_template_version' in tpl + or 'AWSTemplateFormatVersion' in tpl + ): + raise ValueError("Template format version not found.") + return tpl diff --git a/openstack/orchestration/util/template_utils.py b/openstack/orchestration/util/template_utils.py new file mode 100644 index 0000000000..6673c5b711 --- /dev/null +++ b/openstack/orchestration/util/template_utils.py @@ -0,0 +1,364 @@ +# Copyright 2012 OpenStack Foundation +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import collections.abc +import json +from urllib import parse +from urllib import request + +from openstack import exceptions +from openstack.orchestration.util import environment_format +from openstack.orchestration.util import template_format +from openstack.orchestration.util import utils + + +def get_template_contents( + template_file=None, + template_url=None, + template_object=None, + object_request=None, + files=None, + existing=False, +): + is_object = False + tpl = None + + # Transform a bare file path to a file:// URL. 
+ if template_file: + template_url = utils.normalise_file_path_to_url(template_file) + + if template_url: + tpl = request.urlopen(template_url).read() # noqa: S310 + + elif template_object: + is_object = True + template_url = template_object + tpl = object_request and object_request('GET', template_object) + elif existing: + return {}, None + else: + raise exceptions.SDKException( + 'Must provide one of template_file, template_url or ' + 'template_object' + ) + + if not tpl: + raise exceptions.SDKException( + f'Could not fetch template from {template_url}' + ) + + try: + if isinstance(tpl, bytes): + tpl = tpl.decode('utf-8') + template = template_format.parse(tpl) + except ValueError as e: + raise exceptions.SDKException( + f'Error parsing template {template_url} {e}' + ) + + tmpl_base_url = utils.base_url_for_url(template_url) + if files is None: + files = {} + resolve_template_get_files( + template, files, tmpl_base_url, is_object, object_request + ) + return files, template + + +def resolve_template_get_files( + template, files, template_base_url, is_object=False, object_request=None +): + def ignore_if(key, value): + if key != 'get_file' and key != 'type': + return True + if not isinstance(value, str): + return True + if key == 'type' and not value.endswith(('.yaml', '.template')): + return True + return False + + def recurse_if(value): + return isinstance(value, dict | list) + + get_file_contents( + template, + files, + template_base_url, + ignore_if, + recurse_if, + is_object, + object_request, + ) + + +def is_template(file_content): + try: + if isinstance(file_content, bytes): + file_content = file_content.decode('utf-8') + template_format.parse(file_content) + except (ValueError, TypeError): + return False + return True + + +def get_file_contents( + from_data, + files, + base_url=None, + ignore_if=None, + recurse_if=None, + is_object=False, + object_request=None, +): + if recurse_if and recurse_if(from_data): + if isinstance(from_data, dict): + recurse_data 
= from_data.values() + else: + recurse_data = from_data + for value in recurse_data: + get_file_contents( + value, + files, + base_url, + ignore_if, + recurse_if, + is_object, + object_request, + ) + + if isinstance(from_data, dict): + for key, value in from_data.items(): + if ignore_if and ignore_if(key, value): + continue + + if base_url and not base_url.endswith('/'): + base_url = base_url + '/' + + str_url = parse.urljoin(base_url, value) + if str_url not in files: + if is_object and object_request: + file_content = object_request('GET', str_url) + else: + file_content = utils.read_url_content(str_url) + if is_template(file_content): + if is_object: + template = get_template_contents( + template_object=str_url, + files=files, + object_request=object_request, + )[1] + else: + template = get_template_contents( + template_url=str_url, files=files + )[1] + file_content = json.dumps(template) + files[str_url] = file_content + # replace the data value with the normalised absolute URL + from_data[key] = str_url + + +def deep_update(old, new): + '''Merge nested dictionaries.''' + + # Prevents an error if in a previous iteration + # old[k] = None but v[k] = {...}, + if old is None: + old = {} + + for k, v in new.items(): + if isinstance(v, collections.abc.Mapping): + r = deep_update(old.get(k, {}), v) + old[k] = r + else: + old[k] = new[k] + return old + + +def process_multiple_environments_and_files( + env_paths=None, + template=None, + template_url=None, + env_path_is_object=None, + object_request=None, + env_list_tracker=None, +): + """Reads one or more environment files. + + Reads in each specified environment file and returns a dictionary + of the filenames->contents (suitable for the files dict) + and the consolidated environment (after having applied the correct + overrides based on order). + + If a list is provided in the env_list_tracker parameter, the behavior + is altered to take advantage of server-side environment resolution. 
+ Specifically, this means: + + * Populating env_list_tracker with an ordered list of environment file + URLs to be passed to the server + * Including the contents of each environment file in the returned + files dict, keyed by one of the URLs in env_list_tracker + + :param env_paths: list of paths to the environment files to load; if + None, empty results will be returned + :type env_paths: list or None + :param template: unused; only included for API compatibility + :param template_url: unused; only included for API compatibility + :param env_list_tracker: if specified, environment filenames will be + stored within + :type env_list_tracker: list or None + :return: tuple of files dict and a dict of the consolidated environment + :rtype: tuple + """ + merged_files: dict[str, str] = {} + merged_env: dict[str, dict] = {} + + # If we're keeping a list of environment files separately, include the + # contents of the files in the files dict + include_env_in_files = env_list_tracker is not None + + if env_paths: + for env_path in env_paths: + files, env = process_environment_and_files( + env_path=env_path, + template=template, + template_url=template_url, + env_path_is_object=env_path_is_object, + object_request=object_request, + include_env_in_files=include_env_in_files, + ) + + # 'files' looks like {"filename1": contents, "filename2": contents} + # so a simple update is enough for merging + merged_files.update(files) + + # 'env' can be a deeply nested dictionary, so a simple update is + # not enough + merged_env = deep_update(merged_env, env) + + if env_list_tracker is not None: + env_url = utils.normalise_file_path_to_url(env_path) + env_list_tracker.append(env_url) + + return merged_files, merged_env + + +def process_environment_and_files( + env_path=None, + template=None, + template_url=None, + env_path_is_object=None, + object_request=None, + include_env_in_files=False, +): + """Loads a single environment file. 
+ + Returns an entry suitable for the files dict which maps the environment + filename to its contents. + + :param env_path: full path to the file to load + :type env_path: str or None + :param include_env_in_files: if specified, the raw environment file itself + will be included in the returned files dict + :type include_env_in_files: bool + :return: tuple of files dict and the loaded environment as a dict + :rtype: (dict, dict) + """ + files: dict[str, str] = {} + env: dict[str, dict] = {} + + is_object = env_path_is_object and env_path_is_object(env_path) + + if is_object: + raw_env = object_request and object_request('GET', env_path) + env = environment_format.parse(raw_env) + env_base_url = utils.base_url_for_url(env_path) + + resolve_environment_urls( + env.get('resource_registry'), + files, + env_base_url, + is_object=True, + object_request=object_request, + ) + + elif env_path: + env_url = utils.normalise_file_path_to_url(env_path) + env_base_url = utils.base_url_for_url(env_url) + raw_env = request.urlopen(env_url).read() # noqa: S310 + + env = environment_format.parse(raw_env) + + resolve_environment_urls( + env.get('resource_registry'), files, env_base_url + ) + + if include_env_in_files: + files[env_url] = json.dumps(env) + + return files, env + + +def resolve_environment_urls( + resource_registry, + files, + env_base_url, + is_object=False, + object_request=None, +): + """Handles any resource URLs specified in an environment. 
+ + :param resource_registry: mapping of type name to template filename + :type resource_registry: dict + :param files: dict to store loaded file contents into + :type files: dict + :param env_base_url: base URL to look in when loading files + :type env_base_url: str or None + """ + if resource_registry is None: + return + + rr = resource_registry + base_url = rr.get('base_url', env_base_url) + + def ignore_if(key, value): + if key == 'base_url': + return True + if isinstance(value, dict): + return True + if '::' in value: + # Built in providers like: "X::Compute::Server" + # don't need downloading. + return True + if key in ['hooks', 'restricted_actions']: + return True + + get_file_contents( + rr, + files, + base_url, + ignore_if, + is_object=is_object, + object_request=object_request, + ) + + for res_name, res_dict in rr.get('resources', {}).items(): + res_base_url = res_dict.get('base_url', base_url) + get_file_contents( + res_dict, + files, + res_base_url, + ignore_if, + is_object=is_object, + object_request=object_request, + ) diff --git a/openstack/orchestration/util/utils.py b/openstack/orchestration/util/utils.py new file mode 100644 index 0000000000..5822d26426 --- /dev/null +++ b/openstack/orchestration/util/utils.py @@ -0,0 +1,60 @@ +# Copyright 2012 OpenStack Foundation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import base64 +import os +from urllib import error +from urllib import parse +from urllib import request + +from openstack import exceptions + + +def base_url_for_url(url): + parsed = parse.urlparse(url) + parsed_dir = os.path.dirname(parsed.path) + return parse.urljoin(url, parsed_dir) + + +def normalise_file_path_to_url(path): + if parse.urlparse(path).scheme: + return path + path = os.path.abspath(path) + return parse.urljoin('file:', request.pathname2url(path)) + + +def read_url_content(url): + try: + # TODO(mordred) Use requests + content = request.urlopen(url).read() # noqa: S310 + except error.URLError: + raise exceptions.SDKException(f'Could not fetch contents for {url}') + + if content: + try: + content = content.decode('utf-8') + except ValueError: + content = base64.encodebytes(content) + return content + + +def resource_nested_identifier(rsrc): + nested_link = [ + link for link in rsrc.links or [] if link.get('rel') == 'nested' + ] + if nested_link: + nested_href = nested_link[0].get('href') + nested_identifier = nested_href.split("/")[-2:] + return "/".join(nested_identifier) diff --git a/openstack/orchestration/v1/_proxy.py b/openstack/orchestration/v1/_proxy.py index 7065b96e60..80fab8157a 100644 --- a/openstack/orchestration/v1/_proxy.py +++ b/openstack/orchestration/v1/_proxy.py @@ -10,98 +10,195 @@ # License for the specific language governing permissions and limitations # under the License. 
+import typing as ty + from openstack import exceptions +from openstack.orchestration.util import template_utils from openstack.orchestration.v1 import resource as _resource from openstack.orchestration.v1 import software_config as _sc from openstack.orchestration.v1 import software_deployment as _sd from openstack.orchestration.v1 import stack as _stack +from openstack.orchestration.v1 import stack_environment as _stack_environment +from openstack.orchestration.v1 import stack_event as _stack_event +from openstack.orchestration.v1 import stack_files as _stack_files +from openstack.orchestration.v1 import stack_template as _stack_template from openstack.orchestration.v1 import template as _template -from openstack import proxy2 - - -class Proxy(proxy2.BaseProxy): - - def create_stack(self, preview=False, **attrs): +from openstack import proxy +from openstack import resource + + +# TODO(rladntjr4): Some of these methods support lookup by ID, while others +# support lookup by ID or name. We should choose one and use it consistently. 
+class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + _resource_registry = { + "resource": _resource.Resource, + "software_config": _sc.SoftwareConfig, + "software_deployment": _sd.SoftwareDeployment, + "stack": _stack.Stack, + "stack_environment": _stack_environment.StackEnvironment, + "stack_files": _stack_files.StackFiles, + "stack_template": _stack_template.StackTemplate, + } + + def _extract_name_consume_url_parts(self, url_parts): + if ( + len(url_parts) == 3 + and url_parts[0] == 'software_deployments' + and url_parts[1] == 'metadata' + ): + # Another nice example of totally different URL naming scheme, + # which we need to repair /software_deployment/metadata/server_id - + # just replace server_id with metadata to keep further logic + return ['software_deployment', 'metadata'] + if ( + url_parts[0] == 'stacks' + and len(url_parts) > 2 + and url_parts[2] not in ['preview', 'resources'] + ): + # orchestrate introduce having stack name and id part of the URL + # (/stacks/name/id/everything_else), so if on third position we + # have not a known part - discard it, not to brake further logic + del url_parts[2] + return super()._extract_name_consume_url_parts(url_parts) + + def read_env_and_templates( + self, + template_file=None, + template_url=None, + template_object=None, + files=None, + environment_files=None, + ): + """Read templates and environment content and prepares + corresponding stack attributes + + :param string template_file: Path to the template. + :param string template_url: URL of template. + :param string template_object: URL to retrieve template object. + :param dict files: dict of additional file content to include. + :param environment_files: Paths to environment files to apply. 
+ + :returns: Attributes dict to be set on the + :class:`~openstack.orchestration.v1.stack.Stack` + :rtype: dict + """ + stack_attrs = dict() + envfiles = dict() + tpl_files = None + if environment_files: + ( + envfiles, + env, + ) = template_utils.process_multiple_environments_and_files( + env_paths=environment_files + ) + stack_attrs['environment'] = env + if template_file or template_url or template_object: + tpl_files, template = template_utils.get_template_contents( + template_file=template_file, + template_url=template_url, + template_object=template_object, + files=files, + ) + stack_attrs['template'] = template + if tpl_files or envfiles: + stack_attrs['files'] = dict( + list(tpl_files.items()) + list(envfiles.items()) + ) + return stack_attrs + + def create_stack( + self, preview: bool = False, **attrs: ty.Any + ) -> _stack.Stack: """Create a new stack from attributes - :param bool perview: When ``True``, returns - an :class:`~openstack.orchestration.v1.stack.StackPreview` object, - otherwise an :class:`~openstack.orchestration.v1.stack.Stack` - object. + :param bool preview: When ``True``, a preview endpoint will be used to + verify the template *Default: ``False``* :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.orchestration.v1.stack.Stack`, - comprised of the properties on the Stack class. + a :class:`~openstack.orchestration.v1.stack.Stack`, + comprised of the properties on the Stack class. 
:returns: The results of stack creation :rtype: :class:`~openstack.orchestration.v1.stack.Stack` """ - res_type = _stack.StackPreview if preview else _stack.Stack - return self._create(res_type, **attrs) - def find_stack(self, name_or_id, ignore_missing=True): + base_path = None if not preview else '/stacks/preview' + return self._create(_stack.Stack, base_path=base_path, **attrs) + + def find_stack( + self, name_or_id, ignore_missing=True, resolve_outputs=True + ): """Find a single stack :param name_or_id: The name or ID of a stack. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. :returns: One :class:`~openstack.orchestration.v1.stack.Stack` or None """ - return self._find(_stack.Stack, name_or_id, - ignore_missing=ignore_missing) + return self._find( + _stack.Stack, + name_or_id, + ignore_missing=ignore_missing, + resolve_outputs=resolve_outputs, + ) def stacks(self, **query): """Return a generator of stacks - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of stack objects :rtype: :class:`~openstack.orchestration.v1.stack.Stack` """ - return self._list(_stack.Stack, paginated=False, **query) + return self._list(_stack.Stack, **query) - def get_stack(self, stack): + def get_stack(self, stack, resolve_outputs=True): """Get a single stack :param stack: The value can be the ID of a stack or a - :class:`~openstack.orchestration.v1.stack.Stack` instance. 
+ :class:`~openstack.orchestration.v1.stack.Stack` instance. + :param resolve_outputs: Whether stack should contain outputs resolved. :returns: One :class:`~openstack.orchestration.v1.stack.Stack` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._get(_stack.Stack, stack) + return self._get(_stack.Stack, stack, resolve_outputs=resolve_outputs) - def update_stack(self, stack, **attrs): + def update_stack(self, stack, *, preview=False, **attrs): """Update a stack :param stack: The value can be the ID of a stack or a - :class:`~openstack.orchestration.v1.stack.Stack` instance. - :param kwargs \*\*attrs: The attributes to update on the stack - represented by ``value``. + :class:`~openstack.orchestration.v1.stack.Stack` instance. + :param kwargs attrs: The attributes to update on the stack + represented by ``value``. :returns: The updated stack :rtype: :class:`~openstack.orchestration.v1.stack.Stack` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. """ - return self._update(_stack.Stack, stack, **attrs) + res = self._get_resource(_stack.Stack, stack, **attrs) + return res.commit(self, preview) def delete_stack(self, stack, ignore_missing=True): """Delete a stack :param stack: The value can be either the ID of a stack or a - :class:`~openstack.orchestration.v1.stack.Stack` - instance. + :class:`~openstack.orchestration.v1.stack.Stack` + instance. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the stack does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent stack. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the stack does not exist. 
+ When set to ``True``, no exception will be set when + attempting to delete a nonexistent stack. :returns: ``None`` """ @@ -114,7 +211,7 @@ def check_stack(self, stack): is to track the stack's status. :param stack: The value can be either the ID of a stack or an instance - of :class:`~openstack.orchestration.v1.stack.Stack`. + of :class:`~openstack.orchestration.v1.stack.Stack`. :returns: ``None`` """ if isinstance(stack, _stack.Stack): @@ -122,23 +219,133 @@ def check_stack(self, stack): else: stk_obj = _stack.Stack.existing(id=stack) - stk_obj.check(self.session) + stk_obj.check(self) + + def abandon_stack(self, stack): + """Abandon a stack's without deleting it's resources + + :param stack: The value can be either the ID of a stack or an instance + of :class:`~openstack.orchestration.v1.stack.Stack`. + :returns: ``None`` + """ + res = self._get_resource(_stack.Stack, stack) + return res.abandon(self) + + def export_stack(self, stack): + """Get the stack data in JSON format + + :param stack: The value can be the ID or a name or + an instance of :class:`~openstack.orchestration.v1.stack.Stack` + :returns: A dictionary containing the stack data. + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + if isinstance(stack, _stack.Stack): + obj = stack + else: + obj = self._find(_stack.Stack, stack, ignore_missing=False) + return obj.export(self) + + def suspend_stack(self, stack): + """Suspend a stack status + + :param stack: The value can be either the ID of a stack or an instance + of :class:`~openstack.orchestration.v1.stack.Stack`. + :returns: ``None`` + """ + res = self._get_resource(_stack.Stack, stack) + res.suspend(self) + + def resume_stack(self, stack): + """Resume a stack status + + :param stack: The value can be either the ID of a stack or an instance + of :class:`~openstack.orchestration.v1.stack.Stack`. 
+ :returns: ``None`` + """ + res = self._get_resource(_stack.Stack, stack) + res.resume(self) + + def get_stack_template(self, stack): + """Get template used by a stack + + :param stack: The value can be the ID of a stack or an instance of + :class:`~openstack.orchestration.v1.stack.Stack` + + :returns: One object of + :class:`~openstack.orchestration.v1.stack_template.StackTemplate` + :raises: :class:`~openstack.exceptions.NotFoundException` + when no resource can be found. + """ + if isinstance(stack, _stack.Stack): + obj = stack + else: + obj = self._find(_stack.Stack, stack, ignore_missing=False) + + return self._get( + _stack_template.StackTemplate, + requires_id=False, + stack_name=obj.name, + stack_id=obj.id, + ) + + def get_stack_environment(self, stack): + """Get environment used by a stack + + :param stack: The value can be the ID of a stack or an instance of + :class:`~openstack.orchestration.v1.stack.Stack` + + :returns: One object of + :class:`~openstack.orchestration.v1.stack_environment.StackEnvironment` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + """ + if isinstance(stack, _stack.Stack): + obj = stack + else: + obj = self._find(_stack.Stack, stack, ignore_missing=False) + + return self._get( + _stack_environment.StackEnvironment, + requires_id=False, + stack_name=obj.name, + stack_id=obj.id, + ) + + def get_stack_files(self, stack): + """Get files used by a stack + + :param stack: The value can be the ID of a stack or an instance of + :class:`~openstack.orchestration.v1.stack.Stack` + + :returns: A dictionary containing the names and contents of all files + used by the stack. + :raises: :class:`~openstack.exceptions.NotFoundException` + when the stack cannot be found. 
+ """ + if isinstance(stack, _stack.Stack): + stk = stack + else: + stk = self._find(_stack.Stack, stack, ignore_missing=False) + + obj = _stack_files.StackFiles(stack_name=stk.name, stack_id=stk.id) + return obj.fetch(self) def resources(self, stack, **query): """Return a generator of resources :param stack: This can be a stack object, or the name of a stack - for which the resources are to be listed. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. + for which the resources are to be listed. + :param kwargs query: Optional query parameters to be sent to limit + the resources being returned. :returns: A generator of resource objects if the stack exists and - there are resources in it. If the stack cannot be found, - an exception is thrown. + there are resources in it. If the stack cannot be found, + an exception is thrown. :rtype: A generator of :class:`~openstack.orchestration.v1.resource.Resource` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when the stack cannot be found. + :raises: :class:`~openstack.exceptions.NotFoundException` + when the stack cannot be found. """ # first try treat the value as a stack object or an ID if isinstance(stack, _stack.Stack): @@ -146,8 +353,9 @@ def resources(self, stack, **query): else: obj = self._find(_stack.Stack, stack, ignore_missing=False) - return self._list(_resource.Resource, paginated=False, - stack_name=obj.name, stack_id=obj.id, **query) + return self._list( + _resource.Resource, stack_name=obj.name, stack_id=obj.id, **query + ) def create_software_config(self, **attrs): """Create a new software config from attributes @@ -166,12 +374,12 @@ def software_configs(self, **query): """Returns a generator of software configs :param dict query: Optional query parameters to be sent to limit the - software configs returned. + software configs returned. :returns: A generator of software config objects. 
:rtype: - :class:`~openstack.orchestration.v1.software_config.SoftwareConfig` + :class:`~openstack.orchestration.v1.software_config.SoftwareConfig` """ - return self._list(_sc.SoftwareConfig, paginated=True, **query) + return self._list(_sc.SoftwareConfig, **query) def get_software_config(self, software_config): """Get details about a specific software config. @@ -192,14 +400,15 @@ def delete_software_config(self, software_config, ignore_missing=True): config or an instance of :class:`~openstack.orchestration.v1.software_config.SoftwareConfig` :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the software config does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent software config. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the software config does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent software config. :returns: ``None`` """ - self._delete(_sc.SoftwareConfig, software_config, - ignore_missing=ignore_missing) + self._delete( + _sc.SoftwareConfig, software_config, ignore_missing=ignore_missing + ) def create_software_deployment(self, **attrs): """Create a new software deployment from attributes @@ -218,12 +427,12 @@ def software_deployments(self, **query): """Returns a generator of software deployments :param dict query: Optional query parameters to be sent to limit the - software deployments returned. + software deployments returned. :returns: A generator of software deployment objects. 
:rtype: - :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` + :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` """ - return self._list(_sd.SoftwareDeployment, paginated=False, **query) + return self._list(_sd.SoftwareDeployment, **query) def get_software_deployment(self, software_deployment): """Get details about a specific software deployment resource @@ -237,22 +446,26 @@ def get_software_deployment(self, software_deployment): """ return self._get(_sd.SoftwareDeployment, software_deployment) - def delete_software_deployment(self, software_deployment, - ignore_missing=True): + def delete_software_deployment( + self, software_deployment, ignore_missing=True + ): """Delete a software deployment :param software_deployment: The value can be either the ID of a software deployment or an instance of :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the software deployment does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent software deployment. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the software deployment does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent software deployment. 
:returns: ``None`` """ - self._delete(_sd.SoftwareDeployment, software_deployment, - ignore_missing=ignore_missing) + self._delete( + _sd.SoftwareDeployment, + software_deployment, + ignore_missing=ignore_missing, + ) def update_software_deployment(self, software_deployment, **attrs): """Update a software deployment @@ -260,41 +473,203 @@ def update_software_deployment(self, software_deployment, **attrs): :param server: Either the ID of a software deployment or an instance of :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` :param dict attrs: The attributes to update on the software deployment - represented by ``software_deployment``. + represented by ``software_deployment``. :returns: The updated software deployment :rtype: - :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` + :class:`~openstack.orchestration.v1.software_deployment.SoftwareDeployment` """ - return self._update(_sd.SoftwareDeployment, software_deployment, - **attrs) + return self._update( + _sd.SoftwareDeployment, software_deployment, **attrs + ) - def validate_template(self, template, environment=None, template_url=None, - ignore_errors=None): + def validate_template( + self, template, environment=None, template_url=None, ignore_errors=None + ): """Validates a template. :param template: The stack template on which the validation is - performed. + performed. :param environment: A JSON environment for the stack, if provided. :param template_url: A URI to the location containing the stack - template for validation. This parameter is only - required if the ``template`` parameter is None. - This parameter is ignored if ``template`` is - specified. + template for validation. This parameter is only + required if the ``template`` parameter is None. + This parameter is ignored if ``template`` is + specified. :param ignore_errors: A string containing comma separated error codes - to ignore. Currently the only valid error code - is '99001'. + to ignore. 
Currently the only valid error code + is '99001'. :returns: The result of template validation. :raises: :class:`~openstack.exceptions.InvalidRequest` if neither - `template` not `template_url` is provided. + `template` not `template_url` is provided. :raises: :class:`~openstack.exceptions.HttpException` if the template - fails the validation. + fails the validation. """ if template is None and template_url is None: raise exceptions.InvalidRequest( - "'template_url' must be specified when template is None") + "'template_url' must be specified when template is None" + ) tmpl = _template.Template.new() - return tmpl.validate(self.session, template, environment=environment, - template_url=template_url, - ignore_errors=ignore_errors) + return tmpl.validate( + self, + template, + environment=environment, + template_url=template_url, + ignore_errors=ignore_errors, + ) + + def get_template_contents( + self, + template_file=None, + template_url=None, + template_object=None, + files=None, + ): + try: + return template_utils.get_template_contents( + template_file=template_file, + template_url=template_url, + template_object=template_object, + files=files, + ) + except Exception as e: + raise exceptions.SDKException( + f"Error in processing template files: {e!s}" + ) + + # ========== Stack events ========== + + def stack_events(self, stack, resource_name=None, **attr): + """Get a stack events + + :param stack: The value can be the ID of a stack or an instance of + :class:`~openstack.orchestration.v1.stack.Stack` + :param resource_name: The name of resource. If the resource_name is not + None, the base_path changes. 
+ + :returns: A generator of stack_events objects + :rtype: :class:`~openstack.orchestration.v1.stack_event.StackEvent` + """ + + if isinstance(stack, _stack.Stack): + obj = stack + else: + obj = self._get(_stack.Stack, stack) + + if resource_name: + return self._list( + _stack_event.StackEvent, + stack_name=obj.name, + stack_id=obj.id, + resource_name=resource_name, + base_path=( + '/stacks/%(stack_name)s/%(stack_id)s/resources/' + '%(resource_name)s/events' + ), + **attr, + ) + + return self._list( + _stack_event.StackEvent, + stack_name=obj.name, + stack_id=obj.id, + **attr, + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. 
+ :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+ """ + return resource.wait_for_delete(self, res, interval, wait, callback) + + def _get_cleanup_dependencies(self): + return { + 'orchestration': {'before': ['compute', 'network', 'identity']} + } + + def _service_cleanup( + self, + dry_run=True, + client_status_queue=None, + identified_resources=None, + filters=None, + resource_evaluation_fn=None, + skip_resources=None, + ): + if self.should_skip_resource_cleanup("stack", skip_resources): + return + + stacks = [] + for obj in self.stacks(): + need_delete = self._service_cleanup_del_res( + self.delete_stack, + obj, + dry_run=dry_run, + client_status_queue=client_status_queue, + identified_resources=identified_resources, + filters=filters, + resource_evaluation_fn=resource_evaluation_fn, + ) + if not dry_run and need_delete: + stacks.append(obj) + + for stack in stacks: + self.wait_for_delete(stack) diff --git a/openstack/orchestration/v1/resource.py b/openstack/orchestration/v1/resource.py index f2a0a5633f..8f7ad017b9 100644 --- a/openstack/orchestration/v1/resource.py +++ b/openstack/orchestration/v1/resource.py @@ -10,8 +10,7 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.orchestration import orchestration_service -from openstack import resource2 as resource +from openstack import resource class Resource(resource.Resource): @@ -19,22 +18,22 @@ class Resource(resource.Resource): resource_key = 'resource' resources_key = 'resources' base_path = '/stacks/%(stack_name)s/%(stack_id)s/resources' - service = orchestration_service.OrchestrationService() # capabilities allow_create = False allow_list = True allow_retrieve = False allow_delete = False - allow_update = False + allow_commit = False # Properties #: A list of dictionaries containing links relevant to the resource. links = resource.Body('links') #: ID of the logical resource, usually the literal name of the resource #: as it appears in the stack template. 
- logical_resource_id = resource.Body('logical_resource_id', - alternate_id=True) + logical_resource_id = resource.Body( + 'logical_resource_id', alternate_id=True + ) #: Name of the resource. name = resource.Body('resource_name') #: ID of the physical resource (if any) that backs up the resource. For diff --git a/openstack/orchestration/v1/software_config.py b/openstack/orchestration/v1/software_config.py index c0f1288526..f0b67b3cd8 100644 --- a/openstack/orchestration/v1/software_config.py +++ b/openstack/orchestration/v1/software_config.py @@ -10,22 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.orchestration import orchestration_service -from openstack import resource2 as resource +from openstack import resource class SoftwareConfig(resource.Resource): resource_key = 'software_config' resources_key = 'software_configs' base_path = '/software_configs' - service = orchestration_service.OrchestrationService() # capabilities allow_create = True allow_list = True - allow_get = True + allow_fetch = True allow_delete = True - allow_update = False + allow_commit = False # Properties #: Configuration script or manifest that defines which configuration is @@ -40,14 +38,14 @@ class SoftwareConfig(resource.Resource): inputs = resource.Body('inputs') #: Name of the software config. name = resource.Body('name') - #: A string that contains options that are specific to the configuraiton + #: A string that contains options that are specific to the configuration #: management tool that this resource uses. options = resource.Body('options') #: A list of schemas each representing an output this software config #: produces. outputs = resource.Body('outputs') - def create(self, session): + def create(self, session, prepend_key=False, *args, **kwargs): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. 
- return super(SoftwareConfig, self).create(session, prepend_key=False) + return super().create(session, prepend_key, *args, **kwargs) diff --git a/openstack/orchestration/v1/software_deployment.py b/openstack/orchestration/v1/software_deployment.py index 3fb22eafd1..24d6952d25 100644 --- a/openstack/orchestration/v1/software_deployment.py +++ b/openstack/orchestration/v1/software_deployment.py @@ -10,22 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack.orchestration import orchestration_service -from openstack import resource2 as resource +from openstack import resource class SoftwareDeployment(resource.Resource): resource_key = 'software_deployment' resources_key = 'software_deployments' base_path = '/software_deployments' - service = orchestration_service.OrchestrationService() # capabilities allow_create = True allow_list = True - allow_get = True + allow_fetch = True allow_delete = True - allow_update = True + allow_commit = True # Properties #: The stack action that triggers this deployment resource. @@ -51,14 +49,12 @@ class SoftwareDeployment(resource.Resource): #: The date and time when the software deployment resource was created. updated_at = resource.Body('updated_time') - def create(self, session): + def create(self, session, prepend_key=False, *args, **kwargs): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. - return super(SoftwareDeployment, self).create( - session, prepend_key=False) + return super().create(session, prepend_key, *args, **kwargs) - def update(self, session): + def commit(self, session, prepend_key=False, *args, **kwargs): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. 
- return super(SoftwareDeployment, self).update( - session, prepend_key=False) + return super().commit(session, prepend_key, *args, **kwargs) diff --git a/openstack/orchestration/v1/stack.py b/openstack/orchestration/v1/stack.py index 397cd24349..4bd96a77e1 100644 --- a/openstack/orchestration/v1/stack.py +++ b/openstack/orchestration/v1/stack.py @@ -10,9 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. +import typing as ty + +from keystoneauth1 import adapter +import typing_extensions as ty_ext + +from openstack.common import tag from openstack import exceptions -from openstack.orchestration import orchestration_service -from openstack import resource2 as resource +from openstack import resource from openstack import utils @@ -21,16 +26,29 @@ class Stack(resource.Resource): resource_key = 'stack' resources_key = 'stacks' base_path = '/stacks' - service = orchestration_service.OrchestrationService() # capabilities allow_create = True allow_list = True - allow_get = True - allow_update = True + allow_fetch = True + allow_commit = True allow_delete = True + _query_mapping = resource.QueryParameters( + 'action', + 'name', + 'status', + 'project_id', + 'owner_id', + 'username', + project_id='tenant_id', + **tag.TagMixin._tag_query_parameters, + ) + # Properties + #: A list of resource objects that will be added if a stack update + # is performed. + added = resource.Body('added') #: Placeholder for AWS compatible template listing capabilities #: required by the stack. capabilities = resource.Body('capabilities') @@ -38,6 +56,20 @@ class Stack(resource.Resource): created_at = resource.Body('creation_time') #: A text description of the stack. description = resource.Body('description') + #: A list of resource objects that will be deleted if a stack + #: update is performed. + deleted = resource.Body('deleted', type=list) + #: Timestamp of the stack deletion. 
+ deleted_at = resource.Body('deletion_time') + #: A JSON environment for the stack. + environment = resource.Body('environment') + #: An ordered list of names for environment files found in the files dict. + environment_files = resource.Body('environment_files', type=list) + #: Additional files referenced in the template or the environment + files = resource.Body('files', type=dict) + #: Name of the container in swift that has child + #: templates and environment files. + files_container = resource.Body('files_container') #: Whether the stack will support a rollback operation on stack #: create/update failures. *Type: bool* is_rollback_disabled = resource.Body('disable_rollback', type=bool) @@ -45,6 +77,7 @@ class Stack(resource.Resource): links = resource.Body('links') #: Name of the stack. name = resource.Body('stack_name') + stack_name = resource.URI('stack_name') #: Placeholder for future extensions where stack related events #: can be published. notification_topics = resource.Body('notification_topics') @@ -56,10 +89,15 @@ class Stack(resource.Resource): parameters = resource.Body('parameters', type=dict) #: The ID of the parent stack if any parent_id = resource.Body('parent') + #: A list of resource objects that will be replaced if a stack update + #: is performed. + replaced = resource.Body('replaced') #: A string representation of the stack status, e.g. ``CREATE_COMPLETE``. status = resource.Body('stack_status') #: A text explaining how the stack transits to its current status. status_reason = resource.Body('stack_status_reason') + #: A list of strings used as tags on the stack + tags = resource.Body('tags', type=list, default=[]) #: A dict containing the template use for stack creation. template = resource.Body('template', type=dict) #: Stack template description text. Currently contains the same text @@ -69,44 +107,251 @@ class Stack(resource.Resource): template_url = resource.Body('template_url') #: Stack operation timeout in minutes. 
timeout_mins = resource.Body('timeout_mins') + #: A list of resource objects that will remain unchanged if a stack + #: update is performed. + unchanged = resource.Body('unchanged') + #: A list of resource objects that will have their properties updated + #: in place if a stack update is performed. + updated = resource.Body('updated') #: Timestamp of last update on the stack. updated_at = resource.Body('updated_time') #: The ID of the user project created for this stack. user_project_id = resource.Body('stack_user_project_id') - def create(self, session): + def create(self, session, prepend_key=False, *args, **kwargs): # This overrides the default behavior of resource creation because # heat doesn't accept resource_key in its request. - return super(Stack, self).create(session, prepend_key=False) + return super().create(session, prepend_key, *args, **kwargs) - def update(self, session): - # This overrides the default behavior of resource creation because - # heat doesn't accept resource_key in its request. - return super(Stack, self).update(session, prepend_key=False, - has_body=False) + def commit( + self, + session, + prepend_key=True, + has_body=True, + retry_on_conflict=None, + base_path=None, + *, + microversion=None, + preview=False, + **kwargs, + ): + # This overrides the default behavior of resource update because + # we need to use other endpoint for update preview. + base_path = None + if self.name and self.id: + base_path = f'/stacks/{self.name}/{self.id}' + elif self.name or self.id: + # We have only one of name/id. 
Do not try to build a stacks/NAME/ID + # path + base_path = f'/stacks/{self.name or self.id}' + request = self._prepare_request( + prepend_key=False, requires_id=False, base_path=base_path + ) + + microversion = self._get_microversion(session) + + request_url = request.url + if preview: + request_url = utils.urljoin(request_url, 'preview') + + response = session.put( + request_url, + json=request.body, + headers=request.headers, + microversion=microversion, + ) + + self.microversion = microversion + self._translate_response(response, has_body=True) + return self def _action(self, session, body): """Perform stack actions""" url = utils.urljoin(self.base_path, self._get_id(self), 'actions') - resp = session.post(url, endpoint_filter=self.service, json=body) - return resp.json() + resp = session.post(url, json=body, microversion=self.microversion) + exceptions.raise_from_response(resp) + return resp def check(self, session): return self._action(session, {'check': ''}) - def get(self, session, requires_id=True): - stk = super(Stack, self).get(session, requires_id=requires_id) - if stk and stk.status in ['DELETE_COMPLETE', 'ADOPT_COMPLETE']: - raise exceptions.NotFoundException( - "No stack found for %s" % stk.id) - return stk + def abandon(self, session): + url = utils.urljoin( + self.base_path, self.name, self._get_id(self), 'abandon' + ) + resp = session.delete(url) + return resp.json() + + def export(self, session): + """Export a stack data + :param session: The session to use for making this request. + :return: A dictionary containing the stack data. 
+ """ + url = utils.urljoin( + self.base_path, self.name, self._get_id(self), 'export' + ) + resp = session.get(url) + exceptions.raise_from_response(resp) + return resp.json() -class StackPreview(Stack): - base_path = '/stacks/preview' + def suspend(self, session): + """Suspend a stack - allow_create = True - allow_list = False - allow_get = False - allow_update = False - allow_delete = False + :param session: The session to use for making this request + :returns: None + """ + body = {"suspend": None} + self._action(session, body) + + def resume(self, session): + """Resume a stack + + :param session: The session to use for making this request + :returns: None + """ + body = {"resume": None} + self._action(session, body) + + def fetch( + self, + session, + requires_id=True, + base_path=None, + error_message=None, + skip_cache=False, + *, + resolve_outputs=True, + **params, + ): + if not self.allow_fetch: + raise exceptions.MethodNotSupported(self, "fetch") + + request = self._prepare_request( + requires_id=requires_id, base_path=base_path + ) + # session = self._get_session(session) + microversion = self._get_microversion(session) + + # NOTE(gtema): would be nice to simply use QueryParameters, however + # Heat return 302 with parameters being set into URL and requests + # apply parameters again, what results in them being set doubled + if not resolve_outputs: + request.url = request.url + '?resolve_outputs=False' + response = session.get( + request.url, microversion=microversion, skip_cache=skip_cache + ) + kwargs = {} + if error_message: + kwargs['error_message'] = error_message + + self.microversion = microversion + self._translate_response(response, **kwargs) + + if self and self.status in ['DELETE_COMPLETE', 'ADOPT_COMPLETE']: + raise exceptions.NotFoundException(f"No stack found for {self.id}") + return self + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + 
list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... + + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: + """Find a resource by its name or id. + + :param session: The session to use for making this request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param name_or_id: This resource's identifier, if needed by + the request. The default is ``None``. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict params: Any additional parameters to be passed into + underlying methods, such as to + :meth:`~openstack.resource.Resource.existing` + in order to pass on URI parameters. + + :return: The :class:`Resource` object matching the given name or id + or None if nothing matches. 
+ :raises: :class:`openstack.exceptions.DuplicateResource` if more + than one resource is found for this request. + :raises: :class:`openstack.exceptions.NotFoundException` if nothing + is found and ignore_missing is ``False``. + """ + session = cls._get_session(session) + # Try to short-circuit by looking directly for a matching ID. + try: + match = cls.existing( + id=name_or_id, + connection=session._get_connection(), # type: ignore + **params, + ) + return match.fetch(session, **params) + except exceptions.NotFoundException: + pass + + # NOTE(gtema) we do not do list, since previous call has done this + # for us already + + if ignore_missing: + return None + + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) + + +StackPreview = Stack diff --git a/openstack/orchestration/v1/stack_environment.py b/openstack/orchestration/v1/stack_environment.py new file mode 100644 index 0000000000..e61bea9da4 --- /dev/null +++ b/openstack/orchestration/v1/stack_environment.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class StackEnvironment(resource.Resource): + base_path = "/stacks/%(stack_name)s/%(stack_id)s/environment" + + # capabilities + allow_create = False + allow_list = False + allow_fetch = True + allow_delete = False + allow_commit = False + + # Properties + #: Name of the stack where the template is referenced. 
+ name = resource.URI('stack_name') + # Backwards compat + stack_name = name + #: ID of the stack where the template is referenced. + id = resource.URI('stack_id') + # Backwards compat + stack_id = id + #: A list of parameter names whose values are encrypted + encrypted_param_names = resource.Body('encrypted_param_names') + #: A list of event sinks + event_sinks = resource.Body('event_sinks') + #: A map of parameters and their default values defined for the stack. + parameter_defaults = resource.Body('parameter_defaults') + #: A map of parametes defined in the stack template. + parameters = resource.Body('parameters', type=dict) + #: A map containing customized resource definitions. + resource_registry = resource.Body('resource_registry', type=dict) diff --git a/openstack/orchestration/v1/stack_event.py b/openstack/orchestration/v1/stack_event.py new file mode 100644 index 0000000000..640659e6a4 --- /dev/null +++ b/openstack/orchestration/v1/stack_event.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class StackEvent(resource.Resource): + base_path = '/stacks/%(stack_name)s/%(stack_id)s/events' + resources_key = 'events' + + # capabilities + allow_create = False + allow_list = True + allow_fetch = True + allow_delete = False + allow_commit = False + + _query_mapping = resource.QueryParameters( + "resource_action", + "resource_status", + "resource_name", + "resource_type", + "nested_depth", + "sort_key", + "sort_dir", + ) + + # Properties + #: The date and time when the event was created + event_time = resource.Body('event_time') + #: The ID of the event object + id = resource.Body('id') + #: A list of dictionaries containing links relevant to the stack. + links = resource.Body('links') + #: The ID of the logical stack resource. + logical_resource_id = resource.Body('logical_resource_id') + #: The ID of the stack physical resource. + physical_resource_id = resource.Body('physical_resource_id') + #: The name of the resource. + resource_name = resource.Body('resource_name') + #: The status of the resource. + resource_status = resource.Body('resource_status') + #: The reason for the current stack resource state. + resource_status_reason = resource.Body('resource_status_reason') diff --git a/openstack/orchestration/v1/stack_files.py b/openstack/orchestration/v1/stack_files.py new file mode 100644 index 0000000000..6c3919b0a8 --- /dev/null +++ b/openstack/orchestration/v1/stack_files.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class StackFiles(resource.Resource): + base_path = "/stacks/%(stack_name)s/%(stack_id)s/files" + + # capabilities + allow_create = False + allow_list = False + allow_fetch = True + allow_delete = False + allow_commit = False + + # Properties + #: Name of the stack where the template is referenced. + name = resource.URI('stack_name') + # Backwards compat + stack_name = name + #: ID of the stack where the template is referenced. + id = resource.URI('stack_id') + # Backwards compat + stack_id = id + + def fetch( + self, session, requires_id=False, base_path=None, *args, **kwargs + ): + # The stack files response contains a map of filenames and file + # contents. + request = self._prepare_request(requires_id=False, base_path=base_path) + resp = session.get(request.url) + return resp.json() diff --git a/openstack/orchestration/v1/stack_template.py b/openstack/orchestration/v1/stack_template.py new file mode 100644 index 0000000000..d8bb7a9678 --- /dev/null +++ b/openstack/orchestration/v1/stack_template.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class StackTemplate(resource.Resource): + base_path = "/stacks/%(stack_name)s/%(stack_id)s/template" + + # capabilities + allow_create = False + allow_list = False + allow_fetch = True + allow_delete = False + allow_commit = False + + # Properties + #: Name of the stack where the template is referenced. + name = resource.URI('stack_name') + # Backwards compat. _stack_name will never match, but the alias will + # point it to the value pulled for name. + stack_name = resource.URI('_stack_name', alias='name') + #: ID of the stack where the template is referenced. + stack_id = resource.URI('stack_id', alternate_id=True) + #: The description specified in the template + description = resource.Body('Description') + #: The version of the orchestration HOT template. + heat_template_version = resource.Body('heat_template_version') + #: Key and value that contain output data. + outputs = resource.Body('outputs', type=dict) + #: Key and value pairs that contain template parameters + parameters = resource.Body('parameters', type=dict) + #: Key and value pairs that contain definition of resources in the + #: template + resources = resource.Body('resources', type=dict) + # List parameters grouped. + parameter_groups = resource.Body('parameter_groups', type=list) + # Restrict conditions which supported since '2016-10-14'. + conditions = resource.Body('conditions', type=dict) diff --git a/openstack/orchestration/v1/template.py b/openstack/orchestration/v1/template.py index be2752dd51..1ae9ebfe76 100644 --- a/openstack/orchestration/v1/template.py +++ b/openstack/orchestration/v1/template.py @@ -10,21 +10,18 @@ # License for the specific language governing permissions and limitations # under the License. 
-from six.moves.urllib import parse +from urllib import parse -from openstack.orchestration import orchestration_service -from openstack import resource2 as resource +from openstack import resource class Template(resource.Resource): - service = orchestration_service.OrchestrationService() - # capabilities allow_create = False allow_list = False - allow_retrieve = False + allow_fetch = False allow_delete = False - allow_update = False + allow_commit = False # Properties #: The description specified in the template @@ -34,8 +31,14 @@ class Template(resource.Resource): #: A list of parameter groups each contains a lsit of parameter names. parameter_groups = resource.Body('ParameterGroups', type=list) - def validate(self, session, template, environment=None, template_url=None, - ignore_errors=None): + def validate( + self, + session, + template, + environment=None, + template_url=None, + ignore_errors=None, + ): url = '/validate' body = {'template': template} @@ -47,6 +50,6 @@ def validate(self, session, template, environment=None, template_url=None, qry = parse.urlencode({'ignore_errors': ignore_errors}) url = '?'.join([url, qry]) - resp = session.post(url, endpoint_filter=self.service, json=body) + resp = session.post(url, json=body) self._translate_response(resp) return self diff --git a/openstack/orchestration/version.py b/openstack/orchestration/version.py index f44118122c..805ce6a345 100644 --- a/openstack/orchestration/version.py +++ b/openstack/orchestration/version.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.orchestration import orchestration_service from openstack import resource @@ -18,13 +17,10 @@ class Version(resource.Resource): resource_key = 'version' resources_key = 'versions' base_path = '/' - service = orchestration_service.OrchestrationService( - version=orchestration_service.OrchestrationService.UNVERSIONED - ) # capabilities allow_list = True # Properties - links = resource.prop('links') - status = resource.prop('status') + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/tests/unit/message/v1/__init__.py b/openstack/placement/__init__.py similarity index 100% rename from openstack/tests/unit/message/v1/__init__.py rename to openstack/placement/__init__.py diff --git a/openstack/placement/placement_service.py b/openstack/placement/placement_service.py new file mode 100644 index 0000000000..24913f5238 --- /dev/null +++ b/openstack/placement/placement_service.py @@ -0,0 +1,22 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.placement.v1 import _proxy +from openstack import service_description + + +class PlacementService(service_description.ServiceDescription[_proxy.Proxy]): + """The placement service.""" + + supported_versions = { + '1': _proxy.Proxy, + } diff --git a/openstack/tests/unit/metric/__init__.py b/openstack/placement/v1/__init__.py similarity index 100% rename from openstack/tests/unit/metric/__init__.py rename to openstack/placement/v1/__init__.py diff --git a/openstack/placement/v1/_proxy.py b/openstack/placement/v1/_proxy.py new file mode 100644 index 0000000000..f85270f76a --- /dev/null +++ b/openstack/placement/v1/_proxy.py @@ -0,0 +1,529 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty + +from openstack.placement.v1 import resource_class as _resource_class +from openstack.placement.v1 import resource_provider as _resource_provider +from openstack.placement.v1 import ( + resource_provider_inventory as _resource_provider_inventory, +) +from openstack.placement.v1 import trait as _trait +from openstack import proxy +from openstack import resource + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['1']] = '1' + + _resource_registry = { + "resource_class": _resource_class.ResourceClass, + "resource_provider": _resource_provider.ResourceProvider, + } + + # resource classes + + def create_resource_class(self, **attrs): + """Create a new resource class from attributes. 
+ + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.placement.v1.resource_provider.ResourceClass`, + comprised of the properties on the ResourceClass class. + + :returns: The results of resource class creation + :rtype: :class:`~openstack.placement.v1.resource_class.ResourceClass` + """ + return self._create(_resource_class.ResourceClass, **attrs) + + def delete_resource_class(self, resource_class, ignore_missing=True): + """Delete a resource class + + :param resource_class: The value can be either the ID of a resource + class or an + :class:`~openstack.placement.v1.resource_class.ResourceClass`, + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource class does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + resource class. + + :returns: ``None`` + """ + self._delete( + _resource_class.ResourceClass, + resource_class, + ignore_missing=ignore_missing, + ) + + def update_resource_class(self, resource_class, **attrs): + """Update a resource class + + :param resource_class: The value can be either the ID of a resource + class or an + :class:`~openstack.placement.v1.resource_class.ResourceClass`, + instance. + :param attrs: The attributes to update on the resource class + represented by ``resource_class``. + + :returns: The updated resource class + :rtype: :class:`~openstack.placement.v1.resource_class.ResourceClass` + """ + return self._update( + _resource_class.ResourceClass, + resource_class, + **attrs, + ) + + def get_resource_class(self, resource_class): + """Get a single resource_class. + + :param resource_class: The value can be either the ID of a resource + class or an + :class:`~openstack.placement.v1.resource_class.ResourceClass`, + instance. 
+ + :returns: An instance of + :class:`~openstack.placement.v1.resource_class.ResourceClass` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource class matching the criteria could be found. + """ + return self._get( + _resource_class.ResourceClass, + resource_class, + ) + + def resource_classes(self, **query): + """Retrieve a generator of resource classs. + + :param kwargs query: Optional query parameters to be sent to + restrict the resource classs to be returned. + + :returns: A generator of resource class instances. + """ + return self._list(_resource_class.ResourceClass, **query) + + # resource providers + + def create_resource_provider(self, **attrs): + """Create a new resource provider from attributes. + + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, + comprised of the properties on the ResourceProvider class. + + :returns: The results of resource provider creation + :rtype: :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + """ # noqa: E501 + return self._create(_resource_provider.ResourceProvider, **attrs) + + def delete_resource_provider(self, resource_provider, ignore_missing=True): + """Delete a resource provider + + :param resource_provider: The value can be either the ID of a resource + provider or an + :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource provider does not exist. When set to ``True``, no + exception will be set when attempting to delete a nonexistent + resource provider. 
+ + :returns: ``None`` + """ + self._delete( + _resource_provider.ResourceProvider, + resource_provider, + ignore_missing=ignore_missing, + ) + + def update_resource_provider(self, resource_provider, **attrs): + """Update a resource provider + + :param resource_provider: The value can be either the ID of a resource + provider or an + :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, + instance. + :param attrs: The attributes to update on the resource provider + represented by ``resource_provider``. + + :returns: The updated resource provider + :rtype: :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + """ # noqa: E501 + return self._update( + _resource_provider.ResourceProvider, + resource_provider, + **attrs, + ) + + def get_resource_provider(self, resource_provider): + """Get a single resource_provider. + + :param resource_provider: The value can be either the ID of a resource + provider or an + :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, + instance. + + :returns: An instance of + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource provider matching the criteria could be found. + """ + return self._get( + _resource_provider.ResourceProvider, + resource_provider, + ) + + def find_resource_provider(self, name_or_id, ignore_missing=True): + """Find a single resource_provider. + + :param name_or_id: The name or ID of a resource provider. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + + :returns: An instance of + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource provider matching the criteria could be found. 
+ """ + return self._find( + _resource_provider.ResourceProvider, + name_or_id, + ignore_missing=ignore_missing, + ) + + def resource_providers(self, **query): + """Retrieve a generator of resource providers. + + :param kwargs query: Optional query parameters to be sent to + restrict the resource providers to be returned. + + :returns: A generator of resource provider instances. + """ + return self._list(_resource_provider.ResourceProvider, **query) + + # resource provider aggregates + + def get_resource_provider_aggregates(self, resource_provider): + """Get a list of aggregates for a resource provider. + + :param resource_provider: The value can be either the ID of a resource + provider or an + :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, + instance. + + :returns: An instance of + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + with the ``aggregates`` attribute populated. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource provider matching the criteria could be found. + """ + res = self._get_resource( + _resource_provider.ResourceProvider, + resource_provider, + ) + return res.fetch_aggregates(self) + + def set_resource_provider_aggregates(self, resource_provider, *aggregates): + """Update aggregates for a resource provider. + + :param resource_provider: The value can be either the ID of a resource + provider or an + :class:`~openstack.placement.v1.resource_provider.ResourceProvider`, + instance. + :param aggregates: A list of aggregates. These aggregates will replace + all aggregates currently present. + + :returns: An instance of + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + with the ``aggregates`` attribute populated with the updated value. + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource provider matching the criteria could be found. 
+ """ + res = self._get_resource( + _resource_provider.ResourceProvider, + resource_provider, + ) + return res.set_aggregates(self, aggregates=aggregates) + + # resource provider inventories + + def create_resource_provider_inventory( + self, + resource_provider, + resource_class, + *, + total, + **attrs, + ): + """Create a new resource provider inventory from attributes + + :param resource_provider: Either the ID of a resource provider or a + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + instance. + :param total: The actual amount of the resource that the provider can + accommodate. + :param attrs: Keyword arguments which will be used to create a + :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, + comprised of the properties on the ResourceProviderInventory class. + + :returns: The results of resource provider inventory creation + :rtype: :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory` + """ # noqa: E501 + resource_provider_id = resource.Resource._get_id(resource_provider) + resource_class_name = resource.Resource._get_id(resource_class) + return self._create( + _resource_provider_inventory.ResourceProviderInventory, + resource_provider_id=resource_provider_id, + resource_class=resource_class_name, + total=total, + **attrs, + ) + + def delete_resource_provider_inventory( + self, + resource_provider_inventory, + resource_provider=None, + ignore_missing=True, + ): + """Delete a resource provider inventory + + :param resource_provider_inventory: The value can be either the ID of a + resource provider or an + :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, + instance. + :param resource_provider: Either the ID of a resource provider or a + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + instance. This value must be specified when + ``resource_provider_inventory`` is an ID. 
+ :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource provider inventory does not exist. When set to + ``True``, no exception will be set when attempting to delete a + nonexistent resource provider inventory. + + :returns: ``None`` + """ + resource_provider_id = self._get_uri_attribute( + resource_provider_inventory, + resource_provider, + 'resource_provider_id', + ) + self._delete( + _resource_provider_inventory.ResourceProviderInventory, + resource_provider_inventory, + resource_provider_id=resource_provider_id, + ignore_missing=ignore_missing, + ) + + def update_resource_provider_inventory( + self, + resource_provider_inventory, + resource_provider=None, + *, + resource_provider_generation=None, + **attrs, + ): + """Update a resource provider's inventory + + :param resource_provider_inventory: The value can be either the ID of a resource + provider inventory or an + :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, + instance. + :param resource_provider: Either the ID of a resource provider or a + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + instance. This value must be specified when + ``resource_provider_inventory`` is an ID. + :attrs kwargs: The attributes to update on the resource provider inventory + represented by ``resource_provider_inventory``. 
+ + :returns: The updated resource provider inventory + :rtype: :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory` + """ # noqa: E501 + resource_provider_id = self._get_uri_attribute( + resource_provider_inventory, + resource_provider, + 'resource_provider_id', + ) + return self._update( + _resource_provider_inventory.ResourceProviderInventory, + resource_provider_inventory, + resource_provider_id=resource_provider_id, + resource_provider_generation=resource_provider_generation, + **attrs, + ) + + def get_resource_provider_inventory( + self, + resource_provider_inventory, + resource_provider=None, + ): + """Get a single resource_provider_inventory + + :param resource_provider_inventory: The value can be either the ID of a + resource provider inventory or an + :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory`, + instance. + :param resource_provider: Either the ID of a resource provider or a + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + instance. This value must be specified when + ``resource_provider_inventory`` is an ID. + + :returns: An instance of + :class:`~openstack.placement.v1.resource_provider_inventory.ResourceProviderInventory` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource provider inventory matching the criteria could be found. + """ + resource_provider_id = self._get_uri_attribute( + resource_provider_inventory, + resource_provider, + 'resource_provider_id', + ) + return self._get( + _resource_provider_inventory.ResourceProviderInventory, + resource_provider_inventory, + resource_provider_id=resource_provider_id, + ) + + def resource_provider_inventories(self, resource_provider, **query): + """Retrieve a generator of resource provider inventories + + :param resource_provider: Either the ID of a resource provider or a + :class:`~openstack.placement.v1.resource_provider.ResourceProvider` + instance. 
+ :param query: Optional query parameters to be sent to limit + the resources being returned. + + :returns: A generator of resource provider inventory instances. + """ + resource_provider_id = resource.Resource._get_id(resource_provider) + return self._list( + _resource_provider_inventory.ResourceProviderInventory, + resource_provider_id=resource_provider_id, + **query, + ) + + # ========== Traits ========== + + def create_trait(self, name): + """Create a new trait + + :param name: The name of the new trait + + :returns: The results of trait creation + :rtype: :class:`~openstack.placement.v1.trait.Trait` + """ + return self._create(_trait.Trait, name=name) + + def delete_trait(self, trait, ignore_missing=True): + """Delete a trait + + :param trait: The value can be either the ID of a trait or an + :class:`~openstack.placement.v1.trait.Trait`, instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the trait does not exist. When set to + ``True``, no exception will be set when attempting to delete a + nonexistent trait. + + :returns: ``None`` + """ + self._delete(_trait.Trait, trait, ignore_missing=ignore_missing) + + def get_trait(self, trait): + """Get a single trait + + :param trait: The value can be either the ID of a trait or an + :class:`~openstack.placement.v1.trait.Trait`, instance. + + :returns: An instance of + :class:`~openstack.placement.v1.trait.Trait` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + trait matching the criteria could be found. + """ + return self._get(_trait.Trait, trait) + + def traits(self, **query): + """Retrieve a generator of traits + + :param query: Optional query parameters to be sent to limit + the resources being returned. 
+ + :returns: A generator of trait objects + """ + return self._list(_trait.Trait, **query) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. 
+ :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/placement/v1/resource_class.py b/openstack/placement/v1/resource_class.py new file mode 100644 index 0000000000..e45f5e6421 --- /dev/null +++ b/openstack/placement/v1/resource_class.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class ResourceClass(resource.Resource): + resource_key = None + resources_key = 'resource_classes' + base_path = '/resource_classes' + + # Capabilities + + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Added in 1.2 + _max_microversion = '1.2' + + name = resource.Body('name', alternate_id=True) diff --git a/openstack/placement/v1/resource_provider.py b/openstack/placement/v1/resource_provider.py new file mode 100644 index 0000000000..4419ab4b2a --- /dev/null +++ b/openstack/placement/v1/resource_provider.py @@ -0,0 +1,114 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class ResourceProvider(resource.Resource): + resource_key = None + resources_key = 'resource_providers' + base_path = '/resource_providers' + + # Capabilities + + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Filters + + _query_mapping = resource.QueryParameters( + 'name', + 'member_of', + 'resources', + 'in_tree', + 'required', + id='uuid', + ) + + # The parent_provider_uuid and root_provider_uuid fields were introduced in + # 1.14 + # The required query parameter was added in 1.18 + # The create operation started returning a body in 1.20 + _max_microversion = '1.20' + + # Properties + + #: Aggregates + aggregates = resource.Body('aggregates', type=list, list_type=str) + #: The UUID of a resource provider. + id = resource.Body('uuid', alternate_id=True) + #: A consistent view marker that assists with the management of concurrent + #: resource provider updates. + generation = resource.Body('generation') + #: Links pertaining to this flavor. This is a list of dictionaries, + #: each including keys ``href`` and ``rel``. + links = resource.Body('links') + #: The name of this resource provider. + name = resource.Body('name') + #: The UUID of the immediate parent of the resource provider. + parent_provider_id = resource.Body('parent_provider_uuid') + #: Read-only UUID of the top-most provider in this provider tree. 
+ root_provider_id = resource.Body('root_provider_uuid') + + def fetch_aggregates(self, session): + """List aggregates set on the resource provider + + :param session: The session to use for making this request + :return: The resource provider with aggregates populated + """ + url = utils.urljoin(self.base_path, self.id, 'aggregates') + microversion = self._get_microversion(session) + + response = session.get(url, microversion=microversion) + exceptions.raise_from_response(response) + data = response.json() + + updates = {'aggregates': data['aggregates']} + if utils.supports_microversion(session, '1.19'): + updates['generation'] = data['resource_provider_generation'] + self._body.attributes.update(updates) + + return self + + def set_aggregates(self, session, aggregates=None): + """Replaces aggregates on the resource provider + + :param session: The session to use for making this request + :param list aggregates: List of aggregates + :return: The resource provider with updated aggregates populated + """ + url = utils.urljoin(self.base_path, self.id, 'aggregates') + microversion = self._get_microversion(session) + + body = { + 'aggregates': aggregates or [], + } + if utils.supports_microversion(session, '1.19'): + body['resource_provider_generation'] = self.generation + + response = session.put(url, json=body, microversion=microversion) + exceptions.raise_from_response(response) + data = response.json() + + updates = {'aggregates': data['aggregates']} + if 'resource_provider_generation' in data: + updates['resource_provider_generation'] = data[ + 'resource_provider_generation' + ] + self._body.attributes.update(updates) + + return self diff --git a/openstack/placement/v1/resource_provider_inventory.py b/openstack/placement/v1/resource_provider_inventory.py new file mode 100644 index 0000000000..2173338981 --- /dev/null +++ b/openstack/placement/v1/resource_provider_inventory.py @@ -0,0 +1,196 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you 
may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import fields +from openstack import resource + + +class ResourceProviderInventory(resource.Resource): + resource_key = None + resources_key = None + base_path = '/resource_providers/%(resource_provider_id)s/inventories' + + _query_mapping = resource.QueryParameters( + include_pagination_defaults=False + ) + + # Capabilities + + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + + # Properties + + #: The UUID of a resource provider. + resource_provider_id = resource.URI('resource_provider_id') + #: The name of the resource class. + resource_class = resource.Body('resource_class', alternate_id=True) + #: A consistent view marker that assists with the management of concurrent + #: resource provider updates. + resource_provider_generation = resource.Body( + 'resource_provider_generation', + type=int, + ) + + #: It is used in determining whether consumption of the resource of the + #: provider can exceed physical constraints. + allocation_ratio = resource.Body('allocation_ratio', type=float) + #: A maximum amount any single allocation against an inventory can have. + max_unit = resource.Body('max_unit', type=int) + #: A minimum amount any single allocation against an inventory can have. + min_unit = resource.Body('min_unit', type=int) + #: The amount of the resource a provider has reserved for its own use. 
+ reserved = resource.Body('reserved', type=int) + #: A representation of the divisible amount of the resource that may be + #: requested. For example, step_size = 5 means that only values divisible + #: by 5 (5, 10, 15, etc.) can be requested. + step_size = resource.Body('step_size', type=int) + #: The actual amount of the resource that the provider can accommodate. + total = resource.Body('total', type=int) + + def commit( + self, + session, + prepend_key=True, + has_body=True, + retry_on_conflict=None, + base_path=None, + *, + microversion=None, + **kwargs, + ): + # resource_provider_generation must always be provided on update, but + # it will appear to be identical (by design) so we strip it. Prevent + # this happening. + self._body._dirty.add('resource_provider_generation') + return super().commit( + session, + prepend_key=prepend_key, + has_body=has_body, + retry_on_conflict=retry_on_conflict, + base_path=base_path, + microversion=microversion, + **kwargs, + ) + + # TODO(stephenfin): It would be nicer if we could do this in Resource + # itself since the logic is also found elsewhere (e.g. + # openstack.identity.v2.extension.Extension) but that code is a bit of a + # rat's nest right now and needs a spring clean + @classmethod + def list( + cls, + session, + paginated=True, + base_path=None, + allow_unknown_params=False, + *, + microversion=None, + **params, + ): + """This method is a generator which yields resource objects. + + A re-implementation of :meth:`~openstack.resource.Resource.list` that + handles placement's single, unpaginated list implementation. + + Refer to :meth:`~openstack.resource.Resource.list` for full + documentation including parameter, exception and return type + documentation. 
+ """ + session = cls._get_session(session) + + if microversion is None: + microversion = cls._get_microversion(session) + + if base_path is None: + base_path = cls.base_path + + # There is no server-side filtering, only client-side + client_filters = {} + # Gather query parameters which are not supported by the server + for k, v in params.items(): + if ( + # Known attr + hasattr(cls, k) + # Is real attr property + and isinstance(getattr(cls, k), fields.Body) + # not included in the query_params + and k not in cls._query_mapping._mapping.keys() + ): + client_filters[k] = v + + uri = base_path % params + uri_params = {} + + for k, v in params.items(): + # We need to gather URI parts to set them on the resource later + if hasattr(cls, k) and isinstance(getattr(cls, k), fields.URI): + uri_params[k] = v + + def _dict_filter(f, d): + """Dict param based filtering""" + if not d: + return False + for key in f.keys(): + if isinstance(f[key], dict): + if not _dict_filter(f[key], d.get(key, None)): + return False + elif d.get(key, None) != f[key]: + return False + return True + + response = session.get( + uri, + headers={"Accept": "application/json"}, + params={}, + microversion=microversion, + ) + exceptions.raise_from_response(response) + data = response.json() + + for resource_class, resource_data in data['inventories'].items(): + resource_inventory = { + 'resource_class': resource_class, + 'resource_provider_generation': data[ + 'resource_provider_generation' + ], + **resource_data, + **uri_params, + } + value = cls.existing( + microversion=microversion, + connection=session._get_connection(), + **resource_inventory, + ) + + filters_matched = True + # Iterate over client filters and return only if matching + for key in client_filters.keys(): + if isinstance(client_filters[key], dict): + if not _dict_filter( + client_filters[key], + value.get(key, None), + ): + filters_matched = False + break + elif value.get(key, None) != client_filters[key]: + filters_matched = False + 
break + + if filters_matched: + yield value + + return None diff --git a/openstack/placement/v1/trait.py b/openstack/placement/v1/trait.py new file mode 100644 index 0000000000..be941f2b79 --- /dev/null +++ b/openstack/placement/v1/trait.py @@ -0,0 +1,143 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import fields +from openstack import resource + + +class Trait(resource.Resource): + resource_key = None + resources_key = None + base_path = '/traits' + + # Capabilities + + allow_create = True + allow_fetch = True + allow_delete = True + allow_list = True + + create_method = 'PUT' + + # Added in 1.6 + _max_microversion = '1.6' + + _query_mapping = resource.QueryParameters( + 'name', + 'associated', + include_pagination_defaults=False, + ) + + name = resource.Body('name', alternate_id=True) + + @classmethod + def list( + cls, + session, + paginated=True, + base_path=None, + allow_unknown_params=False, + *, + microversion=None, + **params, + ): + """This method is a generator which yields resource objects. + + A re-implementation of :meth:`~openstack.resource.Resource.list` that + handles the list of strings (as opposed to a list of objects) that this + call returns. + + Refer to :meth:`~openstack.resource.Resource.list` for full + documentation including parameter, exception and return type + documentation. 
+ """ + session = cls._get_session(session) + + if microversion is None: + microversion = cls._get_microversion(session) + + if base_path is None: + base_path = cls.base_path + + # There is no server-side filtering, only client-side + client_filters = {} + # Gather query parameters which are not supported by the server + for k, v in params.items(): + if ( + # Known attr + hasattr(cls, k) + # Is real attr property + and isinstance(getattr(cls, k), fields.Body) + # not included in the query_params + and k not in cls._query_mapping._mapping.keys() + ): + client_filters[k] = v + + uri = base_path % params + uri_params = {} + + for k, v in params.items(): + # We need to gather URI parts to set them on the resource later + if hasattr(cls, k) and isinstance(getattr(cls, k), fields.URI): + uri_params[k] = v + + def _dict_filter(f, d): + """Dict param based filtering""" + if not d: + return False + for key in f.keys(): + if isinstance(f[key], dict): + if not _dict_filter(f[key], d.get(key, None)): + return False + elif d.get(key, None) != f[key]: + return False + return True + + response = session.get( + uri, + headers={"Accept": "application/json"}, + params={}, + microversion=microversion, + ) + exceptions.raise_from_response(response) + data = response.json() + + for trait_name in data['traits']: + trait = { + 'name': trait_name, + **uri_params, + } + value = cls.existing( + microversion=microversion, + connection=session._get_connection(), + **trait, + ) + + filters_matched = True + # Iterate over client filters and return only if matching + for key in client_filters.keys(): + if isinstance(client_filters[key], dict): + if not _dict_filter( + client_filters[key], + value.get(key, None), + ): + filters_matched = False + break + elif value.get(key, None) != client_filters[key]: + filters_matched = False + break + + if filters_matched: + yield value + + return None diff --git a/openstack/profile.py b/openstack/profile.py deleted file mode 100644 index 
1fed6564a8..0000000000 --- a/openstack/profile.py +++ /dev/null @@ -1,207 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -:class:`~openstack.profile.Profile` is the class that is used to -define the various preferences for different services. The preferences that -are currently supported are service name, region, version and interface. -The :class:`~openstack.profile.Profile` and the -:class:`~openstack.connection.Connection` classes are the most important -user facing classes. - -Examples --------- - -The :class:`~openstack.profile.Profile` class is constructed -with no arguments. - -Set Methods -~~~~~~~~~~~ - -A user's preferences are set based on the service type. 
Service type would -normally be something like 'compute', 'identity', 'object-store', etc.:: - - from openstack import profile - prof = profile.Profile() - prof.set_name('compute', 'matrix') - prof.set_region(prof.ALL, 'zion') - prof.set_version('identity', 'v3') - prof.set_interface('object-store', 'internal') - for service in prof.get_services(): - print(prof.get_filter(service.service_type) - -The resulting preference print out would look something like:: - - service_type=compute,region=zion,service_name=matrix - service_type=network,region=zion - service_type=database,region=zion - service_type=image,region=zion - service_type=metering,region=zion - service_type=orchestration,region=zion - service_type=object-store,interface=internal,region=zion - service_type=identity,region=zion,version=v3 -""" - -import copy -import logging -import six - -from openstack.bare_metal import bare_metal_service -from openstack.block_store import block_store_service -from openstack.cluster import cluster_service -from openstack.compute import compute_service -from openstack.database import database_service -from openstack import exceptions -from openstack.identity import identity_service -from openstack.image import image_service -from openstack.key_manager import key_manager_service -from openstack.message import message_service -from openstack import module_loader -from openstack.network import network_service -from openstack.object_store import object_store_service -from openstack.orchestration import orchestration_service -from openstack.telemetry.alarm import alarm_service -from openstack.telemetry import telemetry_service - -_logger = logging.getLogger(__name__) - - -class Profile(object): - - ALL = "*" - """Wildcard service identifier representing all services.""" - - def __init__(self, plugins=None): - """User preference for each service. - - :param plugins: List of entry point namespaces to load. 
- - Create a new :class:`~openstack.profile.Profile` - object with no preferences defined, but knowledge of the services. - Services are identified by their service type, e.g.: 'identity', - 'compute', etc. - """ - self._services = {} - - self._add_service(alarm_service.AlarmService(version="v2")) - self._add_service(bare_metal_service.BareMetalService(version="v1")) - self._add_service(block_store_service.BlockStoreService(version="v2")) - self._add_service(cluster_service.ClusterService(version="v1")) - self._add_service(compute_service.ComputeService(version="v2")) - self._add_service(database_service.DatabaseService(version="v1")) - self._add_service(identity_service.IdentityService(version="v3")) - self._add_service(image_service.ImageService(version="v2")) - self._add_service(key_manager_service.KeyManagerService(version="v1")) - self._add_service(message_service.MessageService(version="v1")) - self._add_service(network_service.NetworkService(version="v2")) - self._add_service( - object_store_service.ObjectStoreService(version="v1")) - self._add_service( - orchestration_service.OrchestrationService(version="v1")) - self._add_service(telemetry_service.TelemetryService(version="v2")) - - if plugins: - for plugin in plugins: - self._load_plugin(plugin) - self.service_keys = sorted(self._services.keys()) - - def __repr__(self): - return repr(self._services) - - def _add_service(self, serv): - serv.interface = None - self._services[serv.service_type] = serv - - def _load_plugin(self, namespace): - """Load a service plugin. - - :param str namespace: Entry point namespace - """ - services = module_loader.load_service_plugins(namespace) - for service_type in services: - if service_type in self._services: - _logger.debug("Overriding %s with %s", service_type, - services[service_type]) - self._add_service(services[service_type]) - - def get_filter(self, service): - """Get a service preference. - - :param str service: Desired service type. 
- """ - return copy.copy(self._get_filter(service)) - - def _get_filter(self, service): - """Get a service preference. - - :param str service: Desired service type. - """ - serv = self._services.get(service, None) - if serv is not None: - return serv - msg = ("Service %s not in list of valid services: %s" % - (service, self.service_keys)) - raise exceptions.SDKException(msg) - - def _get_services(self, service): - return self.service_keys if service == self.ALL else [service] - - def _setter(self, service, attr, value): - for service in self._get_services(service): - setattr(self._get_filter(service), attr, value) - - def get_services(self): - """Get a list of all the known services.""" - services = [] - for name, service in six.iteritems(self._services): - services.append(service) - return services - - def set_name(self, service, name): - """Set the desired name for the specified service. - - :param str service: Service type. - :param str name: Desired service name. - """ - self._setter(service, "service_name", name) - - def set_region(self, service, region): - """Set the desired region for the specified service. - - :param str service: Service type. - :param str region: Desired service region. - """ - self._setter(service, "region", region) - - def set_version(self, service, version): - """Set the desired version for the specified service. - - :param str service: Service type. - :param str version: Desired service version. - """ - self._get_filter(service).version = version - - def set_api_version(self, service, api_version): - """Set the desired API micro-version for the specified service. - - :param str service: Service type. - :param str api_version: Desired service API micro-version. - """ - self._setter(service, "api_version", api_version) - - def set_interface(self, service, interface): - """Set the desired interface for the specified service. - - :param str service: Service type. - :param str interface: Desired service interface. 
- """ - self._setter(service, "interface", interface) diff --git a/openstack/proxy.py b/openstack/proxy.py index a3f71c7a13..8f26e343bc 100644 --- a/openstack/proxy.py +++ b/openstack/proxy.py @@ -10,287 +10,1080 @@ # License for the specific language governing permissions and limitations # under the License. +# This is needed due to https://github.com/eventlet/eventlet/issues/1026 which +# nova (and possibly others) expose +from __future__ import annotations + +import collections.abc +import functools +import logging +import queue +import typing as ty +import urllib +from urllib.parse import urlparse +import warnings + +try: + import simplejson + + JSONDecodeError = simplejson.scanner.JSONDecodeError +except ImportError: + JSONDecodeError = ValueError # type: ignore +import iso8601 +import jmespath +from keystoneauth1 import adapter +from keystoneauth1 import session + +from openstack import _log from openstack import exceptions from openstack import resource +from openstack import utils +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + import influxdb as influxdb_client # type: ignore[import-not-found] + from keystoneauth1 import plugin + import prometheus_client + import requests + from statsd.client import base as statsd_client + + from openstack import connection + + +ProxyT = ty.TypeVar('ProxyT', bound='Proxy') + + +def normalize_metric_name(name: str) -> str: + name = name.replace('.', '_') + name = name.replace(':', '_') + return name + + +class CleanupDependency(ty.TypedDict): + before: list[str] + after: list[str] + + +class Proxy(adapter.Adapter): + """Represents a service.""" + + api_version: ty.ClassVar[str] + """The API version. + + This is used as a descriminating attribute for type checking. + """ + + retriable_status_codes: list[int] | None = None + """HTTP status codes that should be retried by default. + + The number of retries is defined by the configuration in parameters called + ``_status_code_retries``. 
+ """ + + _resource_registry: dict[str, type[resource.Resource]] = {} + """Registry of the supported resourses. + + Dictionary of resource names (key) types (value). + """ + + _connection: connection.Connection + + def __init__( + self, + session: session.Session, + *, + service_type: str | None = None, + service_name: str | None = None, + interface: str | None = None, + region_name: str | None = None, + endpoint_override: str | None = None, + version: str | None = None, + auth: plugin.BaseAuthPlugin | None = None, + user_agent: str | None = None, + connect_retries: int | None = None, + logger: logging.Logger | None = None, + allow: dict[str, ty.Any] | None = None, + additional_headers: collections.abc.MutableMapping[str, str] + | None = None, + client_name: str | None = None, + client_version: str | None = None, + allow_version_hack: bool | None = None, + global_request_id: str | None = None, + min_version: str | None = None, + max_version: str | None = None, + default_microversion: str | None = None, + status_code_retries: int | None = None, + retriable_status_codes: list[int] | None = None, + raise_exc: bool | None = None, + rate_limit: float | None = None, + concurrency: int | None = None, + connect_retry_delay: float | None = None, + status_code_retry_delay: float | None = None, + # everything from here on is SDK-specific + statsd_client: statsd_client.StatsClient | None = None, + statsd_prefix: str | None = None, + prometheus_counter: prometheus_client.Counter | None = None, + prometheus_histogram: prometheus_client.Histogram | None = None, + influxdb_config: dict[str, ty.Any] | None = None, + influxdb_client: influxdb_client.InfluxDBClient | None = None, + ): + # NOTE(dtantsur): keystoneauth defaults retriable_status_codes to None, + # override it with a class-level value. 
+ if retriable_status_codes is None: + retriable_status_codes = self.retriable_status_codes + + super().__init__( + session=session, + service_type=service_type, + service_name=service_name, + interface=interface, + region_name=region_name, + endpoint_override=endpoint_override, + version=version, + auth=auth, + user_agent=user_agent, + connect_retries=connect_retries, + logger=logger, + allow=allow, + additional_headers=additional_headers, + client_name=client_name, + client_version=client_version, + allow_version_hack=allow_version_hack, + global_request_id=global_request_id, + min_version=min_version, + max_version=max_version, + default_microversion=default_microversion, + status_code_retries=status_code_retries, + retriable_status_codes=retriable_status_codes, + raise_exc=raise_exc, + rate_limit=rate_limit, + concurrency=concurrency, + connect_retry_delay=connect_retry_delay, + status_code_retry_delay=status_code_retry_delay, + ) + + self._statsd_client = statsd_client + self._statsd_prefix = statsd_prefix + self._prometheus_counter = prometheus_counter + self._prometheus_histogram = prometheus_histogram + self._influxdb_client = influxdb_client + self._influxdb_config = influxdb_config + if self.service_type: + log_name = f'openstack.{self.service_type}' + else: + log_name = 'openstack' + self.log = _log.setup_logging(log_name) + + def _get_cache_key_prefix(self, url: str) -> str: + """Calculate cache prefix for the url""" + if not self.service_type: + # narrow type + raise RuntimeError('expected service_type to be set') + + name_parts = self._extract_name( + url, self.service_type, self.session.get_project_id() + ) + + return '.'.join([self.service_type, *name_parts]) + + def _invalidate_cache( + self, + conn: connection.Connection, + key_prefix: str, + ) -> None: + """Invalidate all cache entries starting with given prefix""" + for k in set(conn._api_cache_keys): + if k.startswith(key_prefix): + conn._cache.delete(k) + conn._api_cache_keys.remove(k) + def 
request( + self, + url: str, + method: str, + error_message: str | None = None, + raise_exc: bool = False, + connect_retries: int = 1, + global_request_id: str | None = None, + *args: ty.Any, + **kwargs: ty.Any, + ) -> requests.Response: + conn = self._get_connection() + if not conn: + # narrow type + raise RuntimeError('no connection found') + + if not global_request_id: + # Per-request setting should take precedence + global_request_id = conn._global_request_id + + key = None + key_prefix = self._get_cache_key_prefix(url) + # The caller might want to force cache bypass. + skip_cache = kwargs.pop('skip_cache', False) + if conn.cache_enabled: + # Construct cache key. It consists of: + # service.name_parts.URL.str(kwargs) + key = '.'.join([key_prefix, url, str(kwargs)]) + + # Track cache key for invalidating possibility + conn._api_cache_keys.add(key) + + try: + if conn.cache_enabled and not skip_cache and method == 'GET': + assert key is not None # type narrow + # Get the object expiration time from config + # default to 0 to disable caching for this resource type + expiration_time = int( + conn._cache_expirations.get(key_prefix, 0) + ) + # Get from cache or execute and cache + _response = conn._cache.get_or_create( + key=key, + creator=super().request, + creator_args=( + [url, method], + { + 'connect_retries': connect_retries, + 'raise_exc': raise_exc, + 'global_request_id': global_request_id, + **kwargs, + }, + ), + expiration_time=expiration_time, + ) + response = ty.cast('requests.Response', _response) + else: + # invalidate cache if we send modification request or user + # asked for cache bypass + self._invalidate_cache(conn, key_prefix) + # Pass through the API request bypassing cache + response = super().request( + url, + method, + connect_retries=connect_retries, + raise_exc=raise_exc, + global_request_id=global_request_id, + **kwargs, + ) + + for h in response.history: + self._report_stats(h) + self._report_stats(response) + return response + except 
Exception as e: + # If we want metrics to be generated we also need to generate some + # in case of exceptions as well, so that timeouts and connection + # problems (especially when called from ansible) are being + # generated as well. + self._report_stats(None, url, method, e) + raise + + @functools.lru_cache(maxsize=256) + def _extract_name( + self, + url: str, + service_type: str | None = None, + project_id: str | None = None, + ) -> list[str]: + """Produce a key name to use in logging/metrics from the URL path. + + We want to be able to logic/metric sane general things, so we pull + the url apart to generate names. The function returns a list because + there are two different ways in which the elements want to be combined + below (one for logging, one for statsd) + + Some examples are likely useful:: + + /servers -> ['servers'] + /servers/{id} -> ['server'] + /servers/{id}/os-security-groups -> ['server', 'os-security-groups'] + /v2.0/networks.json -> ['networks'] + """ + if service_type is not None: + warnings.warn( + "The 'service_type' parameter is unnecesary and will be " + "removed in a future release.", + os_warnings.RemovedInSDK60Warning, + ) + + url_path = urllib.parse.urlparse(url).path.strip() + # Remove / from the beginning to keep the list indexes of interesting + # things consistent + if url_path.startswith('/'): + url_path = url_path[1:] + + # Special case for neutron, which puts .json on the end of urls + if url_path.endswith('.json'): + url_path = url_path[: -len('.json')] + + # Split url into parts and exclude potential project_id in some urls + url_parts = [ + x + for x in url_path.split('/') + if ( + x != project_id + and ( + not project_id + or (project_id and x != 'AUTH_' + project_id) + ) + ) + ] + if url_parts[-1] == 'detail': + # Special case detail calls + # GET /servers/detail + # returns ['servers', 'detail'] + name_parts = url_parts[-2:] + else: + # Strip leading version piece so that + # GET /v2.0/networks + # returns ['networks'] + 
if ( + url_parts[0] + and url_parts[0][0] == 'v' + and url_parts[0][1] + and url_parts[0][1].isdigit() + ): + url_parts = url_parts[1:] + name_parts = self._extract_name_consume_url_parts(url_parts) + + # Keystone Token fetching is a special case, so we name it "tokens" + # NOTE(gtema): there is no metric triggered for regular authorization + # with openstack.connect(), since it bypassed SDK and goes directly to + # keystoneauth1. If you need to measure performance of the token + # fetching - trigger a separate call. + if url_path.endswith('tokens'): + name_parts = ['tokens'] + + if not name_parts: + name_parts = ['discovery'] + + # Strip out anything that's empty or None + return [part for part in name_parts if part] + + def _extract_name_consume_url_parts( + self, url_parts: list[str] + ) -> list[str]: + """Pull out every other URL portion. + + For example, ``GET /servers/{id}/os-security-groups`` returns + ``['server', 'os-security-groups']``. + """ + name_parts = [] + for idx in range(0, len(url_parts)): + if not idx % 2 and url_parts[idx]: + # If we are on first segment and it end with 's' stip this 's' + # to differentiate LIST and GET_BY_ID + if ( + len(url_parts) > idx + 1 + and url_parts[idx][-1] == 's' + and url_parts[idx][-2:] != 'is' + ): + name_parts.append(url_parts[idx][:-1]) + else: + name_parts.append(url_parts[idx]) + + return name_parts + + def _report_stats( + self, + response: requests.Response | None, + url: str | None = None, + method: str | None = None, + exc: BaseException | None = None, + ) -> None: + self._report_stats_statsd(response, url, method, exc) + self._report_stats_prometheus(response, url, method, exc) + self._report_stats_influxdb(response, url, method, exc) + + def _report_stats_statsd( + self, + response: requests.Response | None, + url: str | None = None, + method: str | None = None, + exc: BaseException | None = None, + ) -> None: + if not self._statsd_prefix: + return None + + if not self._statsd_client: + return None + + 
try: + if response is not None and not url: + url = response.request.url + if response is not None and not method: + method = response.request.method -# The _check_resource decorator is used on BaseProxy methods to ensure that -# the `actual` argument is in fact the type of the `expected` argument. -# It does so under two cases: -# 1. When strict=False, if and only if `actual` is a Resource instance, -# it is checked to see that it's an instance of the `expected` class. -# This allows `actual` to be other types, such as strings, when it makes -# sense to accept a raw id value. -# 2. When strict=True, `actual` must be an instance of the `expected` class. -def _check_resource(strict=False): - def wrap(method): - def check(self, expected, actual=None, *args, **kwargs): - if (strict and actual is not None and not - isinstance(actual, resource.Resource)): - raise ValueError("A %s must be passed" % expected.__name__) - elif (isinstance(actual, resource.Resource) and not - isinstance(actual, expected)): - raise ValueError("Expected %s but received %s" % ( - expected.__name__, actual.__class__.__name__)) - - return method(self, expected, actual, *args, **kwargs) - return check - return wrap - - -class BaseProxy(object): - - def __init__(self, session): - self.session = session - - def _get_resource(self, resource_type, value, path_args=None): + # narrow types + assert url is not None + assert method is not None + assert self.service_type is not None + + name_parts = [ + normalize_metric_name(f) + for f in self._extract_name( + url, self.service_type, self.session.get_project_id() + ) + ] + key = '.'.join( + [ + self._statsd_prefix, + normalize_metric_name(self.service_type), + method, + '_'.join(name_parts), + ] + ) + with self._statsd_client.pipeline() as pipe: + if response is not None: + duration = int(response.elapsed.total_seconds() * 1000) + metric_name = f'{key}.{response.status_code!s}' + pipe.timing(metric_name, duration) + pipe.incr(metric_name) + if duration > 
1000: + pipe.incr(f'{key}.over_1000') + elif exc is not None: + pipe.incr(f'{key}.failed') + pipe.incr(f'{key}.attempted') + except Exception: + # We do not want errors in metric reporting ever break client + self.log.exception("Exception reporting metrics") + + def _report_stats_prometheus( + self, + response: requests.Response | None, + url: str | None = None, + method: str | None = None, + exc: BaseException | None = None, + ) -> None: + if not self._prometheus_counter: + return None + + if not self._prometheus_histogram: + return None + + if response is not None and not url: + url = response.request.url + if response is not None and not method: + method = response.request.method + parsed_url = urlparse(url) + endpoint = ( + f"{parsed_url.scheme}://{parsed_url.netloc}{parsed_url.path}" # type: ignore[str-bytes-safe] + ) + if response is not None: + labels = { + 'method': method, + 'endpoint': endpoint, + 'service_type': self.service_type, + 'status_code': response.status_code, + } + self._prometheus_counter.labels(**labels).inc() + self._prometheus_histogram.labels(**labels).observe( + response.elapsed.total_seconds() * 1000 + ) + + def _report_stats_influxdb( + self, + response: requests.Response | None, + url: str | None = None, + method: str | None = None, + exc: BaseException | None = None, + ) -> None: + if not self._influxdb_client: + return None + + if not self._influxdb_config: + return None + + # NOTE(gtema): status_code is saved both as tag and field to give + # ability showing it as a value and not only as a legend. + # However Influx is not ok with having same name in tags and fields, + # therefore use different names. 
+ if response is not None and not url: + url = response.request.url + if response is not None and not method: + method = response.request.method + tags = { + 'method': method, + 'name': '_'.join( + [ + normalize_metric_name(f) + for f in self._extract_name( + url, self.service_type, self.session.get_project_id() + ) + ] + ), + } + fields = {'attempted': 1} + if response is not None: + fields['duration'] = int(response.elapsed.total_seconds() * 1000) + tags['status_code'] = str(response.status_code) + # Note(gtema): emit also status_code as a value (counter) + fields[str(response.status_code)] = 1 + fields[f'{method}.{response.status_code}'] = 1 + # Note(gtema): status_code field itself is also very helpful on the + # graphs to show what was the code, instead of counting its + # occurences + fields['status_code_val'] = response.status_code + elif exc: + fields['failed'] = 1 + if 'additional_metric_tags' in self._influxdb_config: + tags.update(self._influxdb_config['additional_metric_tags']) + measurement = ( + self._influxdb_config.get('measurement', 'openstack_api') + if self._influxdb_config + else 'openstack_api' + ) + # Note(gtema) append service name into the measurement name + measurement = f'{measurement}.{self.service_type}' + data = [{'measurement': measurement, 'tags': tags, 'fields': fields}] + try: + self._influxdb_client.write_points(data) + except Exception: + self.log.exception('Error writing statistics to InfluxDB') + + def _get_connection(self) -> connection.Connection | None: + """Get the Connection object associated with this Proxy. + + When the Session is created, a reference to the Connection is attached + to the ``_sdk_connection`` attribute. We also add a reference to it + directly on ourselves. Use one of them. 
+ """ + return getattr( + self, '_connection', getattr(self.session, '_sdk_connection', None) + ) + + def _get_resource( + self, + resource_type: type[resource.ResourceT], + value: None | str | resource.ResourceT | utils.Munch, + **attrs: ty.Any, + ) -> resource.ResourceT: """Get a resource object to work on :param resource_type: The type of resource to operate on. This should - be a subclass of - :class:`~openstack.resource.Resource` with a - ``from_id`` method. + be a subclass of :class:`~openstack.resource.Resource` with a + ``from_id`` method. :param value: The ID of a resource or an object of ``resource_type`` - class if using an existing instance, or None to create a - new instance. - :param path_args: A dict containing arguments for forming the request - URL, if needed. + class if using an existing instance, or ``utils.Munch``, + or None to create a new instance. + :param attrs: A dict containing arguments for forming the request + URL, if needed. """ + conn = self._get_connection() if value is None: # Create a bare resource - res = resource_type() + res = resource_type.new(connection=conn, **attrs) + elif isinstance(value, dict) and not isinstance( + value, resource.Resource + ): + res = resource_type._from_munch(value, connection=conn) + res._update(**attrs) elif not isinstance(value, resource_type): # Create from an ID - args = {resource_type.id_attribute: - resource.Resource.get_id(value)} - res = resource_type.existing(**args) + res = resource_type.new(id=value, connection=conn, **attrs) else: # An existing resource instance + if not isinstance(value, resource_type): + raise ValueError( + f'Expected {resource_type.__name__} but received ' + f'{value.__class__.__name__}' + ) res = value - - # Set any intermediate path arguments, but don't overwrite Nones. 
- if path_args is not None: - res.update_attrs(ignore_none=True, **path_args) + res._update(**attrs) return res - def _find(self, resource_type, name_or_id, path_args=None, - ignore_missing=True): + def _get_uri_attribute( + self, + child: resource.Resource | str, + parent: resource.Resource | str | None, + name: str, + ) -> str: + """Get a value to be associated with a URI attribute + + `child` will not be None here as it's a required argument + on the proxy method. `parent` is allowed to be None if `child` + is an actual resource, but when an ID is given for the child + one must also be provided for the parent. An example of this + is that a parent is a Server and a child is a ServerInterface. + """ + if parent is None: + value = getattr(child, name) + assert isinstance(value, str) # narrow type + return value + + return resource.Resource._get_id(parent) + + @ty.overload + def _find( + self, + resource_type: type[resource.ResourceT], + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + **attrs: ty.Any, + ) -> resource.ResourceT | None: ... + + @ty.overload + def _find( + self, + resource_type: type[resource.ResourceT], + name_or_id: str, + ignore_missing: ty.Literal[False], + **attrs: ty.Any, + ) -> resource.ResourceT: ... + + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + def _find( + self, + resource_type: type[resource.ResourceT], + name_or_id: str, + ignore_missing: bool, + **attrs: ty.Any, + ) -> resource.ResourceT | None: ... + + def _find( + self, + resource_type: type[resource.ResourceT], + name_or_id: str, + ignore_missing: bool = True, + **attrs: ty.Any, + ) -> resource.ResourceT | None: """Find a resource + :param resource_type: The type of resource to find. This should be a + :class:`~openstack.resource.Resource` subclass. :param name_or_id: The name or ID of a resource to find. 
:param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict attrs: Attributes to be passed onto the + :meth:`~openstack.resource.Resource.find` + method, such as query parameters. :returns: An instance of ``resource_type`` or None """ - return resource_type.find(self.session, name_or_id, - path_args=path_args, - ignore_missing=ignore_missing) + return resource_type.find( + self, name_or_id, ignore_missing=ignore_missing, **attrs + ) - @_check_resource(strict=False) - def _delete(self, resource_type, value, path_args=None, - ignore_missing=True): + def _delete( + self, + resource_type: type[resource.ResourceT], + value: str | resource.ResourceT | None, + ignore_missing: bool = True, + **attrs: ty.Any, + ) -> resource.ResourceT | None: """Delete a resource - :param resource_type: The type of resource to delete. This should - be a :class:`~openstack.resource.Resource` - subclass with a ``from_id`` method. - :param value: The value to delete. Can be either the ID of a - resource or a :class:`~openstack.resource.Resource` - subclass. - :param path_args: A dict containing arguments for forming the request - URL, if needed. + :param resource_type: The type of resource to delete. This should be a + :class:`~openstack.resource.Resource` subclass. + :param value: The resource to delete. This can be the ID of a resource, + a :class:`~openstack.resource.Resource` subclass instance, or None + for resources that don't have their own identifier or have + identifiers with multiple parts. If None, you must pass these other + identifiers as kwargs. 
:param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent resource. - + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent resource. + :param dict attrs: Attributes to be used to form the request URL such + as the ID of a parent resource. :returns: The result of the ``delete`` :raises: ``ValueError`` if ``value`` is a - :class:`~openstack.resource.Resource` that doesn't match - the ``resource_type``. - :class:`~openstack.exceptions.ResourceNotFound` when - ignore_missing if ``False`` and a nonexistent resource - is attempted to be deleted. - + :class:`~openstack.resource.Resource` that doesn't match + the ``resource_type``. + :class:`~openstack.exceptions.NotFoundException` when + ignore_missing if ``False`` and a nonexistent resource + is attempted to be deleted. 
""" - res = self._get_resource(resource_type, value, path_args) + res = self._get_resource(resource_type, value, **attrs) try: - rv = res.delete(self.session) - except exceptions.NotFoundException as e: + rv = res.delete(self) + except exceptions.NotFoundException: if ignore_missing: return None - else: - # Reraise with a more specific type and message - raise exceptions.ResourceNotFound( - message="No %s found for %s" % - (resource_type.__name__, value), - details=e.details, response=e.response, - request_id=e.request_id, url=e.url, method=e.method, - http_status=e.http_status, cause=e.cause) + raise return rv - @_check_resource(strict=False) - def _update(self, resource_type, value, path_args=None, **attrs): + def _update( + self, + resource_type: type[resource.ResourceT], + value: str | resource.ResourceT | None, + base_path: str | None = None, + **attrs: ty.Any, + ) -> resource.ResourceT: """Update a resource - :param resource_type: The type of resource to update. - :type resource_type: :class:`~openstack.resource.Resource` - :param value: The resource to update. This must either be a - :class:`~openstack.resource.Resource` or an id - that corresponds to a resource. - :param path_args: A dict containing arguments for forming the request - URL, if needed. - :param **attrs: Attributes to update on a Resource object. - These attributes will be used in conjunction with - ``resource_type``. + :param resource_type: The type of resource to update. This should be a + :class:`~openstack.resource.Resource` subclass. + :param value: The resource to update. This can be the ID of a resource, + a :class:`~openstack.resource.Resource` subclass instance, or None + for resources that don't have their own identifier or have + identifiers with multiple parts. If None, you must pass these other + identifiers as kwargs. + :param str base_path: Base part of the URI for updating resources, if + different from + :data:`~openstack.resource.Resource.base_path`. 
+ :param dict attrs: Attributes to be passed onto the + :meth:`~openstack.resource.Resource.update` + method to be updated. These should correspond + to either :class:`~openstack.resource.Body` + or :class:`~openstack.resource.Header` + values on this resource. :returns: The result of the ``update`` :rtype: :class:`~openstack.resource.Resource` """ - res = self._get_resource(resource_type, value, path_args) - res.update_attrs(attrs) - return res.update(self.session) + res = self._get_resource(resource_type, value, **attrs) + return res.commit(self, base_path=base_path) - def _create(self, resource_type, path_args=None, **attrs): + def _create( + self, + resource_type: type[resource.ResourceT], + base_path: str | None = None, + **attrs: ty.Any, + ) -> resource.ResourceT: """Create a resource from attributes - :param resource_type: The type of resource to create. - :type resource_type: :class:`~openstack.resource.Resource` - :param path_args: A dict containing arguments for forming the request - URL, if needed. - :param **attrs: Attributes from which to create a Resource object. - These attributes will be used in conjunction with - ``resource_type``. + :param resource_type: The type of resource to create. This should be a + :class:`~openstack.resource.Resource` subclass. + :param base_path: Base part of the URI for creating resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param dict attrs: Attributes to be passed onto the + :meth:`~openstack.resource.Resource.create` + method to be created. These should correspond + to either :class:`~openstack.resource.Body` + or :class:`~openstack.resource.Header` + values on this resource. :returns: The result of the ``create`` :rtype: :class:`~openstack.resource.Resource` """ - res = resource_type.new(**attrs) - if path_args is not None: - res.update_attrs(path_args) - return res.create(self.session) + # Check for attributes whose names conflict with the parameters + # specified in the method. 
+ conflicting_attrs = attrs.get('__conflicting_attrs', {}) + if conflicting_attrs: + for k, v in conflicting_attrs.items(): + attrs[k] = v + attrs.pop('__conflicting_attrs') + conn = self._get_connection() + res = resource_type.new(connection=conn, **attrs) + return res.create(self, base_path=base_path) - @_check_resource(strict=False) - def _get(self, resource_type, value=None, path_args=None, args=None): - """Get a resource + def _bulk_create( + self, + resource_type: type[resource.ResourceT], + data: list[dict[str, ty.Any]], + base_path: str | None = None, + ) -> ty.Generator[resource.ResourceT, None, None]: + """Create a resource from attributes - :param resource_type: The type of resource to get. - :type resource_type: :class:`~openstack.resource.Resource` - :param value: The value to get. Can be either the ID of a - resource or a :class:`~openstack.resource.Resource` - subclass. - :param path_args: A dict containing arguments for forming the request - URL, if needed. - :param args: A optional dict containing arguments that will be - translated into query strings when forming the request URL. - - :returns: The result of the ``get`` + :param resource_type: The type of resource to create. This should be a + :class:`~openstack.resource.Resource` subclass. + :param data: List of attributes dicts to be passed onto the + :meth:`~openstack.resource.Resource.create` + method to be created. These should correspond + to either :class:`~openstack.resource.Body` + or :class:`~openstack.resource.Header` + values on this resource. + :param str base_path: Base part of the URI for creating resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + + :returns: A generator of Resource objects. 
:rtype: :class:`~openstack.resource.Resource` """ - res = self._get_resource(resource_type, value, path_args) + return resource_type.bulk_create(self, data, base_path=base_path) - try: - return res.get(self.session, args=args) - except exceptions.NotFoundException as e: - raise exceptions.ResourceNotFound( - message="No %s found for %s" % - (resource_type.__name__, value), - details=e.details, response=e.response, - request_id=e.request_id, url=e.url, method=e.method, - http_status=e.http_status, cause=e.cause) - - def _list(self, resource_type, value=None, paginated=False, - path_args=None, **query): + def _get( + self, + resource_type: type[resource.ResourceT], + value: str | resource.ResourceT | None = None, + requires_id: bool = True, + base_path: str | None = None, + skip_cache: bool = False, + **attrs: ty.Any, + ) -> resource.ResourceT: + """Fetch a resource + + :param resource_type: The type of resource to get. This should be a + :class:`~openstack.resource.Resource` subclass. + :param value: The resource to get. This can be the ID of a resource, + a :class:`~openstack.resource.Resource` subclass instance, or None + for resources that don't have their own identifier or have + identifiers with multiple parts. If None, you must pass these other + identifiers as kwargs. + :param requires_id: Whether the resource is identified by an ID or not. + :param base_path: Base part of the URI for fetching resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :param skip_cache: A boolean indicating whether optional API + cache should be skipped for this invocation. + :param attrs: Attributes to be passed onto the + :meth:`~openstack.resource.Resource.get` + method. These should correspond + to either :class:`~openstack.resource.Body` + or :class:`~openstack.resource.Header` + values on this resource. 
+ + :returns: The result of the ``fetch`` + :rtype: :class:`~openstack.resource.Resource` + """ + res = self._get_resource(resource_type, value, **attrs) + + return res.fetch( + self, + requires_id=requires_id, + base_path=base_path, + skip_cache=skip_cache, + error_message=f"No {resource_type.__name__} found for {value}", + ) + + def _list( + self, + resource_type: type[resource.ResourceT], + paginated: bool = True, + base_path: str | None = None, + jmespath_filters: str | None = None, + **attrs: ty.Any, + ) -> ty.Generator[resource.ResourceT, None, None]: """List a resource - :param resource_type: The type of resource to delete. This should - be a :class:`~openstack.resource.Resource` - subclass with a ``from_id`` method. - :param value: The resource to list. It can be the ID of a resource, or - a :class:`~openstack.resource.Resource` object. When set - to None, a new bare resource is created. + :param resource_type: The type of resource to list. This should + be a :class:`~openstack.resource.Resource` + subclass with a ``from_id`` method. :param bool paginated: When set to ``False``, expect all of the data - to be returned in one response. When set to - ``True``, the resource supports data being - returned across multiple pages. - :param path_args: A dictionary containing arguments for use when - forming the request URL for resource retrieval. - :param kwargs **query: Keyword arguments that are sent to the list - method, which are then attached as query - parameters on the request URL. + to be returned in one response. When set to + ``True``, the resource supports data being + returned across multiple pages. + :param str base_path: Base part of the URI for listing resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :param str jmespath_filters: A string containing a jmespath expression + for further filtering. + + :param dict attrs: Attributes to be passed onto the + :meth:`~openstack.resource.Resource.list` method. 
These should + correspond to either :class:`~openstack.resource.URI` values + or appear in :data:`~openstack.resource.Resource._query_mapping`. :returns: A generator of Resource objects. :raises: ``ValueError`` if ``value`` is a - :class:`~openstack.resource.Resource` that doesn't match - the ``resource_type``. + :class:`~openstack.resource.Resource` that doesn't match + the ``resource_type``. """ - res = self._get_resource(resource_type, value, path_args) + # Check for attributes whose names conflict with the parameters + # specified in the method. + conflicting_attrs = attrs.get('__conflicting_attrs', {}) + if conflicting_attrs: + for k, v in conflicting_attrs.items(): + attrs[k] = v + attrs.pop('__conflicting_attrs') + + data = resource_type.list( + self, paginated=paginated, base_path=base_path, **attrs + ) + + if jmespath_filters and isinstance(jmespath_filters, str): + warnings.warn( + 'Support for jmespath-style filters is deprecated and will be ' + 'removed in a future release.', + os_warnings.RemovedInSDK60Warning, + ) + return jmespath.search(jmespath_filters, data) # type: ignore[no-any-return] - query = res.convert_ids(query) - return res.list(self.session, path_args=path_args, paginated=paginated, - params=query) + return data - def _head(self, resource_type, value=None, path_args=None): + def _head( + self, + resource_type: type[resource.ResourceT], + value: str | resource.ResourceT | None = None, + base_path: str | None = None, + **attrs: ty.Any, + ) -> resource.ResourceT: """Retrieve a resource's header :param resource_type: The type of resource to retrieve. :type resource_type: :class:`~openstack.resource.Resource` - :param value: The value of a specific resource to retreive headers - for. Can be either the ID of a resource, - a :class:`~openstack.resource.Resource` subclass, - or ``None``. - :param path_args: A dict containing arguments for forming the request - URL, if needed. 
+ :param value: The value of a specific resource to retrieve headers + for. Can be either the ID of a resource, + a :class:`~openstack.resource.Resource` subclass, + or ``None``. + :param str base_path: Base part of the URI for heading resources, if + different from + :data:`~openstack.resource.Resource.base_path`. + :param dict attrs: Attributes to be passed onto the + :meth:`~openstack.resource.Resource.head` method. + These should correspond to + :class:`~openstack.resource.URI` values. :returns: The result of the ``head`` call :rtype: :class:`~openstack.resource.Resource` """ - res = self._get_resource(resource_type, value, path_args) - - return res.head(self.session) - - def wait_for_status(self, value, status, failures=[], interval=2, - wait=120): - """Wait for a resource to be in a particular status. - - :param value: The resource to wait on to reach the status. The - resource must have a status attribute. - :type value: :class:`~openstack.resource.Resource` - :param status: Desired status of the resource. - :param list failures: Statuses that would indicate the transition - failed such as 'ERROR'. - :param interval: Number of seconds to wait between checks. - :param wait: Maximum number of seconds to wait for the change. - - :return: Method returns resource on success. - :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to status failed to occur in wait seconds. - :raises: :class:`~openstack.exceptions.ResourceFailure` resource - transitioned to one of the failure states. - :raises: :class:`~AttributeError` if the resource does not have a - status attribute - """ - return resource.wait_for_status(self.session, value, status, - failures, interval, wait) + res = self._get_resource(resource_type, value, **attrs) + return res.head(self, base_path=base_path) - def wait_for_delete(self, value, interval=2, wait=120): - """Wait for the resource to be deleted. 
+ def _get_cleanup_dependencies( + self, + ) -> dict[str, CleanupDependency] | None: + return None - :param value: The resource to wait on to be deleted. - :type value: :class:`~openstack.resource.Resource` - :param interval: Number of seconds to wait between checks. - :param wait: Maximum number of seconds to wait for the delete. + # TODO(stephenfin): Add type for filters. We expect the created_at or + # updated_at keys + def _service_cleanup( + self, + dry_run: bool = True, + client_status_queue: queue.Queue[resource.Resource] | None = None, + identified_resources: dict[str, resource.Resource] | None = None, + filters: dict[str, ty.Any] | None = None, + resource_evaluation_fn: ty.Callable[ + [ + resource.Resource, + dict[str, ty.Any] | None, + dict[str, resource.Resource] | None, + ], + bool, + ] + | None = None, + skip_resources: list[str] | None = None, + ) -> None: + return None - :return: Method returns resource on success. - :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to delete failed to occur in wait seconds. 
- """ - return resource.wait_for_delete(self.session, value, interval, wait) + def _service_cleanup_del_res( + self, + del_fn: ty.Callable[[resource.Resource], None], + obj: resource.Resource, + dry_run: bool = True, + client_status_queue: queue.Queue[resource.Resource] | None = None, + identified_resources: dict[str, resource.Resource] | None = None, + filters: dict[str, ty.Any] | None = None, + resource_evaluation_fn: ty.Callable[ + [ + resource.Resource, + dict[str, ty.Any] | None, + dict[str, resource.Resource] | None, + ], + bool, + ] + | None = None, + ) -> bool: + need_delete = False + try: + if resource_evaluation_fn and callable(resource_evaluation_fn): + # Ask a user-provided evaluation function if we need to delete + # the resource + need_del = resource_evaluation_fn( + obj, filters, identified_resources + ) + if isinstance(need_del, bool): + # Just double check function returned bool + need_delete = need_del + else: + need_delete = ( + self._service_cleanup_resource_filters_evaluation( + obj, filters=filters + ) + ) + + if need_delete: + if client_status_queue: + # Put into queue for client status info + client_status_queue.put(obj) + if identified_resources is not None: + # Put into internal dict shared between threads so that + # other services might know which other resources were + # identified + identified_resources[obj.id] = obj + if not dry_run: + del_fn(obj) + except Exception as e: + self.log.exception('Cannot delete resource %s: %s', obj, str(e)) + return need_delete + + def _service_cleanup_resource_filters_evaluation( + self, + obj: resource.Resource, + filters: dict[str, ty.Any] | None = None, + ) -> bool: + part_cond = [] + if filters is not None and isinstance(filters, dict): + for k, v in filters.items(): + try: + res_val = None + if k == 'created_at' and hasattr(obj, 'created_at'): + res_val = getattr(obj, 'created_at') + if k == 'updated_at' and hasattr(obj, 'updated_at'): + res_val = getattr(obj, 'updated_at') + if res_val: + res_date 
= iso8601.parse_date(res_val) + cmp_date = iso8601.parse_date(v) + if res_date and cmp_date and res_date <= cmp_date: + part_cond.append(True) + else: + part_cond.append(False) + else: + # There are filters set, but we can't get required + # attribute, so skip the resource + self.log.debug( + f'Requested cleanup attribute {k} is not ' + 'available on the resource' + ) + part_cond.append(False) + except Exception: + self.log.exception('Error during condition evaluation') + if all(part_cond): + return True + else: + return False + + def should_skip_resource_cleanup( + self, resource: str, skip_resources: list[str] | None = None + ) -> bool: + if skip_resources is None: + return False + + if self.service_type is None: + # to keep mypy happy - this should never happen + return False + + resource_name = f"{self.service_type.replace('-', '_')}.{resource}" + + if resource_name in skip_resources: + self.log.debug( + f"Skipping resource {resource_name} in project cleanup" + ) + return True + + return False + + +# TODO(stephenfin): Remove this and all users. Use of this generally indicates +# a missing Resource type. +def _json_response( + response: requests.Response, + error_message: str | None = None, +) -> requests.Response | ty.Any: + """Temporary method to use to bridge from ShadeAdapter to SDK calls.""" + exceptions.raise_from_response(response, error_message=error_message) + + if not response.content: + # This doesn't have any content + return response + + # Some REST calls do not return json content. Don't decode it. 
+ content_type = response.headers.get('Content-Type') + if not content_type or 'application/json' not in content_type: + return response + + try: + return response.json() + except JSONDecodeError: + return response diff --git a/openstack/proxy2.py b/openstack/proxy2.py deleted file mode 100644 index 6d528d8d08..0000000000 --- a/openstack/proxy2.py +++ /dev/null @@ -1,308 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import exceptions -from openstack import resource2 - - -# The _check_resource decorator is used on BaseProxy methods to ensure that -# the `actual` argument is in fact the type of the `expected` argument. -# It does so under two cases: -# 1. When strict=False, if and only if `actual` is a Resource instance, -# it is checked to see that it's an instance of the `expected` class. -# This allows `actual` to be other types, such as strings, when it makes -# sense to accept a raw id value. -# 2. When strict=True, `actual` must be an instance of the `expected` class. 
-def _check_resource(strict=False): - def wrap(method): - def check(self, expected, actual=None, *args, **kwargs): - if (strict and actual is not None and not - isinstance(actual, resource2.Resource)): - raise ValueError("A %s must be passed" % expected.__name__) - elif (isinstance(actual, resource2.Resource) and not - isinstance(actual, expected)): - raise ValueError("Expected %s but received %s" % ( - expected.__name__, actual.__class__.__name__)) - - return method(self, expected, actual, *args, **kwargs) - return check - return wrap - - -class BaseProxy(object): - - def __init__(self, session): - self.session = session - - def _get_resource(self, resource_type, value, **attrs): - """Get a resource object to work on - - :param resource_type: The type of resource to operate on. This should - be a subclass of - :class:`~openstack.resource2.Resource` with a - ``from_id`` method. - :param value: The ID of a resource or an object of ``resource_type`` - class if using an existing instance, or None to create a - new instance. - :param path_args: A dict containing arguments for forming the request - URL, if needed. - """ - if value is None: - # Create a bare resource - res = resource_type.new(**attrs) - elif not isinstance(value, resource_type): - # Create from an ID - res = resource_type.new(id=value, **attrs) - else: - # An existing resource instance - res = value - res._update(**attrs) - - return res - - def _get_uri_attribute(self, child, parent, name): - """Get a value to be associated with a URI attribute - - `child` will not be None here as it's a required argument - on the proxy method. `parent` is allowed to be None if `child` - is an actual resource, but when an ID is given for the child - one must also be provided for the parent. An example of this - is that a parent is a Server and a child is a ServerInterface. 
- """ - if parent is None: - value = getattr(child, name) - else: - value = resource2.Resource._get_id(parent) - return value - - def _find(self, resource_type, name_or_id, ignore_missing=True, - **attrs): - """Find a resource - - :param name_or_id: The name or ID of a resource to find. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource2. - :param dict attrs: Attributes to be passed onto the - :meth:`~openstack.resource2.Resource.find` - method, such as query parameters. - - :returns: An instance of ``resource_type`` or None - """ - return resource_type.find(self.session, name_or_id, - ignore_missing=ignore_missing, - **attrs) - - @_check_resource(strict=False) - def _delete(self, resource_type, value, ignore_missing=True, **attrs): - """Delete a resource - - :param resource_type: The type of resource to delete. This should - be a :class:`~openstack.resource2.Resource` - subclass with a ``from_id`` method. - :param value: The value to delete. Can be either the ID of a - resource or a :class:`~openstack.resource2.Resource` - subclass. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent resource2. - :param dict attrs: Attributes to be passed onto the - :meth:`~openstack.resource2.Resource.delete` - method, such as the ID of a parent resource. - - :returns: The result of the ``delete`` - :raises: ``ValueError`` if ``value`` is a - :class:`~openstack.resource2.Resource` that doesn't match - the ``resource_type``. - :class:`~openstack.exceptions.ResourceNotFound` when - ignore_missing if ``False`` and a nonexistent resource - is attempted to be deleted. 
- - """ - res = self._get_resource(resource_type, value, **attrs) - - try: - rv = res.delete(self.session) - except exceptions.NotFoundException as e: - if ignore_missing: - return None - else: - # Reraise with a more specific type and message - raise exceptions.ResourceNotFound( - message="No %s found for %s" % - (resource_type.__name__, value), - details=e.details, response=e.response, - request_id=e.request_id, url=e.url, method=e.method, - http_status=e.http_status, cause=e.cause) - - return rv - - @_check_resource(strict=False) - def _update(self, resource_type, value, **attrs): - """Update a resource - - :param resource_type: The type of resource to update. - :type resource_type: :class:`~openstack.resource2.Resource` - :param value: The resource to update. This must either be a - :class:`~openstack.resource2.Resource` or an id - that corresponds to a resource2. - :param dict attrs: Attributes to be passed onto the - :meth:`~openstack.resource2.Resource.update` - method to be updated. These should correspond - to either :class:`~openstack.resource2.Body` - or :class:`~openstack.resource2.Header` - values on this resource. - - :returns: The result of the ``update`` - :rtype: :class:`~openstack.resource2.Resource` - """ - res = self._get_resource(resource_type, value, **attrs) - return res.update(self.session) - - def _create(self, resource_type, **attrs): - """Create a resource from attributes - - :param resource_type: The type of resource to create. - :type resource_type: :class:`~openstack.resource2.Resource` - :param path_args: A dict containing arguments for forming the request - URL, if needed. - :param dict attrs: Attributes to be passed onto the - :meth:`~openstack.resource2.Resource.create` - method to be created. These should correspond - to either :class:`~openstack.resource2.Body` - or :class:`~openstack.resource2.Header` - values on this resource. 
- - :returns: The result of the ``create`` - :rtype: :class:`~openstack.resource2.Resource` - """ - res = resource_type.new(**attrs) - return res.create(self.session) - - @_check_resource(strict=False) - def _get(self, resource_type, value=None, requires_id=True, **attrs): - """Get a resource - - :param resource_type: The type of resource to get. - :type resource_type: :class:`~openstack.resource2.Resource` - :param value: The value to get. Can be either the ID of a - resource or a :class:`~openstack.resource2.Resource` - subclass. - :param dict attrs: Attributes to be passed onto the - :meth:`~openstack.resource2.Resource.get` - method. These should correspond - to either :class:`~openstack.resource2.Body` - or :class:`~openstack.resource2.Header` - values on this resource. - - :returns: The result of the ``get`` - :rtype: :class:`~openstack.resource2.Resource` - """ - res = self._get_resource(resource_type, value, **attrs) - - try: - return res.get(self.session, requires_id=requires_id) - except exceptions.NotFoundException as e: - raise exceptions.ResourceNotFound( - message="No %s found for %s" % - (resource_type.__name__, value), - details=e.details, response=e.response, - request_id=e.request_id, url=e.url, method=e.method, - http_status=e.http_status, cause=e.cause) - - def _list(self, resource_type, value=None, paginated=False, **attrs): - """List a resource - - :param resource_type: The type of resource to delete. This should - be a :class:`~openstack.resource2.Resource` - subclass with a ``from_id`` method. - :param value: The resource to list. It can be the ID of a resource, or - a :class:`~openstack.resource2.Resource` object. When set - to None, a new bare resource is created. - :param bool paginated: When set to ``False``, expect all of the data - to be returned in one response. When set to - ``True``, the resource supports data being - returned across multiple pages. 
- :param dict attrs: Attributes to be passed onto the - :meth:`~openstack.resource2.Resource.list` method. These should - correspond to either :class:`~openstack.resource2.URI` values - or appear in :data:`~openstack.resource2.Resource._query_mapping`. - - :returns: A generator of Resource objects. - :raises: ``ValueError`` if ``value`` is a - :class:`~openstack.resource2.Resource` that doesn't match - the ``resource_type``. - """ - res = self._get_resource(resource_type, value, **attrs) - return res.list(self.session, paginated=paginated, **attrs) - - def _head(self, resource_type, value=None, **attrs): - """Retrieve a resource's header - - :param resource_type: The type of resource to retrieve. - :type resource_type: :class:`~openstack.resource2.Resource` - :param value: The value of a specific resource to retreive headers - for. Can be either the ID of a resource, - a :class:`~openstack.resource2.Resource` subclass, - or ``None``. - :param dict attrs: Attributes to be passed onto the - :meth:`~openstack.resource2.Resource.head` method. - These should correspond to - :class:`~openstack.resource2.URI` values. - - :returns: The result of the ``head`` call - :rtype: :class:`~openstack.resource2.Resource` - """ - res = self._get_resource(resource_type, value, **attrs) - return res.head(self.session) - - def wait_for_status(self, value, status, failures=[], interval=2, - wait=120): - """Wait for a resource to be in a particular status. - - :param value: The resource to wait on to reach the status. The - resource must have a status attribute. - :type value: :class:`~openstack.resource2.Resource` - :param status: Desired status of the resource2. - :param list failures: Statuses that would indicate the transition - failed such as 'ERROR'. - :param interval: Number of seconds to wait between checks. - :param wait: Maximum number of seconds to wait for the change. - - :return: Method returns resource on success. 
- :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to status failed to occur in wait seconds. - :raises: :class:`~openstack.exceptions.ResourceFailure` resource - transitioned to one of the failure states. - :raises: :class:`~AttributeError` if the resource does not have a - status attribute - """ - return resource2.wait_for_status(self.session, value, status, - failures, interval, wait) - - def wait_for_delete(self, value, interval=2, wait=120): - """Wait for the resource to be deleted. - - :param value: The resource to wait on to be deleted. - :type value: :class:`~openstack.resource2.Resource` - :param interval: Number of seconds to wait between checks. - :param wait: Maximum number of seconds to wait for the delete. - - :return: Method returns resource on success. - :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to delete failed to occur in wait seconds. - """ - return resource2.wait_for_delete(self.session, value, interval, wait) diff --git a/openstack/tests/unit/metric/v1/__init__.py b/openstack/py.typed similarity index 100% rename from openstack/tests/unit/metric/v1/__init__.py rename to openstack/py.typed diff --git a/openstack/resource.py b/openstack/resource.py index 2bc559e717..fa30c22ffb 100644 --- a/openstack/resource.py +++ b/openstack/resource.py @@ -10,1025 +10,2502 @@ # License for the specific language governing permissions and limitations # under the License. -""" -The :class:`~openstack.resource.Resource` class is a base -class that represent a remote resource. Attributes of the resource -are defined by the responses from the server rather than in code so -that we don't have to try and keep up with all possible attributes -and extensions. This may be changed in the future. - -The :class:`~openstack.resource.prop` class is a helper for -definiting properties in a resource. 
- -For update management, :class:`~openstack.resource.Resource` -maintains a dirty list so when updating an object only the attributes -that have actually been changed are sent to the server. - -There is also some support here for lazy loading that needs improvement. +"""Base resource class. -There are plenty of examples of use of this class in the SDK code. +The :class:`~openstack.resource.Resource` class is a base +class that represent a remote resource. The attributes that +comprise a request or response for this resource are specified +as class members on the Resource subclass where their values +are of a component type, including :class:`~openstack.fields.Body`, +:class:`~openstack.fields.Header`, and :class:`~openstack.fields.URI`. + +For update management, :class:`~openstack.resource.Resource` employs +a series of :class:`~openstack.resource._ComponentManager` instances +to look after the attributes of that particular component type. This is +particularly useful for Body and Header types, so that only the values +necessary are sent in requests to the server. + +When making requests, each of the managers are looked at to gather the +necessary URI, body, and header data to build a request to be sent +via keystoneauth's sessions. Responses from keystoneauth are then +converted into this Resource class' appropriate components and types +and then returned to the caller. 
""" -import abc +from __future__ import annotations + +import builtins import collections -import copy +import collections.abc +import inspect import itertools -import time - -import six -from six.moves.urllib import parse as url_parse - +import operator +import typing as ty +import urllib.parse +import warnings + +import jsonpatch +from keystoneauth1 import adapter +from keystoneauth1 import discover +import requests +import typing_extensions as ty_ext + +from openstack import _log from openstack import exceptions -from openstack import format +from openstack import fields from openstack import utils +from openstack import warnings as os_warnings + +if ty.TYPE_CHECKING: + from openstack import connection + +LOG = _log.setup_logging(__name__) + +AdapterT = ty.TypeVar('AdapterT', bound=adapter.Adapter) +ResourceT = ty.TypeVar('ResourceT', bound='Resource') + + +# TODO(stephenfin): We should deprecate the 'type' and 'list_type' arguments +# for all of the below in favour of annotations. To that end, we have stuck +# with Any rather than generating super complex types +def Body( + name: str, + type: ty.Any | None = None, + default: ty.Any = None, + alias: str | None = None, + aka: str | None = None, + alternate_id: bool = False, + list_type: ty.Any | None = None, + coerce_to_default: bool = False, + deprecated: bool = False, + deprecation_reason: str | None = None, +) -> ty.Any: + return fields.Body( + name, + type=type, + default=default, + alias=alias, + aka=aka, + alternate_id=alternate_id, + list_type=list_type, + coerce_to_default=coerce_to_default, + deprecated=deprecated, + deprecation_reason=deprecation_reason, + ) + + +def Header( + name: str, + type: ty.Any | None = None, + default: ty.Any = None, + alias: str | None = None, + aka: str | None = None, + alternate_id: bool = False, + list_type: ty.Any | None = None, + coerce_to_default: bool = False, + deprecated: bool = False, + deprecation_reason: str | None = None, +) -> ty.Any: + return fields.Header( + 
name, + type=type, + default=default, + alias=alias, + aka=aka, + alternate_id=alternate_id, + list_type=list_type, + coerce_to_default=coerce_to_default, + deprecated=deprecated, + deprecation_reason=deprecation_reason, + ) + + +def URI( + name: str, + type: ty.Any | None = None, + default: ty.Any = None, + alias: str | None = None, + aka: str | None = None, + alternate_id: bool = False, + list_type: ty.Any | None = None, + coerce_to_default: bool = False, + deprecated: bool = False, + deprecation_reason: str | None = None, +) -> ty.Any: + return fields.URI( + name, + type=type, + default=default, + alias=alias, + aka=aka, + alternate_id=alternate_id, + list_type=list_type, + coerce_to_default=coerce_to_default, + deprecated=deprecated, + deprecation_reason=deprecation_reason, + ) + + +def Computed( + name: str, + type: ty.Any | None = None, + default: ty.Any = None, + alias: str | None = None, + aka: str | None = None, + alternate_id: bool = False, + list_type: ty.Any | None = None, + coerce_to_default: bool = False, + deprecated: bool = False, + deprecation_reason: str | None = None, +) -> ty.Any: + return fields.Computed( + name, + type=type, + default=default, + alias=alias, + aka=aka, + alternate_id=alternate_id, + list_type=list_type, + coerce_to_default=coerce_to_default, + deprecated=deprecated, + deprecation_reason=deprecation_reason, + ) + + +class _ComponentManager(collections.abc.MutableMapping): + """Storage of a component type""" + + attributes: dict[str, ty.Any] + + def __init__(self, attributes=None, synchronized=False): + self.attributes = dict() if attributes is None else attributes.copy() + self._dirty = set() if synchronized else set(self.attributes.keys()) + + def __getitem__(self, key): + return self.attributes[key] + + def __setitem__(self, key, value): + try: + orig = self.attributes[key] + except KeyError: + changed = True + else: + changed = orig != value + if changed: + self.attributes[key] = value + self._dirty.add(key) -class 
prop(object): - """A helper for defining properties in a resource. - - A prop defines some known attributes within a resource's values. - For example we know a User resource will have a name: - - >>> class User(Resource): - ... name = prop('name') - ... - >>> u = User() - >>> u.name = 'John Doe' - >>> print u['name'] - John Doe - - User objects can now be accessed via the User().name attribute. The 'name' - value we pass as an attribute is the name of the attribute in the message. - This means that you don't need to use the same name for your attribute as - will be set within the object. For example: + def __delitem__(self, key): + del self.attributes[key] + self._dirty.add(key) - >>> class User(Resource): - ... name = prop('userName') - ... - >>> u = User() - >>> u.name = 'John Doe' - >>> print u['userName'] - John Doe + def __iter__(self): + return iter(self.attributes) - There is limited validation ability in props. + def __len__(self): + return len(self.attributes) - You can validate the type of values that are set: + @property + def dirty(self) -> dict[str, ty.Any]: + """Return a dict of modified attributes""" + return {key: self.attributes.get(key, None) for key in self._dirty} - >>> class User(Resource): - ... name = prop('userName') - ... age = prop('age', type=int) - ... - >>> u = User() - >>> u.age = 'thirty' - TypeError: Invalid type for attr age + def clean(self, only: collections.abc.Iterable[str] | None = None) -> None: + """Signal that the resource no longer has modified attributes. 
+ :param only: an optional set of attributes to no longer consider + changed + """ + if only: + self._dirty = self._dirty - set(only) + else: + self._dirty = set() + + +class _Request: + """Prepared components that go into a KSA request""" + + def __init__(self, url, body, headers): + self.url = url + self.body = body + self.headers = headers + + +class QueryMapping(ty.TypedDict): + name: ty_ext.NotRequired[str] + type: ty_ext.NotRequired[ty.Callable[[ty.Any, type[ResourceT]], ResourceT]] + + +class QueryParameters: + def __init__( + self, + *names: str, + include_pagination_defaults: bool = True, + **mappings: str | QueryMapping, + ): + """Create a dict of accepted query parameters + + :param names: List of strings containing client-side query parameter + names. Each name in the list maps directly to the name + expected by the server. + :param include_pagination_defaults: If true, include default pagination + parameters, ``limit`` and ``marker``. These are the most common + query parameters used for listing resources in OpenStack APIs. + :param mappings: Key-value pairs where the key is the client-side + name we'll accept here and the value is the name + the server expects, e.g, ``changes_since=changes-since``. + Additionally, a value can be a dict with optional keys: + + - ``name`` - server-side name, + - ``type`` - callable to convert from client to server + representation + """ + self._mapping: dict[str, str | QueryMapping] = {} + if include_pagination_defaults: + self._mapping.update({"limit": "limit", "marker": "marker"}) + self._mapping.update({name: name for name in names}) + self._mapping.update(mappings) + + def _validate(self, query, base_path=None, allow_unknown_params=False): + """Check that supplied query keys match known query mappings + + :param dict query: Collection of key-value pairs where each key is the + client-side parameter name or server side name. + :param base_path: Formatted python string of the base url path for + the resource. 
+ :param allow_unknown_params: Exclude query params not known by the + resource. + + :returns: Filtered collection of the supported QueryParameters + """ + expected_params = list(self._mapping) + expected_params.extend( + value.get('name', key) if isinstance(value, dict) else value + for key, value in self._mapping.items() + ) + + if base_path: + expected_params += utils.get_string_format_keys(base_path) + + invalid_keys = set(query) - set(expected_params) + if not invalid_keys: + return query + else: + if not allow_unknown_params: + raise exceptions.InvalidResourceQuery( + message="Invalid query params: {}".format( + ",".join(invalid_keys) + ), + extra_data=invalid_keys, + ) + else: + known_keys = set(query).intersection(set(expected_params)) + return {k: query[k] for k in known_keys} - By specifying an alias attribute name, that alias will be read when the - primary attribute name does not appear within the resource: + def _transpose(self, query, resource_type): + """Transpose the keys in query based on the mapping - >>> class User(Resource): - ... name = prop('address', alias='location') - ... - >>> u = User(location='Far Away') - >>> print u['address'] - Far Away - """ + If a query is supplied with its server side name, we will still use + it, but take preference to the client-side name when both are supplied. - def __init__(self, name, alias=None, type=None, default=None): - self.name = name - self.type = type - self.alias = alias - self.default = default + :param dict query: Collection of key-value pairs where each key is the + client-side parameter name to be transposed to its + server side name. + :param resource_type: Class of a resource. 
+ """ + result = {} + for client_side, server_side in self._mapping.items(): + if isinstance(server_side, dict): + name = server_side.get('name', client_side) + type_ = server_side.get('type') + else: + name = server_side + type_ = None - def __get__(self, instance, owner): - if instance is None: - return None - try: - value = instance[self.name] - # self.type() should not be called on None objects. - if value is None: - return None - except KeyError: + # NOTE(dtantsur): a small hack to be compatible with both + # single-argument (like int) and double-argument type functions. try: - value = instance[self.alias] - except (KeyError, AttributeError): - # If we either don't find the key or we don't have an alias - return self.default - - if self.type and not isinstance(value, self.type): - if issubclass(self.type, Resource): - if isinstance(value, six.string_types): - value = self.type({self.type.id_attribute: value}) - else: - value = self.type(value) - elif issubclass(self.type, format.Formatter): - value = self.type.deserialize(value) + provide_resource_type = ( + len(inspect.getfullargspec(type_).args) > 1 + ) + except TypeError: + provide_resource_type = False + + if client_side in query: + value = query[client_side] + elif name in query: + value = query[name] else: - value = self.type(value) - - return value + continue - def __set__(self, instance, value): - if (self.type and not isinstance(value, self.type) and - value != self.default): - if issubclass(self.type, Resource): - if isinstance(value, six.string_types): - value = self.type({self.type.id_attribute: value}) + if type_ is not None: + if provide_resource_type: + result[name] = type_(value, resource_type) else: - value = self.type(value) - elif issubclass(self.type, format.Formatter): - value = self.type.serialize(value) + result[name] = type_(value) # type: ignore else: - value = str(self.type(value)) # validate to fail fast - - # If we already have a value set for the alias name, pop it out - # and 
store the real name instead. This happens when the alias - # has the same name as this prop is named. - if self.alias in instance._attrs: - instance._attrs.pop(self.alias) - - instance[self.name] = value + result[name] = value + return result - def __delete__(self, instance): - try: - del instance[self.name] - except KeyError: - try: - del instance[self.alias] - except KeyError: - pass +class ResourceMixinProtocol(ty.Protocol): + id: str + base_path: str -#: Key in attributes for header properties -HEADERS = 'headers' - - -class header(prop): - """A helper for defining header properties in a resource. - - This property should be used for values passed in the header of a resource. - Header values are stored in a special 'headers' attribute of a resource. - Using this property will make it easier for users to access those values. - For example, and object store container: - - >>> class Container(Resource): - ... name = prop("name") - ... object_count = header("x-container-object-count") - ... - >>> c = Container({name='pix'}) - >>> c.head(session) - >>> print c["headers"]["x-container-object-count"] - 4 - >>> print c.object_count - 4 - - The first print shows accessing the header value without the property - and the second print shows accessing the header with the property helper. - """ - - def _get_headers(self, instance): - if instance is None: - return None - if HEADERS in instance: - return instance[HEADERS] - return None + _body: _ComponentManager + _header: _ComponentManager + _uri: _ComponentManager + _computed: _ComponentManager - def __get__(self, instance, owner): - headers = self._get_headers(instance) - return super(header, self).__get__(headers, owner) + @classmethod + def _get_session(cls, session: AdapterT) -> AdapterT: ... 
- def __set__(self, instance, value): - headers = self._get_headers(instance) - if headers is None: - headers = instance._attrs[HEADERS] = {} - headers[self.name] = value - instance.set_headers(headers) + @classmethod + def _get_microversion(cls, session: adapter.Adapter) -> str | None: ... -@six.add_metaclass(abc.ABCMeta) -class Resource(collections.MutableMapping): +class Resource(dict): + # TODO(mordred) While this behaves mostly like a munch for the purposes + # we need, sub-resources, such as Server.security_groups, which is a list + # of dicts, will contain lists of real dicts, not lists of munch-like dict + # objects. We should probably figure out a Resource class, perhaps + # SubResource or something, that we can use to define the data-model of + # complex object attributes where those attributes are not already covered + # by a different resource such as Server.image which should ultimately + # be an Image. We subclass dict so that things like json.dumps and pprint + # will work properly. #: Singular form of key for resource. - resource_key = None - #: Common name for resource. - resource_name = None + resource_key: str | None = None #: Plural form of key for resource. - resources_key = None + resources_key: str | None = None + #: Key used for pagination links + pagination_key: str | None = None - #: Attribute key associated with the id for this resource. - id_attribute = 'id' - #: Attribute key associated with the name for this resource. - name_attribute = 'name' - #: Attribute key associated with 'location' from response headers - location = header('location') + #: The ID of this resource. + id: str = Body("id") - #: The base part of the url for this resource. - base_path = '' + #: The name of this resource. + name: str = Body("name") + #: The OpenStack location of this resource. + location: dict[str, ty.Any] = Computed('location') - #: The service associated with this resource to find the service URL. 
- service = None + #: Mapping of accepted query parameter names. + _query_mapping = QueryParameters() + + #: The base part of the URI for this resource. + base_path: str = "" #: Allow create operation for this resource. allow_create = False - #: Allow retrieve/get operation for this resource. - allow_retrieve = False + #: Allow get operation for this resource. + allow_fetch = False #: Allow update operation for this resource. - allow_update = False + allow_commit = False #: Allow delete operation for this resource. allow_delete = False #: Allow list operation for this resource. allow_list = False #: Allow head operation for this resource. allow_head = False - - patch_update = False - - def __init__(self, attrs=None, loaded=False): - """Construct a Resource to interact with a service's REST API. - - The Resource class offers two class methods to construct - resource objects, which are preferrable to entering through - this initializer. See :meth:`Resource.new` and - :meth:`Resource.existing`. - - :param dict attrs: The attributes to set when constructing - this Resource. - :param bool loaded: ``True`` if this Resource exists on - the server, ``False`` if it does not. + #: Allow patch operation for this resource. + allow_patch = False + + #: Commits happen without header or body being dirty. + allow_empty_commit = False + + #: Method for committing a resource (PUT, PATCH, POST) + commit_method = "PUT" + #: Method for creating a resource (POST, PUT) + create_method = "POST" + #: Whether commit uses JSON patch format. + commit_jsonpatch = False + + #: Do calls for this resource require an id + requires_id = True + #: Whether create requires an ID (determined from method if None). + create_requires_id: bool | None = None + #: Whether create should exclude ID in the body of the request. 
+ create_exclude_id_from_body = False + #: Do responses for this resource have bodies + has_body = True + #: Does create returns a body (if False requires ID), defaults to has_body + create_returns_body: bool | None = None + + #: Maximum microversion to use for getting/creating/updating the Resource + _max_microversion: str | None = None + #: API microversion (string or None) this Resource was loaded with + microversion = None + + _connection = None + _body: _ComponentManager + _header: _ComponentManager + _uri: _ComponentManager + _computed: _ComponentManager + _original_body: dict[str, ty.Any] = {} + _store_unknown_attrs_as_properties = False + _allow_unknown_attrs_in_body = False + _unknown_attrs_in_body: dict[str, ty.Any] = {} + + # Placeholder for aliases as dict of {__alias__:__original} + _attr_aliases: dict[str, str] = {} + + def __init__(self, _synchronized=False, connection=None, **attrs): + """The base resource + + :param bool _synchronized: + This is not intended to be used directly. See + :meth:`~openstack.resource.Resource.new` and + :meth:`~openstack.resource.Resource.existing`. + :param openstack.connection.Connection connection: + Reference to the Connection being used. Defaults to None to allow + Resource objects to be used without an active Connection, such as + in unit tests. Use of ``self._connection`` in Resource code should + protect itself with a check for None. """ - self._attrs = {} if attrs is None else attrs.copy() - self._dirty = set() if loaded else set(self._attrs.keys()) - self.update_attrs(self._attrs) - self._loaded = loaded - - def __repr__(self): - return "%s.%s(attrs=%s, loaded=%s)" % (self.__module__, - self.__class__.__name__, - self._attrs, self._loaded) + self._connection = connection + self.microversion = attrs.pop('microversion', None) + + self._unknown_attrs_in_body = {} + + # NOTE: _collect_attrs modifies **attrs in place, removing + # items as they match up with any of the body, header, + # or uri mappings. 
+ body, header, uri, computed = self._collect_attrs(attrs) + + if self._allow_unknown_attrs_in_body: + self._unknown_attrs_in_body.update(attrs) + + self._body = _ComponentManager( + attributes=body, synchronized=_synchronized + ) + self._header = _ComponentManager( + attributes=header, synchronized=_synchronized + ) + self._uri = _ComponentManager( + attributes=uri, synchronized=_synchronized + ) + self._computed = _ComponentManager( + attributes=computed, synchronized=_synchronized + ) + if self.commit_jsonpatch or self.allow_patch: + # We need the original body to compare against + if _synchronized: + self._original_body = self._body.attributes.copy() + elif self.id: + # Never record ID as dirty. + self._original_body = {self._alternate_id() or 'id': self.id} + else: + self._original_body = {} + if self._store_unknown_attrs_as_properties: + # When storing of unknown attributes is requested - ensure + # we have properties attribute (with type=None) + self._store_unknown_attrs_as_properties = ( + hasattr(self.__class__, 'properties') + and self.__class__.properties.type is None + ) + + self._update_location() + + for attr, component in self._attributes_iterator(): + if component.aka: + # Register alias for the attribute (local name) + self._attr_aliases[component.aka] = attr + + # TODO(mordred) This is terrible, but is a hack at the moment to ensure + # json.dumps works. The json library does basically if not obj: and + # obj.items() ... but I think the if not obj: is short-circuiting down + # in the C code and thus since we don't store the data in self[] it's + # always False even if we override __len__ or __bool__. 
+ dict.update(self, self.to_dict()) @classmethod - def get_resource_name(cls): - if cls.resource_name: - return cls.resource_name - if cls.resource_key: - return cls.resource_key - return cls().__class__.__name__ + def _attributes_iterator( + cls, components=tuple([fields.Body, fields.Header]) + ): + """Iterator over all Resource attributes""" + # isinstance stricly requires this to be a tuple + # Since we're looking at class definitions we need to include + # subclasses, so check the whole MRO. + for klass in cls.__mro__: + for attr, component in klass.__dict__.items(): + if isinstance(component, components): + yield attr, component - ## - # CONSTRUCTORS - ## + def __repr__(self): + pairs = [ + "{}={}".format(k, v if v is not None else 'None') + for k, v in dict( + itertools.chain( + self._body.attributes.items(), + self._header.attributes.items(), + self._uri.attributes.items(), + self._computed.attributes.items(), + ) + ).items() + ] + args = ", ".join(pairs) + + return f"{self.__module__}.{self.__class__.__name__}({args})" + + def __eq__(self, comparand): + """Return True if another resource has the same contents""" + if not isinstance(comparand, Resource): + return False + return all( + [ + self._body.attributes == comparand._body.attributes, + self._header.attributes == comparand._header.attributes, + self._uri.attributes == comparand._uri.attributes, + self._computed.attributes == comparand._computed.attributes, + ] + ) + + def __getattribute__(self, name): + """Return an attribute on this instance + + This is mostly a pass-through except for a specialization on + the 'id' name, as this can exist under a different name via the + `alternate_id` argument to resource.Body. 
+ """ + if name == "id": + if name in self._body: + return self._body[name] + else: + key = self._alternate_id() + if key: + return self._body.get(key) + else: + try: + return object.__getattribute__(self, name) + except AttributeError as e: + if name in self._attr_aliases: + # Hmm - not found. But hey, the alias exists... + return object.__getattribute__( + self, self._attr_aliases[name] + ) + if self._allow_unknown_attrs_in_body: + # Last chance, maybe it's in body as attribute which isn't + # in the mapping at all... + if name in self._unknown_attrs_in_body: + return self._unknown_attrs_in_body[name] + raise e - @classmethod - def new(cls, **kwargs): - """Create a new instance of this resource. + def __getitem__(self, name): + """Provide dictionary access for elements of the data model.""" + # Check the class, since BaseComponent is a descriptor and thus + # behaves like its wrapped content. If we get it on the class, + # it returns the BaseComponent itself, not the results of __get__. + real_item = getattr(self.__class__, name, None) + if not real_item and name in self._attr_aliases: + # Not found? But we know an alias exists. + name = self._attr_aliases[name] + real_item = getattr(self.__class__, name, None) + if isinstance(real_item, fields._BaseComponent): + return getattr(self, name) + if not real_item: + # In order to maintain backwards compatibility where we were + # returning Munch (and server side names) and Resource object with + # normalized attributes we can offer dict access via server side + # names. + for attr, component in self._attributes_iterator((fields.Body,)): + if component.name == name: + warnings.warn( + f"Access to '{self.__class__}[{name}]' is deprecated. 
" + f"Use '{self.__class__}.{attr}' attribute instead", + os_warnings.LegacyAPIWarning, + ) + return getattr(self, attr) + if self._allow_unknown_attrs_in_body: + if name in self._unknown_attrs_in_body: + return self._unknown_attrs_in_body[name] + raise KeyError(name) - Internally set flags such that it is marked as not present on the - server. + def __delitem__(self, name): + delattr(self, name) - :param dict kwargs: Each of the named arguments will be set as - attributes on the resulting Resource object. + def __setitem__(self, name, value): + real_item = getattr(self.__class__, name, None) + if isinstance(real_item, fields._BaseComponent): + self.__setattr__(name, value) + else: + if self._allow_unknown_attrs_in_body: + self._unknown_attrs_in_body[name] = value + return + raise KeyError( + f"{name} is not found. " + f"{self.__module__}.{self.__class__.__name__} objects do not " + f"support setting arbitrary keys through the dict interface." + ) + + def _attributes( + self, remote_names=False, components=None, include_aliases=True + ): + """Generate list of supported attributes""" + attributes = [] + + if not components: + components = ( + fields.Body, + fields.Header, + fields.Computed, + fields.URI, + ) + + for attr, component in self._attributes_iterator(components): + key = attr if not remote_names else component.name + attributes.append(key) + if include_aliases and component.aka: + attributes.append(component.aka) + + return attributes + + def keys(self): + # NOTE(mordred) In python2, dict.keys returns a list. In python3 it + # returns a dict_keys view. For 2, we can return a list from the + # itertools chain. In 3, return the chain so it's at least an iterator. + # It won't strictly speaking be an actual dict_keys, so it's possible + # we may want to get more clever, but for now let's see how far this + # will take us. 
+ # NOTE(gtema) For now let's return list of 'public' attributes and not + # remotes or "unknown" + return self._attributes() + + def items(self): + # This method is critically required for Ansible "jsonify" + # NOTE(gtema) For some reason when running from SDK itself the native + # implementation of the method is absolutely sifficient, when called + # from Ansible - the values are often empty. Even integrating all + # Ansible internal methods did not help to find the root cause. Another + # fact is that under Py2 everything is fine, while under Py3 it fails. + # There is currently no direct test for Ansible-SDK issue. It is tested + # implicitely in the keypair role for ansible module, where an assert + # verifies presence of attributes. + res = [] + for attr in self._attributes(): + # Append key, value tuple to result list + res.append((attr, self[attr])) + return res + + def _update(self, **attrs: ty.Any) -> None: + """Given attributes, update them on this instance + + This is intended to be used from within the proxy + layer when updating instances that may have already + been created. """ - return cls(kwargs, loaded=False) - - @classmethod - def existing(cls, **kwargs): - """Create an instance of an existing remote resource. - - It is marked as an exact replication of a resource present on a server. + self.microversion = attrs.pop('microversion', None) + body, header, uri, computed = self._collect_attrs(attrs) + + self._body.update(body) + self._header.update(header) + self._uri.update(uri) + self._computed.update(computed) + self._update_location() + + # TODO(mordred) This is terrible, but is a hack at the moment to ensure + # json.dumps works. The json library does basically if not obj: and + # obj.items() ... but I think the if not obj: is short-circuiting down + # in the C code and thus since we don't store the data in self[] it's + # always False even if we override __len__ or __bool__. 
+ dict.update(self, self.to_dict()) + + def _collect_attrs(self, attrs): + """Given attributes, return a dict per type of attribute + + This method splits up **attrs into separate dictionaries + that correspond to the relevant body, header, and uri + attributes that exist on this class. + """ + body = self._consume_body_attrs(attrs) + header = self._consume_header_attrs(attrs) + uri = self._consume_uri_attrs(attrs) + + if attrs: + if self._allow_unknown_attrs_in_body: + body.update(attrs) + elif self._store_unknown_attrs_as_properties: + # Keep also remaining (unknown) attributes + body = self._pack_attrs_under_properties(body, attrs) + + if any([body, header, uri]): + attrs = self._compute_attributes(body, header, uri) + + body.update(self._consume_attrs(self._body_mapping(), attrs)) + + header.update(self._consume_attrs(self._header_mapping(), attrs)) + uri.update(self._consume_attrs(self._uri_mapping(), attrs)) + computed = self._consume_attrs(self._computed_mapping(), attrs) + # TODO(mordred) We should make a Location Resource and add it here + # instead of just the dict. + if self._connection: + computed.setdefault('location', self._connection.current_location) + + return body, header, uri, computed + + def _update_location(self) -> None: + """Update location to include resource project/zone information. + + Location should describe the location of the resource. For some + resources, where the resource doesn't have any such baked-in notion + we assume the resource exists in the same project as the logged-in + user's token. + + However, if a resource contains a project_id, then that project is + where the resource lives, and the location should reflect that. 
+ """ + if not self._connection: + return + kwargs = {} + if hasattr(self, 'project_id'): + kwargs['project_id'] = self.project_id + if hasattr(self, 'availability_zone'): + kwargs['zone'] = self.availability_zone + if kwargs: + self.location = self._connection._get_current_location(**kwargs) + + def _compute_attributes(self, body, header, uri): + """Compute additional attributes from the remote resource.""" + return {} - :param dict kwargs: Each of the named arguments will be set as - attributes on the resulting Resource object. + def _consume_body_attrs( + self, attrs: collections.abc.MutableMapping[str, ty.Any] + ) -> dict[str, ty.Any]: + return self._consume_mapped_attrs(fields.Body, attrs) + + def _consume_header_attrs( + self, attrs: collections.abc.MutableMapping[str, ty.Any] + ) -> dict[str, ty.Any]: + return self._consume_mapped_attrs(fields.Header, attrs) + + def _consume_uri_attrs( + self, attrs: collections.abc.MutableMapping[str, ty.Any] + ) -> dict[str, ty.Any]: + return self._consume_mapped_attrs(fields.URI, attrs) + + def _update_from_body_attrs( + self, attrs: collections.abc.MutableMapping[str, ty.Any] + ) -> None: + body = self._consume_body_attrs(attrs) + self._body.attributes.update(body) + self._body.clean() + + def _update_from_header_attrs( + self, attrs: collections.abc.MutableMapping[str, ty.Any] + ) -> None: + headers = self._consume_header_attrs(attrs) + self._header.attributes.update(headers) + self._header.clean() + + def _update_uri_from_attrs( + self, attrs: collections.abc.MutableMapping[str, ty.Any] + ) -> None: + uri = self._consume_uri_attrs(attrs) + self._uri.attributes.update(uri) + self._uri.clean() + + def _consume_mapped_attrs( + self, + mapping_cls: type[fields._BaseComponent], + attrs: collections.abc.MutableMapping[str, ty.Any], + ) -> dict[str, ty.Any]: + mapping = self._get_mapping(mapping_cls) + return self._consume_attrs(mapping, attrs) + + def _consume_attrs( + self, + mapping: collections.abc.MutableMapping[str, 
ty.Any], + attrs: collections.abc.MutableMapping[str, ty.Any], + ) -> dict[str, ty.Any]: + """Given a mapping and attributes, return relevant matches + + This method finds keys in attrs that exist in the mapping, then + both transposes them to their server-side equivalent key name + to be returned, and finally pops them out of attrs. This allows + us to only calculate their place and existence in a particular + type of Resource component one time, rather than looking at the + same source dict several times. """ - return cls(kwargs, loaded=True) + relevant_attrs = {} + consumed_keys = [] + for key, value in attrs.items(): + # We want the key lookup in mapping to be case insensitive if the + # mapping is, thus the use of get. We want value to be exact. + # If we find a match, we then have to loop over the mapping for + # to find the key to return, as there isn't really a "get me the + # key that matches this other key". We lower() in the inner loop + # because we've already done case matching in the outer loop. + if key in mapping.values() or mapping.get(key): + for map_key, map_value in mapping.items(): + if key.lower() in (map_key.lower(), map_value.lower()): + relevant_attrs[map_key] = value + consumed_keys.append(key) + continue + + for key in consumed_keys: + attrs.pop(key) + + return relevant_attrs + + def _clean_body_attrs(self, attrs): + """Mark the attributes as up-to-date.""" + self._body.clean(only=attrs) + if self.commit_jsonpatch or self.allow_patch: + for attr in attrs: + if attr in self._body: + self._original_body[attr] = self._body[attr] @classmethod - def _from_attr(cls, attribute, value): - # This method is useful in the higher level, in cases where operations - # need to depend on having Resource objects, but the API is flexible - # in taking text values which represent those objects. 
- if isinstance(value, cls): - return value - elif isinstance(value, six.string_types): - return cls.new(**{attribute: value}) - else: - raise ValueError("value must be %s instance or %s" % ( - cls.__name__, attribute)) + def _get_mapping( + cls, component: type[fields._BaseComponent] + ) -> ty.MutableMapping[str, ty.Any]: + """Return a dict of attributes of a given component on the class""" + mapping = component._map_cls() + ret = component._map_cls() + for key, value in cls._attributes_iterator(component): + # Make sure base classes don't end up overwriting + # mappings we've found previously in subclasses. + if key not in mapping: + # Make it this way first, to get MRO stuff correct. + mapping[key] = value.name + for k, v in mapping.items(): + ret[v] = k + return ret @classmethod - def from_id(cls, value): - """Create an instance from an ID or return an existing instance. - - New instances are created with :meth:`~openstack.resource.Resource.new` + def _body_mapping(cls): + """Return all Body members of this class""" + return cls._get_mapping(fields.Body) - :param value: If ``value`` is an instance of this Resource type, - it is returned. - If ``value`` is an ID which an instance of this - Resource type can be created with, one is created - and returned. + @classmethod + def _header_mapping(cls): + """Return all Header members of this class""" + return cls._get_mapping(fields.Header) - :rtype: :class:`~openstack.resource.Resource` or the - appropriate subclass. - :raises: :exc:`ValueError` if ``value`` is not an instance of - this Resource type or a valid ``id``. - """ - return cls._from_attr(cls.id_attribute, value) + @classmethod + def _uri_mapping(cls): + """Return all URI members of this class""" + return cls._get_mapping(fields.URI) @classmethod - def from_name(cls, value): - """Create an instance from a name or return an existing instance. 
+ def _computed_mapping(cls): + """Return all Computed members of this class""" + return cls._get_mapping(fields.Computed) - New instances are created with :meth:`~openstack.resource.Resource.new` + @classmethod + def _alternate_id(cls): + """Return the name of any value known as an alternate_id - :param value: If ``value`` is an instance of this Resource type, - it is returned. - If ``value`` is a name which an instance of this - Resource type can be created with, one is created - and returned. + NOTE: This will only ever return the first such alternate_id. + Only one alternate_id should be specified. - :rtype: :class:`~openstack.resource.Resource` or the - appropriate subclass. - :raises: :exc:`ValueError` if ``value`` is not an instance of - this Resource type or a valid ``name``. + Returns an empty string if no name exists, as this method is + consumed by _get_id and passed to getattr. """ - return cls._from_attr(cls.name_attribute, value) + for value in cls.__dict__.values(): + if isinstance(value, fields.Body): + if value.alternate_id: + return value.name + return "" - ## - # MUTABLE MAPPING IMPLEMENTATION - ## - - def __getitem__(self, name): - return self._attrs[name] + @staticmethod + def _get_id(value: Resource | str) -> str: + """If a value is a Resource, return the canonical ID - def __setitem__(self, name, value): - try: - orig = self._attrs[name] - except KeyError: - changed = True + This will return either the value specified by `id` or + `alternate_id` in that order if `value` is a Resource. + If `value` is anything other than a Resource, likely to + be a string already representing an ID, it is returned. 
+ """ + if isinstance(value, Resource): + return value.id else: - changed = orig != value - - if changed: - self._attrs[name] = value - self._dirty.add(name) - - def __delitem__(self, name): - del self._attrs[name] - self._dirty.add(name) - - def __len__(self): - return len(self._attrs) + return value - def __iter__(self): - return iter(self._attrs) + @classmethod + def new(cls, **kwargs: ty.Any) -> ty_ext.Self: + """Create a new instance of this resource. - ## - # BASE PROPERTIES/OPERATIONS - ## + When creating the instance set the ``_synchronized`` parameter + of :class:`Resource` to ``False`` to indicate that the resource does + not yet exist on the server side. This marks all attributes passed + in ``**kwargs`` as "dirty" on the resource, and thusly tracked + as necessary in subsequent calls such as :meth:`update`. - @property - def id(self): - """The identifier associated with this resource. - - The true value of the ``id`` property comes from the - attribute set as :data:`id_attribute`. For example, - a container's name may be the appropirate identifier, - so ``id_attribute = "name"`` would be set on the - :class:`Resource`, and ``Resource.name`` would be - conveniently accessible through ``id``. + :param dict kwargs: Each of the named arguments will be set as + attributes on the resulting Resource object. """ - return self._attrs.get(self.id_attribute, None) + return cls(_synchronized=False, **kwargs) - @id.deleter - def id(self): - del self._attrs[self.id_attribute] + @classmethod + def existing(cls, connection=None, **kwargs): + """Create an instance of an existing remote resource. - @property - def name(self): - """The name associated with this resource. + When creating the instance set the ``_synchronized`` parameter + of :class:`Resource` to ``True`` to indicate that it represents the + state of an existing server-side resource. 
As such, all attributes + passed in ``**kwargs`` are considered "clean", such that an immediate + :meth:`update` call would not generate a body of attributes to be + modified on the server. - The true value of the ``name`` property comes from the - attribute set as :data:`name_attribute`. + :param dict kwargs: Each of the named arguments will be set as + attributes on the resulting Resource object. """ - return self._attrs.get(self.name_attribute, None) - - @name.setter - def name(self, value): - self._attrs[self.name_attribute] = value - - @name.deleter - def name(self): - del self._attrs[self.name_attribute] - - @property - def is_dirty(self): - """True if the resource needs to be updated to the remote.""" - return len(self._dirty) > 0 - - def _reset_dirty(self): - self._dirty = set() + return cls(_synchronized=True, connection=connection, **kwargs) - def _update_attrs_from_response(self, resp, include_headers=False): - resp_headers = resp.pop(HEADERS, None) - self._attrs.update(resp) - self.update_attrs(self._attrs) - if include_headers and (resp_headers is not None): - self.set_headers(resp_headers) - - def update_attrs(self, *args, **kwargs): - """Update the attributes on this resource + @classmethod + def _from_munch( + cls, + obj: dict[str, ty.Union], + synchronized: bool = True, + connection: connection.Connection | None = None, + ) -> ty_ext.Self: + """Create an instance from a ``utils.Munch`` object. + + This is intended as a temporary measure to convert between shade-style + Munch objects and original openstacksdk resources. + + :param obj: a ``utils.Munch`` object to convert from. + :param bool synchronized: whether this object already exists on server + Must be set to ``False`` for newly created objects. + """ + return cls(_synchronized=synchronized, connection=connection, **obj) - Note that this is implemented because Resource.update overrides - the update method we would get from the MutableMapping base class. 
+ def _attr_to_dict(self, attr, to_munch): + """For a given attribute, convert it into a form suitable for a dict + value. - :params args: A dictionary of attributes to be updated. - :params kwargs: Named arguments to be set on this instance. - When a key corresponds to a resource.prop, - it will be set via resource.prop.__set__. + :param bool attr: Attribute name to convert - :rtype: None + :return: A dictionary of key/value pairs where keys are named + as they exist as attributes of this class. + :param bool _to_munch: Converts subresources to munch instead of dict. """ - ignore_none = kwargs.pop("ignore_none", False) - - # ensure setters are called for type coercion - for key, value in itertools.chain(dict(*args).items(), kwargs.items()): - if key != self.id_attribute: # id property is read only - - # Don't allow None values to override a key unless we've - # explicitly specified they can. Proxy methods have default - # None arguments that we don't want to override any values - # that may have been passed in on Resource instances. 
- if not all([ignore_none, value is None]): - if key != "id": - setattr(self, key, value) - self[key] = value - - def get_headers(self): - if HEADERS in self._attrs: - return self._attrs[HEADERS] - return {} + value = getattr(self, attr, None) + if isinstance(value, Resource): + return value.to_dict(_to_munch=to_munch) + elif isinstance(value, dict) and to_munch: + return utils.Munch(value) + elif value and isinstance(value, list): + converted = [] + for raw in value: + if isinstance(raw, Resource): + converted.append(raw.to_dict(_to_munch=to_munch)) + elif isinstance(raw, dict) and to_munch: + converted.append(utils.Munch(raw)) + else: + converted.append(raw) + return converted + return value - def set_headers(self, values): - self._attrs[HEADERS] = values - self._dirty.add(HEADERS) + def to_dict( + self, + body: bool = True, + headers: bool = True, + computed: bool = True, + ignore_none: bool = False, + original_names: bool = False, + _to_munch: bool = False, + ) -> dict[str, ty.Any]: + """Return a dictionary of this resource's contents + + :param bool body: Include the :class:`~openstack.fields.Body` + attributes in the returned dictionary. + :param bool headers: Include the :class:`~openstack.fields.Header` + attributes in the returned dictionary. + :param bool computed: Include the :class:`~openstack.fields.Computed` + attributes in the returned dictionary. + :param bool ignore_none: When True, exclude key/value pairs where + the value is None. This will exclude attributes that the server + hasn't returned. + :param bool original_names: When True, use attribute names as they + were received from the server. + :param bool _to_munch: For internal use only. Converts to `utils.Munch` + instead of dict. + + :return: A dictionary of key/value pairs where keys are named + as they exist as attributes of this class. 
+ """ + mapping: utils.Munch | dict + if _to_munch: + mapping = utils.Munch() + else: + mapping = {} - def to_dict(self): - attrs = copy.deepcopy(self._attrs) - headers = attrs.pop(HEADERS, {}) - attrs.update(headers) - return attrs + components: list[type[fields._BaseComponent]] = [] + if body: + components.append(fields.Body) + if headers: + components.append(fields.Header) + if computed: + components.append(fields.Computed) + if not components: + raise ValueError( + "At least one of `body`, `headers` or `computed` must be True" + ) + + if body and self._allow_unknown_attrs_in_body: + for key in self._unknown_attrs_in_body: + converted = self._attr_to_dict( + key, + to_munch=_to_munch, + ) + if not ignore_none or converted is not None: + mapping[key] = converted + + # NOTE: This is similar to the implementation in _get_mapping + # but is slightly different in that we're looking at an instance + # and we're mapping names on this class to their actual stored + # values. + # NOTE: isinstance stricly requires components to be a tuple + for attr, component in self._attributes_iterator(tuple(components)): + if original_names: + key = component.name + else: + key = attr + for key in filter(None, (key, component.aka)): + # Make sure base classes don't end up overwriting + # mappings we've found previously in subclasses. 
+ if key not in mapping: + converted = self._attr_to_dict( + attr, + to_munch=_to_munch, + ) + if ignore_none and converted is None: + continue + mapping[key] = converted + + return mapping + + # Compatibility with the utils.Munch.toDict method + toDict = to_dict + # Make the munch copy method use to_dict + copy = to_dict + + def _to_munch(self, original_names=True): + """Convert this resource into a Munch compatible with shade.""" + return self.to_dict( + body=True, + headers=False, + original_names=original_names, + _to_munch=True, + ) + + def _unpack_properties_to_resource_root(self, body): + if not body: + return + # We do not want to modify caller + body = body.copy() + props = body.pop('properties', {}) + if props and isinstance(props, dict): + # unpack dict of properties back to the root of the resource + body.update(props) + elif props and isinstance(props, str): + # A string value only - bring it back + body['properties'] = props + return body - ## - # CRUD OPERATIONS - ## + def _pack_attrs_under_properties(self, body, attrs): + props = body.get('properties', {}) + if not isinstance(props, dict): + props = {'properties': props} + props.update(attrs) + body['properties'] = props + return body - @staticmethod - def get_id(value): - """If a value is a Resource, return the canonical ID.""" - if isinstance(value, Resource): - return value.id + def _prepare_request_body( + self, + patch: bool, + prepend_key: bool, + *, + resource_request_key: str | None = None, + ) -> dict[str, ty.Any] | builtins.list[ty.Any]: + body: dict[str, ty.Any] | list[ty.Any] + if patch: + if not self._store_unknown_attrs_as_properties: + # Default case + new = self._body.attributes + original_body = self._original_body + else: + new = self._unpack_properties_to_resource_root( + self._body.attributes + ) + original_body = self._unpack_properties_to_resource_root( + self._original_body + ) + + # NOTE(gtema) sort result, since we might need validate it in tests + body = sorted( + 
list(jsonpatch.make_patch(original_body, new).patch), + key=operator.itemgetter('path'), + ) else: - return value - - @staticmethod - def convert_ids(attrs): - """Return an attribute dictionary suitable for create/update + if not self._store_unknown_attrs_as_properties: + # Default case + body = self._body.dirty + else: + body = self._unpack_properties_to_resource_root( + self._body.dirty + ) + + if prepend_key: + if resource_request_key is not None: + body = {resource_request_key: body} + elif self.resource_key is not None: + body = {self.resource_key: body} + return body - As some attributes may be Resource types, their ``id`` attribute - needs to be put in the Resource instance's place in order - to be properly serialized and understood by the server. + def _prepare_request( + self, + requires_id=None, + prepend_key=False, + patch=False, + base_path=None, + params=None, + *, + resource_request_key=None, + **kwargs, + ): + """Prepare a request to be sent to the server + + Create operations don't require an ID, but all others do, + so only try to append an ID when it's needed with + requires_id. Create and update operations sometimes require + their bodies to be contained within an dict -- if the + instance contains a resource_key and prepend_key=True, + the body will be wrapped in a dict with that key. + If patch=True, a JSON patch is prepared instead of the full body. + + Return a _Request object that contains the constructed URI + as well a body and headers that are ready to send. + Only dirty body and header contents will be returned. 
""" - if attrs is None: - return + if requires_id is None: + requires_id = self.requires_id + + # Conditionally construct arguments for _prepare_request_body + request_kwargs = {"patch": patch, "prepend_key": prepend_key} + if resource_request_key is not None: + request_kwargs['resource_request_key'] = resource_request_key + body = self._prepare_request_body(**request_kwargs) + + # TODO(mordred) Ensure headers have string values better than this + headers = {} + for k, v in self._header.dirty.items(): + if isinstance(v, list): + headers[k] = ", ".join(v) + else: + headers[k] = str(v) + + if base_path is None: + base_path = self.base_path + uri = base_path % self._uri.attributes + if requires_id: + if self.id is None: + raise exceptions.InvalidRequest( + "Request requires an ID but none was found" + ) + + uri = utils.urljoin(uri, self.id) + + if params: + query_params = urllib.parse.urlencode(params) + uri += '?' + query_params + + return _Request(uri, body, headers) + + def _translate_response( + self, + response: requests.Response, + has_body: bool | None = None, + error_message: str | None = None, + *, + resource_response_key: str | None = None, + ) -> None: + """Given a KSA response, inflate this instance with its data + + DELETE operations don't return a body, so only try to work + with a body when has_body is True. + + This method updates attributes that correspond to headers + and body on this instance and clears the dirty set. 
+ """ + if has_body is None: + has_body = self.has_body - converted = attrs.copy() - for key, value in converted.items(): - if isinstance(value, Resource): - converted[key] = value.id + exceptions.raise_from_response(response, error_message=error_message) - return converted + if has_body: + try: + body = response.json() + if resource_response_key and resource_response_key in body: + body = body[resource_response_key] + elif self.resource_key and self.resource_key in body: + body = body[self.resource_key] + + # Do not allow keys called "self" through. Glance chose + # to name a key "self", so we need to pop it out because + # we can't send it through cls.existing and into the + # Resource initializer. "self" is already the first + # argument and is practically a reserved word. + body.pop("self", None) + + body_attrs = self._consume_body_attrs(body) + if self._allow_unknown_attrs_in_body: + body_attrs.update(body) + self._unknown_attrs_in_body.update(body) + elif self._store_unknown_attrs_as_properties: + body_attrs = self._pack_attrs_under_properties( + body_attrs, body + ) + + self._body.attributes.update(body_attrs) + self._body.clean() + if self.commit_jsonpatch or self.allow_patch: + # We need the original body to compare against + self._original_body = self._body.attributes.copy() + except ValueError: + # Server returned not parse-able response (202, 204, etc) + # Do simply nothing + pass - @classmethod - def _get_create_body(cls, attrs): - if cls.resource_key: - return {cls.resource_key: attrs} - else: - return attrs + headers = self._consume_header_attrs(response.headers) + self._header.attributes.update(headers) + self._header.clean() + self._update_location() + dict.update(self, self.to_dict()) @classmethod - def _get_url(cls, path_args=None, resource_id=None): - if path_args: - url = cls.base_path % path_args - else: - url = cls.base_path - if resource_id is not None: - url = utils.urljoin(url, resource_id) - return url + def _get_session(cls, session: 
AdapterT) -> AdapterT: + """Attempt to get an Adapter from a raw session. + + Some older code used conn.session has the session argument to Resource + methods. That does not work anymore, as Resource methods expect an + Adapter not a session. We've hidden an _sdk_connection on the Session + stored on the connection. If we get something that isn't an Adapter, + pull the connection from the Session and look up the adapter by + service_type. + """ + # TODO(mordred) We'll need to do this for every method in every + # Resource class that is calling session.$something to be complete. + if isinstance(session, adapter.Adapter): + return session + raise ValueError( + "The session argument to Resource methods requires either an " + "instance of an openstack.proxy.Proxy object or at the very least " + "a raw keystoneauth1.adapter.Adapter." + ) @classmethod - def create_by_id(cls, session, attrs, resource_id=None, path_args=None): - """Create a remote resource from its attributes. + def _get_microversion(cls, session: adapter.Adapter) -> str | None: + """Get microversion to use for the given action. - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param dict attrs: The attributes to be sent in the body - of the request. - :param resource_id: This resource's identifier, if needed by - the request. The default is ``None``. - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. - - :return: A ``dict`` representing the response body. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_create` is not set to ``True``. - """ - if not cls.allow_create: - raise exceptions.MethodNotSupported(cls, 'create') + The base version uses the following logic: - # Convert attributes from Resource types into their ids. - attrs = cls.convert_ids(attrs) - headers = attrs.pop(HEADERS, None) + 1. 
If the session has a default microversion for the current service, + just use it. + 2. If ``self._max_microversion`` is not ``None``, use minimum between + it and the maximum microversion supported by the server. + 3. Otherwise use ``None``. - body = cls._get_create_body(attrs) + Subclasses can override this method if more complex logic is needed. - url = cls._get_url(path_args, resource_id) - args = {'json': body} - if headers: - args[HEADERS] = headers - if resource_id: - resp = session.put(url, endpoint_filter=cls.service, **args) - else: - resp = session.post(url, endpoint_filter=cls.service, **args) - resp_headers = resp.headers - resp = resp.json() + :param session: The session to use for making the request. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :return: Microversion as string or ``None`` + """ + if session.default_microversion: + return session.default_microversion - if cls.resource_key: - resp = resp[cls.resource_key] - if resp_headers: - resp[HEADERS] = copy.deepcopy(resp_headers) + return utils.maximum_supported_microversion( + session, cls._max_microversion + ) - return resp + @classmethod + def _assert_microversion_for( + cls, + session: adapter.Adapter, + expected: str | None, + *, + error_message: str | None = None, + maximum: str | None = None, + ) -> str: + """Enforce that the microversion for action satisfies the requirement. + + :param session: :class`keystoneauth1.adapter.Adapter` + :param expected: Expected microversion. + :param error_message: Optional error message with details. Will be + prepended to the message generated here. + :param maximum: Maximum microversion. + :return: resulting microversion as string. + :raises: :exc:`~openstack.exceptions.NotSupported` if the version + used for the action is lower than the expected one. + """ - def create(self, session): - """Create a remote resource from this instance. 
+ def _raise(message: str) -> ty.NoReturn: + if error_message: + error_message.rstrip('.') + message = f'{error_message}. {message}' + + raise exceptions.NotSupported(message) + + actual = cls._get_microversion(session) + + if actual is None: + message = ( + f"API version {expected} is required, but the default " + f"version will be used." + ) + _raise(message) + + actual_n = discover.normalize_version_number(actual) + + if expected is not None: + expected_n = discover.normalize_version_number(expected) + if actual_n < expected_n: + message = ( + f"API version {expected} is required, but {actual} " + f"will be used." + ) + _raise(message) + + if maximum is not None: + maximum_n = discover.normalize_version_number(maximum) + # Assume that if a service supports higher versions, it also + # supports lower ones. Breaks for services that remove old API + # versions (which is not something that has been done yet). + if actual_n > maximum_n: + return maximum + + return actual + + def create( + self, + session: adapter.Adapter, + prepend_key: bool = True, + base_path: str | None = None, + *, + resource_request_key: str | None = None, + resource_response_key: str | None = None, + microversion: str | None = None, + **params: ty.Any, + ) -> ty_ext.Self: + """Create a remote resource based on this instance. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation request. Default to + True. + :param str base_path: Base part of the URI for creating resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str resource_request_key: Overrides the usage of + self.resource_key when prepending a key to the request body. + Ignored if `prepend_key` is false. 
+ :param str resource_response_key: Overrides the usage of + self.resource_key when processing response bodies. + Ignored if `prepend_key` is false. + :param str microversion: API version to override the negotiated one. + :param dict params: Additional params to pass. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_create` is not set to ``True``. + :data:`Resource.allow_create` is not set to ``True``. """ - resp = self.create_by_id(session, self._attrs, self.id, path_args=self) - self._update_attrs_from_response(resp, include_headers=True) - self._reset_dirty() + if not self.allow_create: + raise exceptions.MethodNotSupported(self, 'create') + + session = self._get_session(session) + if microversion is None: + microversion = self._get_microversion(session) + requires_id = ( + self.create_requires_id + if self.create_requires_id is not None + else self.create_method == 'PUT' + ) + + # Construct request arguments. 
+ request_kwargs = { + "requires_id": requires_id, + "prepend_key": prepend_key, + "base_path": base_path, + } + if resource_request_key is not None: + request_kwargs['resource_request_key'] = resource_request_key + + if self.create_exclude_id_from_body: + self._body._dirty.discard("id") + + if self.create_method == 'PUT': + request = self._prepare_request(**request_kwargs) + response = session.put( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + params=params, + ) + elif self.create_method == 'POST': + request = self._prepare_request(**request_kwargs) + response = session.post( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + params=params, + ) + else: + raise exceptions.ResourceFailure( + f"Invalid create method: {self.create_method}" + ) + + has_body = ( + self.has_body + if self.create_returns_body is None + else self.create_returns_body + ) + self.microversion = microversion + + self._translate_response( + response, + has_body=has_body, + resource_response_key=resource_response_key, + ) + # direct comparision to False since we need to rule out None + if self.has_body and self.create_returns_body is False: + # fetch the body if it's required but not returned by create + return self.fetch( + session, + resource_response_key=resource_response_key, + ) return self @classmethod - def get_data_by_id(cls, session, resource_id, path_args=None, args=None, - include_headers=False): - """Get the attributes of a remote resource from an id. + def bulk_create( + cls, + session: adapter.Adapter, + data: builtins.list[dict[str, ty.Any]], + prepend_key: bool = True, + base_path: str | None = None, + *, + microversion: str | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: + """Create multiple remote resources based on this class and data. :param session: The session to use for making this request. 
- :type session: :class:`~openstack.session.Session` - :param resource_id: This resource's identifier, if needed by - the request. - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. - :param dict args: A dictionary of query parameters to be appended to - the compound URL. - :param bool include_headers: ``True`` if header data should be - included in the response body, - ``False`` if not. - - :return: A ``dict`` representing the response body. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_retrieve` is not set to ``True``. - """ - if not cls.allow_retrieve: - raise exceptions.MethodNotSupported(cls, 'retrieve') - - url = cls._get_url(path_args, resource_id) - if args: - url = '?'.join([url, url_parse.urlencode(args)]) - response = session.get(url, endpoint_filter=cls.service) - body = response.json() - - if cls.resource_key: - body = body[cls.resource_key] - - if include_headers: - body[HEADERS] = response.headers + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param data: list of dicts, which represent resources to create. + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource creation request. Default to + True. + :param str base_path: Base part of the URI for creating resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str microversion: API version to override the negotiated one. + :param dict params: Additional params to pass. - return body - - @classmethod - def get_by_id(cls, session, resource_id, path_args=None, - include_headers=False): - """Get an object representing a remote resource from an id. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource_id: This resource's identifier, if needed by - the request. 
- :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. - :param bool include_headers: ``True`` if header data should be - included in the response body, - ``False`` if not. - - :return: A :class:`Resource` object representing the - response body. + :return: A generator of :class:`Resource` objects. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_retrieve` is not set to ``True``. + :data:`Resource.allow_create` is not set to ``True``. """ - body = cls.get_data_by_id(session, resource_id, path_args=path_args, - include_headers=include_headers) - return cls.existing(**body) + if not cls.allow_create: + raise exceptions.MethodNotSupported(cls, 'create') - def get(self, session, include_headers=False, args=None): - """Get the remote resource associated with this instance. + if not ( + data + and isinstance(data, list) + and all([isinstance(x, dict) for x in data]) + ): + raise ValueError(f'Invalid data passed: {data}') + + session = cls._get_session(session) + if microversion is None: + microversion = cls._get_microversion(session) + requires_id = ( + cls.create_requires_id + if cls.create_requires_id is not None + else cls.create_method == 'PUT' + ) + if cls.create_method == 'PUT': + method = session.put + elif cls.create_method == 'POST': + method = session.post + else: + raise exceptions.ResourceFailure( + f"Invalid create method: {cls.create_method}" + ) + + _body: list[ty.Any] = [] + resources = [] + for attrs in data: + # NOTE(gryf): we need to create resource objects, since + # _prepare_request only works on instances, not classes. + # Those objects will be used in case where request doesn't return + # JSON data representing created resource, and yet it's required + # to return newly created resource objects. + # TODO(stephenfin): Our types say we accept a ksa Adapter, but this + # requires an SDK Proxy. 
Do we update the types or rework this to + # support use of an adapter. + resource = cls.new(connection=session._get_connection(), **attrs) # type: ignore + resources.append(resource) + request = resource._prepare_request( + requires_id=requires_id, base_path=base_path + ) + _body.append(request.body) + + body: dict[str, ty.Any] | list[ty.Any] = _body + + if prepend_key: + if not cls.resources_key: + raise exceptions.ResourceFailure( + "Cannot request prepend_key with Unset resources key" + ) + + body = {cls.resources_key: body} + + response = method( + request.url, + json=body, + headers=request.headers, + microversion=microversion, + params=params, + ) + exceptions.raise_from_response(response) + json = response.json() + + if cls.resources_key: + json = json[cls.resources_key] + else: + json = json + + if isinstance(data, list): + json = json + else: + json = [json] + + has_body = ( + cls.has_body + if cls.create_returns_body is None + else cls.create_returns_body + ) + if has_body and cls.create_returns_body is False: + return (r.fetch(session) for r in resources) + else: + return ( + # TODO(stephenfin): Our types say we accept a ksa Adapter, but + # this requires an SDK Proxy. Do we update the types or rework + # this to support use of an adapter. + cls.existing( + microversion=microversion, + connection=session._get_connection(), # type: ignore + **res_dict, + ) + for res_dict in json + ) + + def fetch( + self, + session: adapter.Adapter, + requires_id: bool = True, + base_path: str | None = None, + error_message: str | None = None, + skip_cache: bool = False, + *, + resource_response_key: str | None = None, + microversion: str | None = None, + **params: ty.Any, + ) -> ty_ext.Self: + """Get a remote resource based on this instance. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param bool include_headers: ``True`` if header data should be - included in the response body, - ``False`` if not. 
- :param dict args: A dictionary of query parameters to be appended to - the compound URL. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param boolean requires_id: A boolean indicating whether resource ID + should be part of the requested URI. + :param str base_path: Base part of the URI for fetching resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str error_message: An Error message to be returned if + requested object does not exist. + :param bool skip_cache: A boolean indicating whether optional API + cache should be skipped for this invocation. + :param str resource_response_key: Overrides the usage of + self.resource_key when processing the response body. + :param str microversion: API version to override the negotiated one. + :param dict params: Additional parameters that can be consumed. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_retrieve` is not set to ``True``. + :data:`Resource.allow_fetch` is not set to ``True``. + :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. 
""" - body = self.get_data_by_id(session, self.id, path_args=self, args=args, - include_headers=include_headers) - self._update_attrs_from_response(body, include_headers) - self._loaded = True + if not self.allow_fetch: + raise exceptions.MethodNotSupported(self, 'fetch') + + request = self._prepare_request( + requires_id=requires_id, + base_path=base_path, + ) + + session = self._get_session(session) + if microversion is None: + microversion = self._get_microversion(session) + self.microversion = microversion + + response = session.get( + request.url, + microversion=microversion, + params=params, + skip_cache=skip_cache, + ) + + self._translate_response( + response, + error_message=error_message, + resource_response_key=resource_response_key, + ) + return self - @classmethod - def head_data_by_id(cls, session, resource_id, path_args=None): - """Get a dictionary representing the headers of a remote resource. + def head( + self, + session: adapter.Adapter, + base_path: str | None = None, + *, + microversion: str | None = None, + ) -> ty_ext.Self: + """Get headers from a remote resource based on this instance. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource_id: This resource's identifier, if needed by - the request. - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. - - :return: A ``dict`` containing the headers. + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param str base_path: Base part of the URI for fetching resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str microversion: API version to override the negotiated one. + + :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_head` is not set to ``True``. + :data:`Resource.allow_head` is not set to ``True``. 
+ :raises: :exc:`~openstack.exceptions.NotFoundException` if the resource + was not found. """ - if not cls.allow_head: - raise exceptions.MethodNotSupported(cls, 'head') - - url = cls._get_url(path_args, resource_id) + if not self.allow_head: + raise exceptions.MethodNotSupported(self, 'head') - headers = {'Accept': ''} - resp = session.head(url, endpoint_filter=cls.service, headers=headers) + session = self._get_session(session) + if microversion is None: + microversion = self._get_microversion(session) + self.microversion = microversion - return {HEADERS: resp.headers} + request = self._prepare_request(base_path=base_path) + response = session.head(request.url, microversion=microversion) - @classmethod - def head_by_id(cls, session, resource_id, path_args=None): - """Get an object representing the headers of a remote resource. + self._translate_response(response, has_body=False) - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource_id: This resource's identifier, if needed by - the request. - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. - - :return: A :class:`Resource` representing the headers. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_head` is not set to ``True``. - """ - data = cls.head_data_by_id(session, resource_id, path_args=path_args) - return cls.existing(**data) + return self - def head(self, session): - """Get the remote resource headers associated with this instance. 
+ @property + def requires_commit(self): + """Whether the next commit() call will do anything.""" + return ( + self._body.dirty or self._header.dirty or self.allow_empty_commit + ) + + def commit( + self, + session: adapter.Adapter, + prepend_key: bool = True, + has_body: bool = True, + retry_on_conflict: bool | None = None, + base_path: str | None = None, + *, + microversion: str | None = None, + **kwargs: ty.Any, + ) -> ty_ext.Self: + """Commit the state of the instance to the remote resource. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource update request. + Default to True. + :param bool retry_on_conflict: Whether to enable retries on HTTP + CONFLICT (409). Value of ``None`` leaves the `Adapter` defaults. + :param str base_path: Base part of the URI for modifying resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str microversion: API version to override the negotiated one. + :param dict kwargs: Parameters that will be passed to + _prepare_request() :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_head` is not set to ``True``. + :data:`Resource.allow_commit` is not set to ``True``. """ - data = self.head_data_by_id(session, self.id, path_args=self) - self._attrs.update(data) - self._loaded = True - return self + if not self.allow_commit: + raise exceptions.MethodNotSupported(self, 'commit') + + # The id cannot be dirty for an commit + self._body._dirty.discard("id") + + # Only try to update if we actually have anything to commit. + if not self.requires_commit: + return self + + # Avoid providing patch unconditionally to avoid breaking subclasses + # without it. 
+ if self.commit_jsonpatch: + kwargs['patch'] = True + + request = self._prepare_request( + prepend_key=prepend_key, + base_path=base_path, + **kwargs, + ) + if microversion is None: + microversion = self._get_microversion(session) + + return self._commit( + session, + request, + self.commit_method, + microversion, + has_body=has_body, + retry_on_conflict=retry_on_conflict, + ) + + def _commit( + self, + session, + request, + method, + microversion, + has_body=True, + retry_on_conflict=None, + ): + session = self._get_session(session) + + kwargs = {} + retriable_status_codes = set(session.retriable_status_codes or ()) + if retry_on_conflict: + kwargs['retriable_status_codes'] = retriable_status_codes | {409} + elif retry_on_conflict is not None and retriable_status_codes: + # The baremetal proxy defaults to retrying on conflict, allow + # overriding it via an explicit retry_on_conflict=False. + kwargs['retriable_status_codes'] = retriable_status_codes - {409} - @classmethod - def update_by_id(cls, session, resource_id, attrs, path_args=None): - """Update a remote resource with the given attributes. + try: + call = getattr(session, method.lower()) + except AttributeError: + raise exceptions.ResourceFailure( + f"Invalid commit method: {method}" + ) - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource_id: This resource's identifier, if needed by - the request. - :param dict attrs: The attributes to be sent in the body - of the request. - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. - - :return: A ``dict`` representing the response body. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_update` is not set to ``True``. 
- """ - if not cls.allow_update: - raise exceptions.MethodNotSupported(cls, 'update') + response = call( + request.url, + json=request.body, + headers=request.headers, + microversion=microversion, + **kwargs, + ) - # Convert attributes from Resource types into their ids. - attrs = cls.convert_ids(attrs) - if attrs and cls.id_attribute in attrs: - del attrs[cls.id_attribute] - headers = attrs.pop(HEADERS, None) + self.microversion = microversion - body = cls._get_create_body(attrs) + self._translate_response(response, has_body=has_body) - url = cls._get_url(path_args, resource_id) - args = {'json': body} - if headers: - args[HEADERS] = headers - if cls.patch_update: - resp = session.patch(url, endpoint_filter=cls.service, **args) - else: - resp = session.put(url, endpoint_filter=cls.service, **args) - resp_headers = resp.headers - resp = resp.json() + return self - if cls.resource_key and cls.resource_key in resp.keys(): - resp = resp[cls.resource_key] - if resp_headers: - resp[HEADERS] = resp_headers + def _convert_patch(self, patch): + if not isinstance(patch, list): + patch = [patch] - return resp + converted = [] + for item in patch: + try: + path = item['path'] + parts = path.lstrip('/').split('/', 1) + field = parts[0] + except (KeyError, IndexError): + raise ValueError(f"Malformed or missing path in {item}") + + try: + component = getattr(self.__class__, field) + except AttributeError: + server_field = field + else: + server_field = component.name + + if len(parts) > 1: + new_path = f'/{server_field}/{parts[1]}' + else: + new_path = f'/{server_field}' + converted.append(dict(item, path=new_path)) + + return converted - def update(self, session): - """Update the remote resource associated with this instance. + def patch( + self, + session, + patch=None, + prepend_key=True, + has_body=True, + retry_on_conflict=None, + base_path=None, + *, + microversion=None, + ): + """Patch the remote resource. 
+ + Allows modifying the resource by providing a list of JSON patches to + apply to it. The patches can use both the original (server-side) and + SDK field names. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param patch: Additional JSON patch as a list or one patch item. + If provided, it is applied on top of any changes to the current + resource. + :param prepend_key: A boolean indicating whether the resource_key + should be prepended in a resource update request. Default to True. + :param bool retry_on_conflict: Whether to enable retries on HTTP + CONFLICT (409). Value of ``None`` leaves the `Adapter` defaults. + :param str base_path: Base part of the URI for modifying resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param str microversion: API version to override the negotiated one. :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_update` is not set to ``True``. + :data:`Resource.allow_patch` is not set to ``True``. """ - if not self.is_dirty: - return - - dirty_attrs = dict((k, self._attrs[k]) for k in self._dirty) - resp = self.update_by_id(session, self.id, dirty_attrs, path_args=self) - - try: - resp_id = resp.pop(self.id_attribute) - except KeyError: - pass - else: - assert resp_id == self.id - self._update_attrs_from_response(resp, include_headers=True) - self._reset_dirty() - return self - - @classmethod - def delete_by_id(cls, session, resource_id, path_args=None): - """Delete a remote resource with the given id. + if not self.allow_patch: + raise exceptions.MethodNotSupported(self, 'patch') + + # The id cannot be dirty for an commit + self._body._dirty.discard("id") + + # Only try to update if we actually have anything to commit. 
+ if not patch and not self.requires_commit: + return self + + request = self._prepare_request( + prepend_key=prepend_key, + base_path=base_path, + patch=True, + ) + if microversion is None: + microversion = self._get_microversion(session) + if patch: + request.body += self._convert_patch(patch) + + return self._commit( + session, + request, + 'PATCH', + microversion, + has_body=has_body, + retry_on_conflict=retry_on_conflict, + ) + + def delete( + self, + session: adapter.Adapter, + error_message: str | None = None, + *, + microversion: str | None = None, + **kwargs: ty.Any, + ) -> ty_ext.Self: + """Delete the remote resource based on this instance. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource_id: This resource's identifier, if needed by - the request. - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. - - :return: ``None`` + :type session: :class:`~keystoneauth1.adapter.Adapter` + :param str microversion: API version to override the negotiated one. + :param dict kwargs: Parameters that will be passed to + _prepare_request() + + :return: This :class:`Resource` instance. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_delete` is not set to ``True``. + :data:`Resource.allow_commit` is not set to ``True``. + :raises: :exc:`~openstack.exceptions.NotFoundException` if + the resource was not found. """ - if not cls.allow_delete: - raise exceptions.MethodNotSupported(cls, 'delete') - url = cls._get_url(path_args, resource_id) - headers = {'Accept': ''} - session.delete(url, endpoint_filter=cls.service, headers=headers) + response = self._raw_delete( + session, microversion=microversion, **kwargs + ) + kwargs = {} + if error_message: + kwargs['error_message'] = error_message - def delete(self, session): - """Delete the remote resource associated with this instance. 
+ self._translate_response( + response, has_body=False, error_message=error_message + ) + return self - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + def _raw_delete(self, session, microversion=None, **kwargs): + if not self.allow_delete: + raise exceptions.MethodNotSupported(self, 'delete') - :return: ``None`` - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_update` is not set to ``True``. - """ - self.delete_by_id(session, self.id, path_args=self) + request = self._prepare_request(**kwargs) + session = self._get_session(session) + if microversion is None: + microversion = self._get_microversion(session) + + return session.delete( + request.url, + headers=request.headers, + microversion=microversion, + ) @classmethod - def list(cls, session, path_args=None, paginated=False, params=None): + def list( + cls, + session: adapter.Adapter, + paginated: bool = True, + base_path: str | None = None, + allow_unknown_params: bool = False, + *, + microversion: str | None = None, + headers: dict[str, str] | None = None, + max_items: int | None = None, + **params: ty.Any, + ) -> ty.Generator[ty_ext.Self, None, None]: """This method is a generator which yields resource objects. This resource object list generator handles pagination and takes query params for response filtering. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. + :type session: :class:`~keystoneauth1.adapter.Adapter` :param bool paginated: ``True`` if a GET to this resource returns - a paginated series of responses, or ``False`` - if a GET returns only one page of data. 
- **When paginated is False only one - page of data will be returned regardless - of the API's support of pagination.** - :param dict params: Query parameters to be passed into the underlying - :meth:`~openstack.session.Session.get` method. - Values that the server may support include `limit` - and `marker`. + a paginated series of responses, or ``False`` if a GET returns only + one page of data. **When paginated is False only one page of data + will be returned regardless of the API's support of pagination.** + :param str base_path: Base part of the URI for listing resources, if + different from :data:`~openstack.resource.Resource.base_path`. + :param bool allow_unknown_params: ``True`` to accept, but discard + unknown query parameters. This allows getting list of 'filters' and + passing everything known to the server. ``False`` will result in + validation exception when unknown query parameters are passed. + :param str microversion: API version to override the negotiated one. + :param dict headers: Additional headers to inject into the HTTP + request. + :param int max_items: The maximum number of items to return. Typically + this must be used with ``paginated=True``. + :param dict params: These keyword arguments are passed through the + :meth:`~openstack.resource.QueryParamter._transpose` method + to find if any of them match expected query parameters to be sent + in the *params* argument to + :meth:`~keystoneauth1.adapter.Adapter.get`. They are additionally + checked against the :data:`~openstack.resource.Resource.base_path` + format string to see if any path fragments need to be filled in by + the contents of this argument. + Parameters supported as filters by the server side are passed in + the API call, remaining parameters are applied as filters to the + retrieved results. :return: A generator of :class:`Resource` objects. :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_list` is not set to ``True``. 
+ :data:`Resource.allow_list` is not set to ``True``. + :raises: :exc:`~openstack.exceptions.InvalidResourceQuery` if query + contains invalid params. """ if not cls.allow_list: raise exceptions.MethodNotSupported(cls, 'list') - more_data = True - params = {} if params is None else params - url = cls._get_url(path_args) - headers = {'Accept': 'application/json'} - while more_data: - resp = session.get(url, endpoint_filter=cls.service, - headers=headers, params=params) - resp = resp.json() + session = cls._get_session(session) + + if microversion is None: + microversion = cls._get_microversion(session) + + if base_path is None: + base_path = cls.base_path + + api_filters = cls._query_mapping._validate( + params, + base_path=base_path, + allow_unknown_params=True, + ) + client_filters = {} + # Gather query parameters which are not supported by the server + for k, v in params.items(): + if ( + # Known attr + hasattr(cls, k) + # Is real attr property + and isinstance(getattr(cls, k), fields.Body) + # not included in the query_params + and k not in cls._query_mapping._mapping.keys() + ): + client_filters[k] = v + query_params = cls._query_mapping._transpose(api_filters, cls) + uri = base_path % params + uri_params = {} + + if max_items and not query_params.get('limit'): + # If a user requested max_items but not a limit, set limit to + # max_items on the assumption that if (a) the value is smaller than + # the maximum server allowed value for limit then we'll be able to + # do a single call to get everything, while (b) if the value is + # larger then the server will ignore the value (or rather use its + # own hardcoded limit) making this is a no-op. + # If a user requested both max_items and limit then we assume they + # know what they're doing. 
+ query_params['limit'] = max_items + + limit = query_params.get('limit') + + for k, v in params.items(): + # We need to gather URI parts to set them on the resource later + if hasattr(cls, k) and isinstance(getattr(cls, k), fields.URI): + uri_params[k] = v + + def _dict_filter(f, d): + """Dict param based filtering""" + if not d: + return False + for key in f.keys(): + if isinstance(f[key], dict): + if not _dict_filter(f[key], d.get(key, None)): + return False + elif d.get(key, None) != f[key]: + return False + return True + + headers_final = {"Accept": "application/json"} + if headers: + headers_final = {**headers_final, **headers} + + # Track the total number of resources yielded so we can paginate + # swift objects + total_yielded = 0 + while uri: + # Copy query_params due to weird mock unittest interactions + response = session.get( + uri, + headers=headers_final, + params=query_params.copy(), + microversion=microversion, + ) + exceptions.raise_from_response(response) + data = response.json() + + # Discard any existing pagination keys + last_marker = query_params.pop('marker', None) + query_params.pop('limit', None) + if cls.resources_key: - resp = resp[cls.resources_key] - - if not resp: - more_data = False - - # Keep track of how many items we've yielded. If we yielded - # less than our limit, we don't need to do an extra request - # to get back an empty data set, which acts as a sentinel. 
- yielded = 0 - new_marker = None - for data in resp: - value = cls.existing(**data) - new_marker = value.id - yielded += 1 - yield value - - if not paginated: - return - if 'limit' in params and yielded < params['limit']: + resources = data[cls.resources_key] + else: + resources = data + + if not isinstance(resources, list): + resources = [resources] + + marker = None + for raw_resource in resources: + # We return as soon as we hit our limit, even if we have items + # remaining + if max_items and total_yielded >= max_items: + return + + # Do not allow keys called "self" through. Glance chose + # to name a key "self", so we need to pop it out because + # we can't send it through cls.existing and into the + # Resource initializer. "self" is already the first + # argument and is practically a reserved word. + raw_resource.pop("self", None) + # We want that URI props are available on the resource + raw_resource.update(uri_params) + + # TODO(stephenfin): Our types say we accept a ksa Adapter, but + # this requires an SDK Proxy. Do we update the types or rework + # this to support use of an adapter. + value = cls.existing( + microversion=microversion, + connection=session._get_connection(), # type: ignore + **raw_resource, + ) + marker = value.id + filters_matched = True + # Iterate over client filters and return only if matching + for key in client_filters.keys(): + if isinstance(client_filters[key], dict): + if not _dict_filter( + client_filters[key], value.get(key, None) + ): + filters_matched = False + break + elif value.get(key, None) != client_filters[key]: + filters_matched = False + break + + if filters_matched: + yield value + total_yielded += 1 + + if resources and paginated: + uri, next_params = cls._get_next_link( + uri, response, data, marker, limit, total_yielded + ) + try: + if next_params['marker'] == last_marker: + # If next page marker is same as what we were just + # asked something went terribly wrong. Some ancient + # services had bugs. 
+ raise exceptions.SDKException( + 'Endless pagination loop detected, aborting' + ) + except KeyError: + # do nothing, exception handling is cheaper then "if" + pass + query_params.update(next_params) + else: return - params['limit'] = yielded - params['marker'] = new_marker @classmethod - def find(cls, session, name_or_id, path_args=None, ignore_missing=True): + def _get_next_link(cls, uri, response, data, marker, limit, total_yielded): + next_link = None + params: dict[str, str | list[str] | int] = {} + + if isinstance(data, dict): + pagination_key = cls.pagination_key + + if not pagination_key and 'links' in data: + # api-wg guidelines are for a links dict in the main body + pagination_key = 'links' + + if not pagination_key and cls.resources_key: + # Nova has a {key}_links dict in the main body + pagination_key = f'{cls.resources_key}_links' + + if pagination_key: + links = data.get(pagination_key, {}) + # keystone might return a dict + if isinstance(links, dict): + links = ({k: v} for k, v in links.items()) + + for item in links: + if item.get('rel') == 'next' and 'href' in item: + next_link = item['href'] + break + + # Glance has a next field in the main body + next_link = next_link or data.get('next') + if next_link and next_link.startswith('/v'): + next_link = next_link[next_link.find('/', 1) :] + + if not next_link and 'next' in response.links: + # RFC5988 specifies Link headers and requests parses them if they + # are there. We prefer link dicts in resource body, but if those + # aren't there and Link headers are, use them. 
+ next_link = response.links['next']['uri'] + + # Swift provides a count of resources in a header and a list body + if not next_link and cls.pagination_key: + total_count = response.headers.get(cls.pagination_key) + if total_count: + total_count = int(total_count) + if total_count > total_yielded: + params['marker'] = marker + if limit: + params['limit'] = limit + next_link = uri + + # Parse params from Link (next page URL) into params. + # This prevents duplication of query parameters that with large + # number of pages result in HTTP 414 error eventually. + if next_link: + parts = urllib.parse.urlparse(next_link) + query_params = urllib.parse.parse_qs(parts.query) + params.update(query_params) + next_link = urllib.parse.urljoin(next_link, parts.path) + + # If we still have no link, and limit was given and is non-zero, + # and the number of records yielded equals the limit, then the user + # is playing pagination ball so we should go ahead and try once more. + if not next_link and limit: + next_link = uri + params['marker'] = marker + params['limit'] = limit + + return next_link, params + + @classmethod + def _get_one_match(cls, name_or_id, results): + """Given a list of results, return the match""" + the_result = None + for maybe_result in results: + id_value = cls._get_id(maybe_result) + name_value = maybe_result.name + + if (id_value == name_or_id) or (name_value == name_or_id): + # Only allow one resource to be found. If we already + # found a match, raise an exception to show it. + if the_result is None: + the_result = maybe_result + else: + msg = "More than one %s exists with the name '%s'." 
+ msg = msg % (cls.__name__, name_or_id) + raise exceptions.DuplicateResource(msg) + + return the_result + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[True] = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: ty.Literal[False], + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self: ... + + # excuse the duplication here: it's mypy's fault + # https://github.com/python/mypy/issues/14764 + @ty.overload + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: ... + + @classmethod + def find( + cls, + session: adapter.Adapter, + name_or_id: str, + ignore_missing: bool = True, + list_base_path: str | None = None, + *, + microversion: str | None = None, + all_projects: bool | None = None, + **params: ty.Any, + ) -> ty_ext.Self | None: """Find a resource by its name or id. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` + :type session: :class:`~keystoneauth1.adapter.Adapter` :param name_or_id: This resource's identifier, if needed by - the request. The default is ``None``. - :param dict path_args: A dictionary of arguments to construct - a compound URL. - See `How path_args are used`_ for details. + the request. The default is ``None``. :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. 
- When set to ``True``, None will be returned when - attempting to find a nonexistent resource. + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param str list_base_path: base_path to be used when need listing + resources. + :param str microversion: API version to override the negotiated one. + :param dict params: Any additional parameters to be passed into + underlying methods, such as to + :meth:`~openstack.resource.Resource.existing` in order to pass on + URI parameters. :return: The :class:`Resource` object matching the given name or id - or None if nothing matches. + or None if nothing matches. :raises: :class:`openstack.exceptions.DuplicateResource` if more - than one resource is found for this request. - :raises: :class:`openstack.exceptions.ResourceNotFound` if nothing - is found and ignore_missing is ``False``. + than one resource is found for this request. + :raises: :class:`openstack.exceptions.NotFoundException` if nothing is + found and ignore_missing is ``False``. """ - # Only return one matching resource. - def get_one_match(results, the_id, the_name): - the_result = None - for item in results: - maybe_result = cls.existing(**item) - - id_value, name_value = None, None - if the_id is not None: - id_value = getattr(maybe_result, the_id, None) - if the_name is not None: - name_value = getattr(maybe_result, the_name, None) - - if (id_value == name_or_id) or (name_value == name_or_id): - # Only allow one resource to be found. If we already - # found a match, raise an exception to show it. - if the_result is None: - the_result = maybe_result - else: - msg = "More than one %s exists with the name '%s'." 
- msg = (msg % (cls.get_resource_name(), name_or_id)) - raise exceptions.DuplicateResource(msg) - - return the_result + session = cls._get_session(session) # Try to short-circuit by looking directly for a matching ID. try: - if cls.allow_retrieve: - return cls.get_by_id(session, name_or_id, path_args=path_args) - except exceptions.NotFoundException: + # TODO(stephenfin): Our types say we accept a ksa Adapter, but this + # requires an SDK Proxy. Do we update the types or rework this to + # support use of an adapter. + match = cls.existing( + id=name_or_id, + connection=session._get_connection(), # type: ignore + **params, + ) + return match.fetch(session, microversion=microversion, **params) + except ( + exceptions.NotFoundException, + exceptions.BadRequestException, + exceptions.ForbiddenException, + ): + # NOTE(gtema): There are few places around openstack that return + # 400 if we try to GET resource and it doesn't exist. pass - data = cls.list(session, path_args=path_args) + if list_base_path: + params['base_path'] = list_base_path + + # all_projects is a special case that is used by multiple services. 
We + # handle it here since it doesn't make sense to pass it to the .fetch + # call above + if all_projects is not None: + params['all_projects'] = all_projects + + if ( + 'name' in cls._query_mapping._mapping.keys() + and 'name' not in params + ): + params['name'] = name_or_id + + data = cls.list(session, **params) - result = get_one_match(data, cls.id_attribute, cls.name_attribute) + result = cls._get_one_match(name_or_id, data) if result is not None: return result if ignore_missing: return None - raise exceptions.ResourceNotFound( - "No %s found for %s" % (cls.__name__, name_or_id)) + raise exceptions.NotFoundException( + f"No {cls.__name__} found for {name_or_id}" + ) -def wait_for_status(session, resource, status, failures, interval, wait): + +def _normalize_status(status: str | None) -> str | None: + if status is not None: + status = status.lower() + return status + + +def wait_for_status( + session: adapter.Adapter, + resource: ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, +) -> ResourceT: """Wait for the resource to be in a particular status. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` :param resource: The resource to wait on to reach the status. The resource - must have a status attribute. - :type resource: :class:`~openstack.resource.Resource` + must have a status attribute specified via ``attribute``. :param status: Desired status of the resource. - :param list failures: Statuses that would indicate the transition - failed such as 'ERROR'. - :param interval: Number of seconds to wait between checks. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. Set to ``None`` + to use the default interval. 
:param wait: Maximum number of seconds to wait for transition. - - :return: Method returns self on success. - :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to status failed to occur in wait seconds. - :raises: :class:`~openstack.exceptions.ResourceFailure` resource - transitioned to one of the failure states. - :raises: :class:`~AttributeError` if the resource does not have a status - attribute + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the transition + to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute """ - if resource.status == status: + current_status = getattr(resource, attribute) + if _normalize_status(current_status) == _normalize_status(status): return resource - total_sleep = 0 if failures is None: - failures = [] - - while total_sleep < wait: - resource.get(session) - if resource.status == status: + failures = ['ERROR'] + + failures = [f.lower() for f in failures] + name = f"{resource.__class__.__name__}:{resource.id}" + msg = f"Timeout waiting for {name} to transition to {status}" + + for count in utils.iterate_timeout( + timeout=wait, message=msg, wait=interval + ): + resource = resource.fetch(session, skip_cache=True) + if not resource: + raise exceptions.ResourceFailure( + f"{name} went away while waiting for {status}" + ) + + new_status = getattr(resource, attribute) + normalized_status = _normalize_status(new_status) + if normalized_status == _normalize_status(status): return resource - if 
resource.status in failures: - msg = ("Resource %s transitioned to failure state %s" % - (resource.id, resource.status)) - raise exceptions.ResourceFailure(msg) - time.sleep(interval) - total_sleep += interval - msg = "Timeout waiting for %s to transition to %s" % (resource.id, status) - raise exceptions.ResourceTimeout(msg) - - -def wait_for_delete(session, resource, interval, wait): - """Wait for the resource to be deleted. + elif normalized_status in failures: + raise exceptions.ResourceFailure( + f"{name} transitioned to failure state {new_status}" + ) + + LOG.debug( + 'Still waiting for resource %s to reach state %s, ' + 'current state is %s', + name, + status, + new_status, + ) + + if callback: + progress = getattr(resource, 'progress', None) or 0 + callback(progress) + + raise RuntimeError('cannot reach this') + + +def wait_for_delete( + session: adapter.Adapter, + resource: ResourceT, + interval: int | float | None = 2, + wait: int | None = None, + callback: ty.Callable[[int], None] | None = None, +) -> ResourceT: + """Wait for a resource to be deleted. :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` :param resource: The resource to wait on to be deleted. - :type resource: :class:`~openstack.resource.Resource` :param interval: Number of seconds to wait between checks. :param wait: Maximum number of seconds to wait for the delete. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. - :return: Method returns self on success. + :return: The original resource. :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to status failed to occur in wait seconds. + to status failed to occur in wait seconds. 
""" - total_sleep = 0 - while total_sleep < wait: + orig_resource = resource + for count in utils.iterate_timeout( + timeout=wait, + message=( + f"Timeout waiting for {resource.__class__.__name__}:{resource.id} " + f"to delete" + ), + wait=interval, + ): try: - resource.get(session) + resource = resource.fetch(session, skip_cache=True) + if not resource: + return orig_resource + # Some resources like VolumeAttachment don't have status field. + if hasattr(resource, 'status'): + if resource.status.lower() == 'deleted': + return resource except exceptions.NotFoundException: - return resource - time.sleep(interval) - total_sleep += interval - msg = "Timeout waiting for %s delete" % (resource.id) - raise exceptions.ResourceTimeout(msg) + return orig_resource + + if callback: + progress = getattr(resource, 'progress', None) or 0 + callback(progress) + + raise RuntimeError('cannot reach this') diff --git a/openstack/resource2.py b/openstack/resource2.py deleted file mode 100644 index 057fc27af2..0000000000 --- a/openstack/resource2.py +++ /dev/null @@ -1,891 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The :class:`~openstack.resource.Resource` class is a base -class that represent a remote resource. 
The attributes that -comprise a request or response for this resource are specified -as class members on the Resource subclass where their values -are of a component type, including :class:`~openstack.resource2.Body`, -:class:`~openstack.resource2.Header`, and :class:`~openstack.resource2.URI`. - -For update management, :class:`~openstack.resource2.Resource` employs -a series of :class:`~openstack.resource2._ComponentManager` instances -to look after the attributes of that particular component type. This is -particularly useful for Body and Header types, so that only the values -necessary are sent in requests to the server. - -When making requests, each of the managers are looked at to gather the -necessary URI, body, and header data to build a request to be sent -via keystoneauth's sessions. Responses from keystoneauth are then -converted into this Resource class' appropriate components and types -and then returned to the caller. -""" - -import collections -import itertools -import time - -from openstack import exceptions -from openstack import format -from openstack import utils - - -class _BaseComponent(object): - - # The name this component is being tracked as in the Resource - key = None - - def __init__(self, name, type=None, default=None, alternate_id=False): - """A typed descriptor for a component that makes up a Resource - - :param name: The name this component exists as on the server - :param type: The type this component is expected to be by the server. - By default this is None, meaning any value you specify - will work. If you specify type=dict and then set a - component to a string, __set__ will fail, for example. - :param default: Typically None, but any other default can be set. - :param alternate_id: When `True`, this property is known - internally as a value that can be sent - with requests that require an ID but - when `id` is not a name the Resource has. 
- This is a relatively uncommon case, and this - setting should only be used once per Resource. - """ - self.name = name - self.type = type - self.default = default - self.alternate_id = alternate_id - - def __get__(self, instance, owner): - if instance is None: - return None - - attributes = getattr(instance, self.key) - - try: - value = attributes[self.name] - except KeyError: - return self.default - - # self.type() should not be called on None objects. - if value is None: - return None - - if self.type and not isinstance(value, self.type): - if issubclass(self.type, format.Formatter): - value = self.type.deserialize(value) - else: - value = self.type(value) - - return value - - def __set__(self, instance, value): - if (self.type and not isinstance(value, self.type) and - value != self.default): - if issubclass(self.type, format.Formatter): - value = self.type.serialize(value) - else: - value = str(self.type(value)) # validate to fail fast - - attributes = getattr(instance, self.key) - attributes[self.name] = value - - def __delete__(self, instance): - try: - attributes = getattr(instance, self.key) - del attributes[self.name] - except KeyError: - pass - - -class Body(_BaseComponent): - """Body attributes""" - - key = "_body" - - -class Header(_BaseComponent): - """Header attributes""" - - key = "_header" - - -class URI(_BaseComponent): - """URI attributes""" - - key = "_uri" - - -class _ComponentManager(collections.MutableMapping): - """Storage of a component type""" - - def __init__(self, attributes=None, synchronized=False): - self.attributes = dict() if attributes is None else attributes.copy() - self._dirty = set() if synchronized else set(self.attributes.keys()) - - def __getitem__(self, key): - return self.attributes[key] - - def __setitem__(self, key, value): - try: - orig = self.attributes[key] - except KeyError: - changed = True - else: - changed = orig != value - - if changed: - self.attributes[key] = value - self._dirty.add(key) - - def 
__delitem__(self, key): - del self.attributes[key] - self._dirty.add(key) - - def __iter__(self): - return iter(self.attributes) - - def __len__(self): - return len(self.attributes) - - @property - def dirty(self): - """Return a dict of modified attributes""" - return dict((key, self.attributes.get(key, None)) - for key in self._dirty) - - def clean(self): - """Signal that the resource no longer has modified attributes""" - self._dirty = set() - - -class _Request(object): - """Prepared components that go into a KSA request""" - - def __init__(self, uri, body, headers): - self.uri = uri - self.body = body - self.headers = headers - - -class QueryParameters(object): - - def __init__(self, *names, **mappings): - """Create a dict of accepted query parameters - - :param names: List of strings containing client-side query parameter - names. Each name in the list maps directly to the name - expected by the server. - - :param mappings: Key-value pairs where the key is the client-side - name we'll accept here and the value is the name - the server expects, e.g, changes_since=changes-since - - By default, both limit and marker are included in the initial mapping - as they're the most common query parameters used for listing resources. - """ - self._mapping = {"limit": "limit", "marker": "marker"} - self._mapping.update(dict({name: name for name in names}, **mappings)) - - def _transpose(self, query): - """Transpose the keys in query based on the mapping - - :param dict query: Collection of key-value pairs where each key is the - client-side parameter name to be transposed to its - server side name. - """ - result = {} - for key, value in self._mapping.items(): - if key in query: - result[value] = query[key] - return result - - -class Resource(object): - - #: Singular form of key for resource. - resource_key = None - #: Plural form of key for resource. - resources_key = None - - #: The ID of this resource. - id = Body("id") - #: The name of this resource. 
- name = Body("name") - #: The location of this resource. - location = Header("Location") - - #: Mapping of accepted query parameter names. - _query_mapping = QueryParameters() - - #: The base part of the URI for this resource. - base_path = "" - - #: The service associated with this resource to find the service URL. - service = None - - #: Allow create operation for this resource. - allow_create = False - #: Allow get operation for this resource. - allow_get = False - #: Allow update operation for this resource. - allow_update = False - #: Allow delete operation for this resource. - allow_delete = False - #: Allow list operation for this resource. - allow_list = False - #: Allow head operation for this resource. - allow_head = False - #: Use PATCH for update operations on this resource. - patch_update = False - #: Use PUT for create operations on this resource. - put_create = False - - def __init__(self, _synchronized=False, **attrs): - """The base resource - - :param bool _synchronized: This is not intended to be used directly. - See :meth:`~openstack.resource2.Resource.new` and - :meth:`~openstack.resource2.Resource.existing`. - """ - - # NOTE: _collect_attrs modifies **attrs in place, removing - # items as they match up with any of the body, header, - # or uri mappings. - body, header, uri = self._collect_attrs(attrs) - # TODO(briancurtin): at this point if attrs has anything left - # they're not being set anywhere. Log this? Raise exception? - # How strict should we be here? Should strict be an option? 
- - self._body = _ComponentManager(attributes=body, - synchronized=_synchronized) - self._header = _ComponentManager(attributes=header, - synchronized=_synchronized) - self._uri = _ComponentManager(attributes=uri, - synchronized=_synchronized) - - def __repr__(self): - pairs = ["%s=%s" % (k, v) for k, v in dict(itertools.chain( - self._body.attributes.items(), - self._header.attributes.items(), - self._uri.attributes.items())).items()] - args = ", ".join(pairs) - - return "%s.%s(%s)" % ( - self.__module__, self.__class__.__name__, args) - - def __eq__(self, comparand): - """Return True if another resource has the same contents""" - return all([self._body.attributes == comparand._body.attributes, - self._header.attributes == comparand._header.attributes, - self._uri.attributes == comparand._uri.attributes]) - - def __getattribute__(self, name): - """Return an attribute on this instance - - This is mostly a pass-through except for a specialization on - the 'id' name, as this can exist under a different name via the - `alternate_id` argument to resource.Body. - """ - if name == "id": - if name in self._body: - return self._body[name] - else: - try: - return self._body[self._alternate_id()] - except KeyError: - return None - else: - return object.__getattribute__(self, name) - - def _update(self, **attrs): - """Given attributes, update them on this instance - - This is intended to be used from within the proxy - layer when updating instances that may have already - been created. - """ - body, header, uri = self._collect_attrs(attrs) - - self._body.update(body) - self._header.update(header) - self._uri.update(uri) - - def _collect_attrs(self, attrs): - """Given attributes, return a dict per type of attribute - - This method splits up **attrs into separate dictionaries - that correspond to the relevant body, header, and uri - attributes that exist on this class. 
- """ - body = self._consume_attrs(self._body_mapping(), attrs) - header = self._consume_attrs(self._header_mapping(), attrs) - uri = self._consume_attrs(self._uri_mapping(), attrs) - - return body, header, uri - - def _consume_attrs(self, mapping, attrs): - """Given a mapping and attributes, return relevant matches - - This method finds keys in attrs that exist in the mapping, then - both transposes them to their server-side equivalent key name - to be returned, and finally pops them out of attrs. This allows - us to only calculate their place and existence in a particular - type of Resource component one time, rather than looking at the - same source dict several times. - """ - relevant_attrs = {} - consumed_keys = [] - for key in attrs: - if key in mapping: - # Convert client-side key names into server-side. - relevant_attrs[mapping[key]] = attrs[key] - consumed_keys.append(key) - elif key in mapping.values(): - # Server-side names can be stored directly. - relevant_attrs[key] = attrs[key] - consumed_keys.append(key) - - for key in consumed_keys: - attrs.pop(key) - - return relevant_attrs - - @classmethod - def _get_mapping(cls, component): - """Return a dict of attributes of a given component on the class""" - mapping = {} - # Since we're looking at class definitions we need to include - # subclasses, so check the whole MRO. - for klass in cls.__mro__: - for key, value in klass.__dict__.items(): - if isinstance(value, component): - # Make sure base classes don't end up overwriting - # mappings we've found previously in subclasses. 
- if key not in mapping: - mapping[key] = value.name - return mapping - - @classmethod - def _body_mapping(cls): - """Return all Body members of this class""" - return cls._get_mapping(Body) - - @classmethod - def _header_mapping(cls): - """Return all Header members of this class""" - return cls._get_mapping(Header) - - @classmethod - def _uri_mapping(cls): - """Return all URI members of this class""" - return cls._get_mapping(URI) - - @classmethod - def _alternate_id(cls): - """Return the name of any value known as an alternate_id - - NOTE: This will only ever return the first such alternate_id. - Only one alternate_id should be specified. - - Returns an empty string if no name exists, as this method is - consumed by _get_id and passed to getattr. - """ - for value in cls.__dict__.values(): - if isinstance(value, Body): - if value.alternate_id: - return value.name - return "" - - @staticmethod - def _get_id(value): - """If a value is a Resource, return the canonical ID - - This will return either the value specified by `id` or - `alternate_id` in that order if `value` is a Resource. - If `value` is anything other than a Resource, likely to - be a string already representing an ID, it is returned. - """ - if isinstance(value, Resource): - return value.id - else: - return value - - @classmethod - def new(cls, **kwargs): - """Create a new instance of this resource. - - When creating the instance set the ``_synchronized`` parameter - of :class:`Resource` to ``False`` to indicate that the resource does - not yet exist on the server side. This marks all attributes passed - in ``**kwargs`` as "dirty" on the resource, and thusly tracked - as necessary in subsequent calls such as :meth:`update`. - - :param dict kwargs: Each of the named arguments will be set as - attributes on the resulting Resource object. - """ - return cls(_synchronized=False, **kwargs) - - @classmethod - def existing(cls, **kwargs): - """Create an instance of an existing remote resource. 
- - When creating the instance set the ``_synchronized`` parameter - of :class:`Resource` to ``True`` to indicate that it represents the - state of an existing server-side resource. As such, all attributes - passed in ``**kwargs`` are considered "clean", such that an immediate - :meth:`update` call would not generate a body of attributes to be - modified on the server. - - :param dict kwargs: Each of the named arguments will be set as - attributes on the resulting Resource object. - """ - return cls(_synchronized=True, **kwargs) - - def to_dict(self, body=True, headers=True, ignore_none=False): - """Return a dictionary of this resource's contents - - :param bool body: Include the :class:`~openstack.resource2.Body` - attributes in the returned dictionary. - :param bool headers: Include the :class:`~openstack.resource2.Header` - attributes in the returned dictionary. - :param bool ignore_none: When True, exclude key/value pairs where - the value is None. This will exclude - attributes that the server hasn't returned. - - :return: A dictionary of key/value pairs where keys are named - as they exist as attributes of this class. - """ - mapping = {} - - components = [] - if body: - components.append(Body) - if headers: - components.append(Header) - if not components: - raise ValueError( - "At least one of `body` or `headers` must be True") - - # isinstance stricly requires this to be a tuple - components = tuple(components) - - # NOTE: This is similar to the implementation in _get_mapping - # but is slightly different in that we're looking at an instance - # and we're mapping names on this class to their actual stored - # values. - # Since we're looking at class definitions we need to include - # subclasses, so check the whole MRO. - for klass in self.__class__.__mro__: - for key, value in klass.__dict__.items(): - if isinstance(value, components): - # Make sure base classes don't end up overwriting - # mappings we've found previously in subclasses. 
- if key not in mapping: - value = getattr(self, key, None) - if ignore_none and value is None: - continue - mapping[key] = value - - return mapping - - def _prepare_request(self, requires_id=True, prepend_key=False): - """Prepare a request to be sent to the server - - Create operations don't require an ID, but all others do, - so only try to append an ID when it's needed with - requires_id. Create and update operations sometimes require - their bodies to be contained within an dict -- if the - instance contains a resource_key and prepend_key=True, - the body will be wrapped in a dict with that key. - - Return a _Request object that contains the constructed URI - as well a body and headers that are ready to send. - Only dirty body and header contents will be returned. - """ - body = self._body.dirty - if prepend_key and self.resource_key is not None: - body = {self.resource_key: body} - - headers = self._header.dirty - - uri = self.base_path % self._uri.attributes - if requires_id: - id = self._get_id(self) - if id is None: - raise exceptions.InvalidRequest( - "Request requires an ID but none was found") - - uri = utils.urljoin(uri, id) - - return _Request(uri, body, headers) - - def _filter_component(self, component, mapping): - """Filter the keys in component based on a mapping - - This method converts a dict of server-side data to contain - only the appropriate keys for attributes on this instance. - """ - return {k: v for k, v in component.items() if k in mapping.values()} - - def _translate_response(self, response, has_body=True): - """Given a KSA response, inflate this instance with its data - - DELETE operations don't return a body, so only try to work - with a body when has_body is True. - - This method updates attributes that correspond to headers - and body on this instance and clears the dirty set. 
- """ - if has_body: - body = response.json() - if self.resource_key and self.resource_key in body: - body = body[self.resource_key] - - body = self._filter_component(body, self._body_mapping()) - self._body.attributes.update(body) - self._body.clean() - - headers = self._filter_component(response.headers, - self._header_mapping()) - self._header.attributes.update(headers) - self._header.clean() - - def create(self, session, prepend_key=True): - """Create a remote resource based on this instance. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param prepend_key: A boolean indicating whether the resource_key - should be prepended in a resource creation - request. Default to True. - - :return: This :class:`Resource` instance. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_create` is not set to ``True``. - """ - if not self.allow_create: - raise exceptions.MethodNotSupported(self, "create") - - if self.put_create: - request = self._prepare_request(requires_id=True, - prepend_key=prepend_key) - response = session.put(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) - else: - request = self._prepare_request(requires_id=False, - prepend_key=prepend_key) - response = session.post(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) - - self._translate_response(response) - return self - - def get(self, session, requires_id=True): - """Get a remote resource based on this instance. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param boolean requires_id: A boolean indicating whether resource ID - should be part of the requested URI. - :return: This :class:`Resource` instance. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_get` is not set to ``True``. 
- """ - if not self.allow_get: - raise exceptions.MethodNotSupported(self, "get") - - request = self._prepare_request(requires_id=requires_id) - response = session.get(request.uri, endpoint_filter=self.service) - - self._translate_response(response) - return self - - def head(self, session): - """Get headers from a remote resource based on this instance. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - - :return: This :class:`Resource` instance. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_head` is not set to ``True``. - """ - if not self.allow_head: - raise exceptions.MethodNotSupported(self, "head") - - request = self._prepare_request() - - response = session.head(request.uri, endpoint_filter=self.service, - headers={"Accept": ""}) - - self._translate_response(response) - return self - - def update(self, session, prepend_key=True, has_body=True): - """Update the remote resource based on this instance. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param prepend_key: A boolean indicating whether the resource_key - should be prepended in a resource update request. - Default to True. - - :return: This :class:`Resource` instance. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_update` is not set to ``True``. - """ - # Only try to update if we actually have anything to update. 
- if not any([self._body.dirty, self._header.dirty]): - return self - - if not self.allow_update: - raise exceptions.MethodNotSupported(self, "update") - - request = self._prepare_request(prepend_key=prepend_key) - - if self.patch_update: - response = session.patch(request.uri, endpoint_filter=self.service, - json=request.body, - headers=request.headers) - else: - response = session.put(request.uri, endpoint_filter=self.service, - json=request.body, headers=request.headers) - - self._translate_response(response, has_body=has_body) - return self - - def delete(self, session): - """Delete the remote resource based on this instance. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - - :return: This :class:`Resource` instance. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_update` is not set to ``True``. - """ - if not self.allow_delete: - raise exceptions.MethodNotSupported(self, "delete") - - request = self._prepare_request() - - response = session.delete(request.uri, endpoint_filter=self.service, - headers={"Accept": ""}) - - self._translate_response(response, has_body=False) - return self - - @classmethod - def list(cls, session, paginated=False, **params): - """This method is a generator which yields resource objects. - - This resource object list generator handles pagination and takes query - params for response filtering. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param bool paginated: ``True`` if a GET to this resource returns - a paginated series of responses, or ``False`` - if a GET returns only one page of data. 
- **When paginated is False only one - page of data will be returned regardless - of the API's support of pagination.** - :param dict params: These keyword arguments are passed through the - :meth:`~openstack.resource2.QueryParamter._transpose` method - to find if any of them match expected query parameters to be - sent in the *params* argument to - :meth:`~openstack.session.Session.get`. They are additionally - checked against the - :data:`~openstack.resource2.Resource.base_path` format string - to see if any path fragments need to be filled in by the contents - of this argument. - - :return: A generator of :class:`Resource` objects. - :raises: :exc:`~openstack.exceptions.MethodNotSupported` if - :data:`Resource.allow_list` is not set to ``True``. - """ - if not cls.allow_list: - raise exceptions.MethodNotSupported(cls, "list") - - more_data = True - query_params = cls._query_mapping._transpose(params) - uri = cls.base_path % params - - while more_data: - resp = session.get(uri, endpoint_filter=cls.service, - headers={"Accept": "application/json"}, - params=query_params) - resp = resp.json() - if cls.resources_key: - resp = resp[cls.resources_key] - - if not resp: - more_data = False - - # Keep track of how many items we've yielded. If we yielded - # less than our limit, we don't need to do an extra request - # to get back an empty data set, which acts as a sentinel. - yielded = 0 - new_marker = None - for data in resp: - # Do not allow keys called "self" through. Glance chose - # to name a key "self", so we need to pop it out because - # we can't send it through cls.existing and into the - # Resource initializer. "self" is already the first - # argument and is practically a reserved word. 
- data.pop("self", None) - - value = cls.existing(**data) - new_marker = value.id - yielded += 1 - yield value - - if not paginated: - return - if "limit" in query_params and yielded < query_params["limit"]: - return - query_params["limit"] = yielded - query_params["marker"] = new_marker - - @classmethod - def _get_one_match(cls, name_or_id, results): - """Given a list of results, return the match""" - the_result = None - for maybe_result in results: - id_value = cls._get_id(maybe_result) - name_value = maybe_result.name - - if (id_value == name_or_id) or (name_value == name_or_id): - # Only allow one resource to be found. If we already - # found a match, raise an exception to show it. - if the_result is None: - the_result = maybe_result - else: - msg = "More than one %s exists with the name '%s'." - msg = (msg % (cls.__name__, name_or_id)) - raise exceptions.DuplicateResource(msg) - - return the_result - - @classmethod - def find(cls, session, name_or_id, ignore_missing=True, **params): - """Find a resource by its name or id. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param name_or_id: This resource's identifier, if needed by - the request. The default is ``None``. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :param dict params: Any additional parameters to be passed into - underlying methods, such as to - :meth:`~openstack.resource2.Resource.existing` - in order to pass on URI parameters. - - :return: The :class:`Resource` object matching the given name or id - or None if nothing matches. - :raises: :class:`openstack.exceptions.DuplicateResource` if more - than one resource is found for this request. 
- :raises: :class:`openstack.exceptions.ResourceNotFound` if nothing - is found and ignore_missing is ``False``. - """ - # Try to short-circuit by looking directly for a matching ID. - try: - match = cls.existing(id=name_or_id, **params) - return match.get(session) - except exceptions.NotFoundException: - pass - - data = cls.list(session, **params) - - result = cls._get_one_match(name_or_id, data) - if result is not None: - return result - - if ignore_missing: - return None - raise exceptions.ResourceNotFound( - "No %s found for %s" % (cls.__name__, name_or_id)) - - -def wait_for_status(session, resource, status, failures, interval, wait): - """Wait for the resource to be in a particular status. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource: The resource to wait on to reach the status. The resource - must have a status attribute. - :type resource: :class:`~openstack.resource.Resource` - :param status: Desired status of the resource. - :param list failures: Statuses that would indicate the transition - failed such as 'ERROR'. - :param interval: Number of seconds to wait between checks. - :param wait: Maximum number of seconds to wait for transition. - - :return: Method returns self on success. - :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to status failed to occur in wait seconds. - :raises: :class:`~openstack.exceptions.ResourceFailure` resource - transitioned to one of the failure states. 
- :raises: :class:`~AttributeError` if the resource does not have a status - attribute - """ - if resource.status == status: - return resource - - total_sleep = 0 - if failures is None: - failures = [] - - while total_sleep < wait: - resource.get(session) - if resource.status == status: - return resource - if resource.status in failures: - msg = ("Resource %s transitioned to failure state %s" % - (resource.id, resource.status)) - raise exceptions.ResourceFailure(msg) - time.sleep(interval) - total_sleep += interval - msg = "Timeout waiting for %s to transition to %s" % (resource.id, status) - raise exceptions.ResourceTimeout(msg) - - -def wait_for_delete(session, resource, interval, wait): - """Wait for the resource to be deleted. - - :param session: The session to use for making this request. - :type session: :class:`~openstack.session.Session` - :param resource: The resource to wait on to be deleted. - :type resource: :class:`~openstack.resource.Resource` - :param interval: Number of seconds to wait between checks. - :param wait: Maximum number of seconds to wait for the delete. - - :return: Method returns self on success. - :raises: :class:`~openstack.exceptions.ResourceTimeout` transition - to status failed to occur in wait seconds. - """ - total_sleep = 0 - while total_sleep < wait: - try: - resource.get(session) - except exceptions.NotFoundException: - return resource - time.sleep(interval) - total_sleep += interval - msg = "Timeout waiting for %s delete" % (resource.id) - raise exceptions.ResourceTimeout(msg) diff --git a/openstack/service_description.py b/openstack/service_description.py new file mode 100644 index 0000000000..51a2402f93 --- /dev/null +++ b/openstack/service_description.py @@ -0,0 +1,408 @@ +# Copyright 2018 Red Hat, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty +import warnings + +import os_service_types + +from openstack import _log +from openstack import exceptions +from openstack import proxy as proxy_mod +from openstack import warnings as os_warnings + +__all__ = [ + 'ServiceDescription', +] + +if ty.TYPE_CHECKING: + from openstack import connection + +_logger = _log.setup_logging('openstack') +_service_type_manager = os_service_types.ServiceTypes() + + +class _ServiceDisabledProxyShim: + def __init__(self, service_type: str, reason: str | None) -> None: + self.service_type = service_type + self.reason = reason + + def __getattr__(self, item: ty.Any) -> ty.Any: + raise exceptions.ServiceDisabledException( + "Service '{service_type}' is disabled because its configuration " + "could not be loaded. {reason}".format( + service_type=self.service_type, reason=self.reason or '' + ) + ) + + +class ServiceDescription(ty.Generic[proxy_mod.ProxyT]): + #: Dictionary of supported versions and proxy classes for that version + supported_versions: dict[str, type[proxy_mod.Proxy]] = {} + #: main service_type to use to find this service in the catalog + service_type: str + #: list of aliases this service might be registered as + aliases: list[str] = [] + + def __init__( + self, + service_type: str, + supported_versions: dict[str, type[proxy_mod.Proxy]] | None = None, + aliases: list[str] | None = None, + ): + """Class describing how to interact with a REST service. + + Each service in an OpenStack cloud needs to be found by looking + for it in the catalog. 
Once the endpoint is found, REST calls can + be made, but a Proxy class and some Resource objects are needed + to provide an object interface. + + Instances of ServiceDescription can be passed to + `openstack.connection.Connection.add_service`, or a list can be + passed to the `openstack.connection.Connection` constructor in + the ``extra_services`` argument. + + All three parameters can be provided at instantation time, or + a service-specific subclass can be used that sets the attributes + directly. + + :param service_type: service_type to look for in the keystone catalog + :param aliases: Optional list of aliases, if there is more than one + name that might be used to register the service in the catalog. + """ + self.service_type = service_type or self.service_type + self.supported_versions = ( + supported_versions or self.supported_versions or {} + ) + + self.aliases = aliases or self.aliases + self.all_types = [service_type, *self.aliases] + + @ty.overload + def __get__(self, instance: None, owner: None) -> 'ServiceDescription': ... + + # NOTE(stephenfin): We would like to type instance as + # connection.Connection, but due to how we construct that object, we can't + # do so yet. + @ty.overload + def __get__( + self, + instance: ty.Any, + owner: type[object], + ) -> proxy_mod.ProxyT: ... + + def __get__( + self, + instance: ty.Any, + owner: type[object] | None, + ) -> 'ServiceDescription | proxy_mod.ProxyT': + if instance is None: + return self + + if self.service_type in instance._proxies: + return ty.cast( + proxy_mod.ProxyT, instance._proxies[self.service_type] + ) + + proxy = self._make_proxy(instance) + + if isinstance(proxy, _ServiceDisabledProxyShim): + instance._proxies[self.service_type] = proxy + return ty.cast( + proxy_mod.ProxyT, + instance._proxies[self.service_type], + ) + + # The keystone proxy has a method called get_endpoint + # that is about managing keystone endpoints. This is + # unfortunate. 
+ try: + endpoint = proxy_mod.Proxy.get_endpoint(proxy) + except IndexError: + # It's best not to look to closely here. This is + # to support old placement. + # There was a time when it had no status entry + # in its version discovery doc (OY) In this case, + # no endpoints get through version discovery + # filtering. In order to deal with that, catch + # the IndexError thrown by keystoneauth and + # set an endpoint_override for the user to the + # url in the catalog and try again. + self._set_override_from_catalog(instance.config) + proxy = self._make_proxy(instance) + endpoint = proxy_mod.Proxy.get_endpoint(proxy) + + if instance._strict_proxies: + self._validate_proxy(proxy, endpoint) + + proxy._connection = instance + + instance._proxies[self.service_type] = proxy + return instance._proxies[self.service_type] + + def _set_override_from_catalog(self, config): + override = config._get_endpoint_from_catalog( + self.service_type, + proxy_mod.Proxy, + ) + config.set_service_value( + 'endpoint_override', + self.service_type, + override, + ) + + def _validate_proxy(self, proxy, endpoint): + exc = None + service_url = getattr(proxy, 'skip_discovery', None) + try: + # Don't go too wild for e.g. swift + if service_url is None: + service_url = proxy.get_endpoint_data().service_url + except Exception as e: + exc = e + if exc or not endpoint or not service_url: + raise exceptions.ServiceDiscoveryException( + "Failed to create a working proxy for service {service_type}: " + "{message}".format( + service_type=self.service_type, + message=exc or "No valid endpoint was discoverable.", + ) + ) + + def _make_proxy( + self, + instance: 'connection.Connection', + ) -> proxy_mod.ProxyT | proxy_mod.Proxy: + """Create a Proxy for the service in question. + + :param instance: The `openstack.connection.Connection` we're working + with. + """ + config = instance.config + + # This is not a valid service. 
+ if not config.has_service(self.service_type): + # NOTE(stephenfin): Yes, we are lying here. But that's okay: they + # should behave identically in a typing context + return ty.cast( + proxy_mod.ProxyT, + _ServiceDisabledProxyShim( + self.service_type, + config.get_disabled_reason(self.service_type), + ), + ) + + # This is a valid service type, but we don't know anything about it so + # the user is explicitly just using us for a passthrough REST adapter. + # Skip all the lower logic. + if not self.supported_versions: + temp_client = config.get_session_client( + self.service_type, + allow_version_hack=True, + ) + return temp_client + + # Check to see if we've got config that matches what we understand in + # the SDK. + version_string = config.get_api_version(self.service_type) + endpoint_override = config.get_endpoint(self.service_type) + + # If the user doesn't give a version in config, but we only support + # one version, then just use that version. + if not version_string and len(self.supported_versions) == 1: + version_string = next(iter(self.supported_versions)) + + proxy_obj = None + if endpoint_override and version_string: + # Both endpoint override and version_string are set. We therefore + # don't need to do discovery: just trust the user. + proxy_class = self.supported_versions.get(version_string[0]) + if proxy_class: + proxy_obj = config.get_session_client( + self.service_type, + constructor=proxy_class, + ) + else: + warnings.warn( + f"The configured version, {version_string} for service " + f"{self.service_type} is not known or supported by " + f"openstacksdk. 
The resulting Proxy object will only " + f"have direct passthrough REST capabilities.", + category=os_warnings.UnsupportedServiceVersion, + ) + elif endpoint_override: + temp_adapter = config.get_session_client(self.service_type) + endpoint_data = temp_adapter.get_endpoint_data() + if not endpoint_data: + raise exceptions.ServiceDiscoveryException( + f"Failed to create a working proxy for service " + f"{self.service_type}: No endpoint data found." + ) + + api_version = endpoint_data.api_version + if not api_version: + raise exceptions.ServiceDiscoveryException( + f"Failed to create a working proxy for service " + f"{self.service_type}: No version in endpoint data." + ) + + proxy_class = self.supported_versions.get(str(api_version[0])) + if proxy_class: + proxy_obj = config.get_session_client( + self.service_type, + constructor=proxy_class, + ) + else: + warnings.warn( + f"Service {self.service_type} has an endpoint override " + f"set but the version discovered at that endpoint, " + f"{api_version}, is not supported by openstacksdk. " + f"The resulting Proxy object will only have direct " + f"passthrough REST capabilities.", + category=os_warnings.UnsupportedServiceVersion, + ) + + if proxy_obj: + if getattr(proxy_obj, 'skip_discovery', False): + # Some services, like swift, don't have discovery. While + # keystoneauth will behave correctly and handle such + # scenarios, it's not super efficient as it involves trying + # and falling back a few times. + return proxy_obj + + data = proxy_obj.get_endpoint_data() + if not data: + if instance._strict_proxies: + raise exceptions.ServiceDiscoveryException( + f"Failed to create a working proxy for service " + f"{self.service_type}: No endpoint data found." + ) + else: + return proxy_obj + + # If we've gotten here with a proxy object it means we have + # an endpoint_override in place. 
If the catalog_url and + # service_url don't match, which can happen if there is a + # None plugin and auth.endpoint like with standalone ironic, + # we need to be explicit that this service has an endpoint_override + # so that subsequent discovery calls don't get made incorrectly. + if data.catalog_url != data.service_url: + ep_key = '{service_type}_endpoint_override'.format( + service_type=self.service_type.replace('-', '_') + ) + config.config[ep_key] = data.service_url + proxy_obj = config.get_session_client( + self.service_type, + constructor=proxy_class, + ) + + return proxy_obj + + # Make an adapter to let discovery take over + supported_versions = sorted([int(f) for f in self.supported_versions]) + if version_string: + if getattr( + self.supported_versions[str(supported_versions[0])], + 'skip_discovery', + False, + ): + # Requested service does not support version discovery + # In this case it is more efficient to set the + # endpoint_override to the current catalog endpoint value, + # otherwise next request will try to perform discovery. 
+ + temp_adapter = config.get_session_client(self.service_type) + ep_override = temp_adapter.get_endpoint(skip_discovery=True) + + ep_key = '{service_type}_endpoint_override'.format( + service_type=self.service_type.replace('-', '_') + ) + config.config[ep_key] = ep_override + + return config.get_session_client( + self.service_type, + version=version_string, + constructor=self.supported_versions[ + str(supported_versions[0]) + ], + allow_version_hack=True, + ) + + temp_adapter = config.get_session_client( + self.service_type, + allow_version_hack=True, + version=version_string, + ) + else: + temp_adapter = config.get_session_client( + self.service_type, + allow_version_hack=True, + max_version=f'{supported_versions[-1]!s}.latest', + min_version=f'{supported_versions[0]!s}', + ) + + found_version = temp_adapter.get_api_major_version() + if found_version is None: + region_name = instance.config.get_region_name(self.service_type) + raise exceptions.NotSupported( + f"The {self.service_type} service for " + f"{instance.name}:{region_name} exists but does not have " + f"any supported versions." + ) + + proxy_class = self.supported_versions.get(str(found_version[0])) + if proxy_class: + if version_string: + return config.get_session_client( + self.service_type, + constructor=proxy_class, + allow_version_hack=True, + version=version_string, + ) + + return config.get_session_client( + self.service_type, + version=version_string, + constructor=proxy_class, + allow_version_hack=True, + max_version=f'{supported_versions[-1]!s}.latest', + min_version=f'{supported_versions[0]!s}', + ) + + # No proxy_class + # Maybe openstacksdk is being used for the passthrough + # REST API proxy layer for an unknown service in the + # service catalog that also doesn't have any useful + # version discovery? + warnings.warn( + f"Service {self.service_type} has no discoverable version. 
" + "The resulting Proxy object will only have direct " + "passthrough REST capabilities.", + category=os_warnings.UnsupportedServiceVersion, + ) + return temp_adapter + + def __set__(self, instance, value): + raise AttributeError('Service Descriptors cannot be set') + + def __delete__(self, instance): + # NOTE(gtema) Some clouds are not very fast (or interested at all) + # in bringing their changes upstream. If there are incompatible changes + # downstream we need to allow overriding default implementation by + # deleting service_type attribute of the connection and then + # "add_service" with new implementation. + # This is intentionally designed to be hard to use to show how bad it + # is not to contribute changes back + for service_type in self.all_types: + if service_type in instance._proxies: + del instance._proxies[service_type] diff --git a/openstack/service_filter.py b/openstack/service_filter.py deleted file mode 100644 index 95bc2aa120..0000000000 --- a/openstack/service_filter.py +++ /dev/null @@ -1,191 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The :class:`~openstack.service_filter.ServiceFilter` is the base class -for service identifiers and user service preferences. Each -:class:`~openstack.resource.Resource` has a service identifier to -associate the resource with a service. An example of a service identifier -would be ``openstack.compute.compute_service.ComputeService``. 
-The preferences are stored in the -:class:`~openstack.profile.Profile` object. -The service preference and the service identifier are joined to create a -filter to match a service. - -Examples --------- - -The :class:`~openstack.service_filter.ServiceFilter` class can be built -with a service type, interface, region, name, and version. - -Create a service filter -~~~~~~~~~~~~~~~~~~~~~~~ - -Create a compute service and service preference. Join the services -and match:: - - from openstack import service_filter - from openstack.compute import compute_service - default = compute_service.ComputeService() - preference = service_filter.ServiceFilter('compute', version='v2') - result = preference.join(default) - matches = (result.match_service_type('compute') and - result.match_service_name('Hal9000') and - result.match_region('DiscoveryOne') and - result.match_interface('public')) - print(str(result)) - print("matches=" + str(matches)) - -The resulting output from the code:: - - service_type=compute,interface=public,version=v2 - matches=True -""" - - -class ValidVersion(object): - - def __init__(self, module, path=None): - """" Valid service version. - - :param string module: Module associated with version. - :param string path: URL path version. - """ - self.module = module - self.path = path or module - - -class ServiceFilter(dict): - UNVERSIONED = '' - PUBLIC = 'public' - INTERNAL = 'internal' - ADMIN = 'admin' - valid_versions = [] - - def __init__(self, service_type, interface=PUBLIC, region=None, - service_name=None, version=None, api_version=None, - requires_project_id=False): - """Create a service identifier. - - :param string service_type: The desired type of service. - :param string interface: The exposure of the endpoint. Should be - `public` (default), `internal` or `admin`. - :param string region: The desired region (optional). - :param string service_name: Name of the service - :param string version: Version of service to use. 
- :param string api_version: Microversion of service supported. - :param bool requires_project_id: True if this service's endpoint - expects project id to be included. - """ - self['service_type'] = service_type.lower() - self['interface'] = interface - self['region_name'] = region - self['service_name'] = service_name - self['version'] = version - self['api_version'] = api_version - self['requires_project_id'] = requires_project_id - - @property - def service_type(self): - return self['service_type'] - - @property - def interface(self): - return self['interface'] - - @interface.setter - def interface(self, value): - self['interface'] = value - - @property - def region(self): - return self['region_name'] - - @region.setter - def region(self, value): - self['region_name'] = value - - @property - def service_name(self): - return self['service_name'] - - @service_name.setter - def service_name(self, value): - self['service_name'] = value - - @property - def version(self): - return self['version'] - - @version.setter - def version(self, value): - self['version'] = value - - @property - def api_version(self): - return self['api_version'] - - @api_version.setter - def api_version(self, value): - self['api_version'] = value - - @property - def requires_project_id(self): - return self['requires_project_id'] - - @requires_project_id.setter - def requires_project_id(self, value): - self['requires_project_id'] = value - - @property - def path(self): - return self['path'] - - @path.setter - def path(self, value): - self['path'] = value - - def get_path(self, version=None): - if not self.version: - self.version = version - return self.get('path', self._get_valid_version().path) - - def get_filter(self): - filter = dict(self) - del filter['version'] - return filter - - def _get_valid_version(self): - if self.valid_versions: - if self.version: - for valid in self.valid_versions: - # NOTE(thowe): should support fuzzy match e.g: v2.1==v2 - if self.version.startswith(valid.module): 
- return valid - return self.valid_versions[0] - return ValidVersion('') - - def get_module(self): - """Get the full module name associated with the service.""" - module = self.__class__.__module__.split('.') - module = ".".join(module[:-1]) - module = module + "." + self._get_valid_version().module - return module - - def get_service_module(self): - """Get the module version of the service name. - - This would often be the same as the service type except in cases like - object store where the service type is `object-store` and the module - is `object_store`. - """ - return self.__class__.__module__.split('.')[-2] diff --git a/openstack/session.py b/openstack/session.py deleted file mode 100644 index 5b087fcf3b..0000000000 --- a/openstack/session.py +++ /dev/null @@ -1,357 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -""" -The :class:`~openstack.session.Session` overrides -:class:`~keystoneauth1.session.Session` to provide end point filtering and -mapping KSA exceptions to SDK exceptions. - -""" -from collections import namedtuple -import logging - -try: - from itertools import accumulate -except ImportError: - # itertools.accumulate was added to Python 3.2, and since we have to - # support Python 2 for some reason, we include this equivalent from - # the 3.x docs. While it's stated that it's a rough equivalent, it's - # good enough for the purposes we're using it for. 
- # https://docs.python.org/dev/library/itertools.html#itertools.accumulate - def accumulate(iterable, func=None): - """Return running totals""" - # accumulate([1,2,3,4,5]) --> 1 3 6 10 15 - # accumulate([1,2,3,4,5], operator.mul) --> 1 2 6 24 120 - it = iter(iterable) - try: - total = next(it) - except StopIteration: - return - yield total - for element in it: - total = func(total, element) - yield total - -from keystoneauth1 import exceptions as _exceptions -from keystoneauth1 import session as _session - -from openstack import exceptions -from openstack import utils -from openstack import version as openstack_version - -from six.moves.urllib import parse - -DEFAULT_USER_AGENT = "openstacksdk/%s" % openstack_version.__version__ -API_REQUEST_HEADER = "openstack-api-version" - -Version = namedtuple("Version", ["major", "minor"]) - -_logger = logging.getLogger(__name__) - - -def map_exceptions(func): - def map_exceptions_wrapper(*args, **kwargs): - try: - return func(*args, **kwargs) - except _exceptions.HttpError as e: - if e.http_status == 404: - raise exceptions.NotFoundException( - message=e.message, details=e.details, - response=e.response, request_id=e.request_id, - url=e.url, method=e.method, - http_status=e.http_status, cause=e) - else: - raise exceptions.HttpException( - message=e.message, details=e.details, - response=e.response, request_id=e.request_id, - url=e.url, method=e.method, - http_status=e.http_status, cause=e) - except _exceptions.ClientException as e: - raise exceptions.SDKException(message=e.message, cause=e) - - return map_exceptions_wrapper - - -class Session(_session.Session): - - def __init__(self, profile, user_agent=None, **kwargs): - """Create a new Keystone auth session with a profile. - - :param profile: If the user has any special profiles such as the - service name, region, version or interface, they may be provided - in the profile object. 
If no profiles are provided, the - services that appear first in the service catalog will be used. - :param user_agent: A User-Agent header string to use for the - request. If not provided, a default of - :attr:`~openstack.session.DEFAULT_USER_AGENT` - is used, which contains the openstacksdk version - When a non-None value is passed, it will be - prepended to the default. - :type profile: :class:`~openstack.profile.Profile` - """ - if user_agent is not None: - self.user_agent = "%s %s" % (user_agent, DEFAULT_USER_AGENT) - else: - self.user_agent = DEFAULT_USER_AGENT - - self.profile = profile - api_version_header = self._get_api_requests() - self.endpoint_cache = {} - - super(Session, self).__init__(user_agent=self.user_agent, - additional_headers=api_version_header, - **kwargs) - - def _get_api_requests(self): - """Get API micro-version requests. - - :param profile: A profile object that contains customizations about - service name, region, version, interface or - api_version. - :return: A standard header string if there is any specialization in - API microversion, or None if no such request exists. - """ - if self.profile is None: - return None - - req = [] - for svc in self.profile.get_services(): - if svc.service_type and svc.api_version: - req.append(" ".join([svc.service_type, svc.api_version])) - if req: - return {API_REQUEST_HEADER: ",".join(req)} - - return None - - class _Endpoint(object): - - def __init__(self, uri, versions, - needs_project_id=False, project_id=None): - self.uri = uri - self.versions = versions - self.needs_project_id = needs_project_id - self.project_id = project_id - - def __eq__(self, other): - return all([self.uri == other.uri, - self.versions == other.versions, - self.needs_project_id == other.needs_project_id, - self.project_id == other.project_id]) - - def _parse_versions_response(self, uri): - """Look for a "versions" JSON response at `uri` - - Return versions if we get them, otherwise return None. 
- """ - _logger.debug("Looking for versions at %s", uri) - - try: - response = self.get(uri) - except exceptions.HttpException: - return None - - try: - response_body = response.json() - except Exception: - # This could raise a number of things, all of which are bad. - # ValueError, JSONDecodeError, etc. Rather than pick and choose - # a bunch of things that might happen, catch 'em all. - return None - - if "versions" in response_body: - versions = response_body["versions"] - # Normalize the version response. Identity nests the versions - # a level deeper than others, inside of a "values" dictionary. - if "values" in versions: - versions = versions["values"] - return self._Endpoint(uri, versions) - - return None - - def _get_endpoint_versions(self, service_type, endpoint): - """Get available endpoints from the remote service - - Take the endpoint that the Service Catalog gives us as a base - and then work from there. In most cases, the path-less 'root' - of the URI is the base of the service which contains the versions. - In other cases, we need to discover it by trying the paths that - eminate from that root. Generally this is achieved in one roundtrip - request/response, but depending on how the service is installed, - it may require multiple requests. - """ - parts = parse.urlparse(endpoint) - - just_root = "://".join([parts.scheme, parts.netloc]) - - # If we need to try using a portion of the parts, - # the project id won't be one worth asking for so remove it. - # However, we do need to know that the project id was - # previously there, so keep it. - project_id = self.get_project_id() - project_id_location = parts.path.find(project_id) - if project_id_location > -1: - usable_path = parts.path[slice(0, project_id_location)] - needs_project_id = True - else: - usable_path = parts.path - needs_project_id = False - - # Generate a series of paths that might contain our version - # information. 
This will build successively longer paths from - # the split, so /nova/v2 would return "", "/nova", - # "/nova/v2" out of it. Based on what we've normally seen, - # the match will be found early on within those. - paths = accumulate(usable_path.split("/"), - func=lambda *fragments: "/".join(fragments)) - - result = None - - # If we have paths, try them from the root outwards. - # NOTE: Both the body of the for loop and the else clause - # cover the request for `just_root`. The else clause is explicit - # in only testing it because there are no path parts. In the for - # loop, it gets requested in the first iteration. - for path in paths: - response = self._parse_versions_response(just_root + path) - if response is not None: - result = response - break - else: - # If we didn't have paths, root is all we can do anyway. - response = self._parse_versions_response(just_root) - if response is not None: - result = response - - if result is not None: - if needs_project_id: - result.needs_project_id = True - result.project_id = project_id - - return result - - raise exceptions.EndpointNotFound( - "Unable to parse endpoints for %s" % service_type) - - def _parse_version(self, version): - """Parse the version and return major and minor components - - If the version was given with a leading "v", e.g., "v3", strip - that off to just numerals. - """ - version_num = version[version.find("v") + 1:] - components = version_num.split(".") - if len(components) == 1: - # The minor version of a v2 ends up being -1 so that we can - # loop through versions taking the highest available match - # while also working around a direct match for 2.0. 
- rv = Version(int(components[0]), -1) - elif len(components) == 2: - rv = Version(*[int(component) for component in components]) - else: - raise ValueError("Unable to parse version string %s" % version) - - return rv - - def _get_version_match(self, endpoint, profile_version, service_type): - """Return the best matching version - - Look through each version trying to find the best match for - the version specified in this profile. - * The best match will only ever be found within the same - major version, meaning a v2 profile will never match if - only v3 is available on the server. - * The search for the best match is fuzzy if needed. - * If the profile specifies v2 and the server has - v2.0, v2.1, and v2.2, the match will be v2.2. - * When an exact major/minor is specified, e.g., v2.0, - it will only match v2.0. - """ - - match_version = None - - for version in endpoint.versions: - api_version = self._parse_version(version["id"]) - if profile_version.major != api_version.major: - continue - - if profile_version.minor <= api_version.minor: - for link in version["links"]: - if link["rel"] == "self": - resp_link = link['href'] - match_version = parse.urlsplit(resp_link).path - - # Only break out of the loop on an exact match, - # otherwise keep trying. - if profile_version.minor == api_version.minor: - break - - if match_version is None: - raise exceptions.EndpointNotFound( - "Unable to determine endpoint for %s" % service_type) - - # Make sure the root endpoint has no overlap with match_version - root_parts = parse.urlsplit(endpoint.uri) - match_version = match_version.replace(root_parts.path, "", 1) - match = utils.urljoin(endpoint.uri, match_version) - - # For services that require the project id in the request URI, - # add them in here. 
- if endpoint.needs_project_id: - match = utils.urljoin(match, endpoint.project_id) - - return match - - def get_endpoint(self, auth=None, interface=None, service_type=None, - **kwargs): - """Override get endpoint to automate endpoint filtering - - This method uses the service catalog to find the root URI of - each service and then gets all available versions directly - from the service, not from the service catalog. - - Endpoints are cached per service type and interface combination - so that they're only requested from the remote service once - per instance of this class. - """ - key = (service_type, interface) - if key in self.endpoint_cache: - return self.endpoint_cache[key] - - filt = self.profile.get_filter(service_type) - if filt.interface is None: - filt.interface = interface - sc_endpoint = super(Session, self).get_endpoint(auth, - **filt.get_filter()) - - # Object Storage is, of course, different. Just use what we get - # back from the service catalog as not only does it not offer - # a list of supported versions, it appends an "AUTH_" prefix to - # the project id so we'd have to special case that as well. 
- if service_type == "object-store": - self.endpoint_cache[key] = sc_endpoint - return sc_endpoint - - endpoint = self._get_endpoint_versions(service_type, sc_endpoint) - - profile_version = self._parse_version(filt.version) - match = self._get_version_match(endpoint, profile_version, - service_type) - - _logger.debug("Using %s as %s %s endpoint", - match, interface, service_type) - - self.endpoint_cache[key] = match - return match - - @map_exceptions - def request(self, *args, **kwargs): - return super(Session, self).request(*args, **kwargs) diff --git a/openstack/tests/unit/telemetry/__init__.py b/openstack/shared_file_system/__init__.py similarity index 100% rename from openstack/tests/unit/telemetry/__init__.py rename to openstack/shared_file_system/__init__.py diff --git a/openstack/shared_file_system/shared_file_system_service.py b/openstack/shared_file_system/shared_file_system_service.py new file mode 100644 index 0000000000..c5479fc649 --- /dev/null +++ b/openstack/shared_file_system/shared_file_system_service.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import service_description +from openstack.shared_file_system.v2 import _proxy + + +class SharedFilesystemService( + service_description.ServiceDescription[_proxy.Proxy] +): + """The shared file systems service.""" + + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/openstack/tests/unit/telemetry/alarm/__init__.py b/openstack/shared_file_system/v2/__init__.py similarity index 100% rename from openstack/tests/unit/telemetry/alarm/__init__.py rename to openstack/shared_file_system/v2/__init__.py diff --git a/openstack/shared_file_system/v2/_proxy.py b/openstack/shared_file_system/v2/_proxy.py new file mode 100644 index 0000000000..7d859ccdef --- /dev/null +++ b/openstack/shared_file_system/v2/_proxy.py @@ -0,0 +1,1252 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack import exceptions +from openstack import proxy +from openstack import resource +from openstack.shared_file_system.v2 import ( + availability_zone as _availability_zone, +) +from openstack.shared_file_system.v2 import limit as _limit +from openstack.shared_file_system.v2 import quota_class_set as _quota_class_set +from openstack.shared_file_system.v2 import resource_locks as _resource_locks +from openstack.shared_file_system.v2 import share as _share +from openstack.shared_file_system.v2 import share_group as _share_group +from openstack.shared_file_system.v2 import ( + share_group_snapshot as _share_group_snapshot, +) +from openstack.shared_file_system.v2 import ( + share_access_rule as _share_access_rule, +) +from openstack.shared_file_system.v2 import ( + share_export_locations as _share_export_locations, +) +from openstack.shared_file_system.v2 import share_instance as _share_instance +from openstack.shared_file_system.v2 import share_network as _share_network +from openstack.shared_file_system.v2 import ( + share_network_subnet as _share_network_subnet, +) +from openstack.shared_file_system.v2 import share_snapshot as _share_snapshot +from openstack.shared_file_system.v2 import ( + share_snapshot_instance as _share_snapshot_instance, +) +from openstack.shared_file_system.v2 import storage_pool as _storage_pool +from openstack.shared_file_system.v2 import user_message as _user_message + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + _resource_registry = { + "availability_zone": _availability_zone.AvailabilityZone, + "share_snapshot": _share_snapshot.ShareSnapshot, + "storage_pool": _storage_pool.StoragePool, + "user_message": _user_message.UserMessage, + "limit": _limit.Limit, + "share": _share.Share, + "share_network": _share_network.ShareNetwork, + "share_network_subnet": _share_network_subnet.ShareNetworkSubnet, + "share_snapshot_instance": 
_share_snapshot_instance.ShareSnapshotInstance, # noqa: E501 + "share_instance": _share_instance.ShareInstance, + "share_export_locations": _share_export_locations.ShareExportLocation, + "share_access_rule": _share_access_rule.ShareAccessRule, + "share_group": _share_group.ShareGroup, + "share_group_snapshot": _share_group_snapshot.ShareGroupSnapshot, + "resource_locks": _resource_locks.ResourceLock, + "quota_class_set": _quota_class_set.QuotaClassSet, + } + + def availability_zones(self): + """Retrieve shared file system availability zones + + :returns: A generator of availability zone resources + :rtype: + :class:`~openstack.shared_file_system.v2.availability_zone.AvailabilityZone` + """ + return self._list(_availability_zone.AvailabilityZone) + + def shares(self, details=True, **query): + """Lists all shares with details + + :param kwargs query: Optional query parameters to be sent to limit + the shares being returned. Available parameters include: + + * status: Filters by a share status + * share_server_id: The UUID of the share server. + * metadata: One or more metadata key and value pairs as a url + encoded dictionary of strings. + * extra_specs: The extra specifications as a set of one or more + key-value pairs. + * share_type_id: The UUID of a share type to query resources by. + * name: The user defined name of the resource to filter resources + by. + * snapshot_id: The UUID of the share's base snapshot to filter + the request based on. + * host: The host name of the resource to query with. + * share_network_id: The UUID of the share network to filter + resources by. + * project_id: The ID of the project that owns the resource. + * is_public: A boolean query parameter that, when set to true, + allows retrieving public resources that belong to + all projects. + * share_group_id: The UUID of a share group to filter resource. + * export_location_id: The export location UUID that can be used + to filter shares or share instances. 
+ * export_location_path: The export location path that can be used + to filter shares or share instances. + * name~: The name pattern that can be used to filter shares, share + snapshots, share networks or share groups. + * description~: The description pattern that can be used to filter + shares, share snapshots, share networks or share groups. + * with_count: Whether to show count in API response or not, + default is False. + * limit: The maximum number of shares to return. + * offset: The offset to define start point of share or share group + listing. + * sort_key: The key to sort a list of shares. + * sort_dir: The direction to sort a list of shares. A valid value + is asc, or desc. + + :returns: Details of shares resources + :rtype: :class:`~openstack.shared_file_system.v2.share.Share` + """ + base_path = '/shares/detail' if details else None + return self._list(_share.Share, base_path=base_path, **query) + + def find_share(self, name_or_id, ignore_missing=True, **query): + """Find a single share + + :param name_or_id: The name or ID of a share. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :param dict query: Any additional parameters to be passed into + underlying methods. such as query filters. 
+ + :returns: One :class:`~openstack.shared_file_system.v2.share.Share` + or None + """ + + return self._find( + _share.Share, name_or_id, ignore_missing=ignore_missing, **query + ) + + def get_share(self, share_id): + """Lists details of a single share + + :param share: The ID of the share to get + :returns: Details of the identified share + :rtype: :class:`~openstack.shared_file_system.v2.share.Share` + """ + return self._get(_share.Share, share_id) + + def delete_share(self, share, ignore_missing=True): + """Deletes a single share + + :param share: The ID of the share to delete + :returns: Result of the ``delete`` + :rtype: ``None`` + """ + self._delete(_share.Share, share, ignore_missing=ignore_missing) + + def update_share(self, share_id, **attrs): + """Updates details of a single share. + + :param share: The ID of the share to update + :param dict attrs: The attributes to update on the share + :returns: the updated share + :rtype: :class:`~openstack.shared_file_system.v2.share.Share` + """ + return self._update(_share.Share, share_id, **attrs) + + def create_share(self, **attrs): + """Creates a share from attributes + + :returns: Details of the new share + :param dict attrs: Attributes which will be used to create + a :class:`~openstack.shared_file_system.v2.shares.Shares`, + comprised of the properties on the Shares class. 'size' and 'share' + are required to create a share. + :rtype: :class:`~openstack.shared_file_system.v2.share.Share` + """ + return self._create(_share.Share, **attrs) + + def revert_share_to_snapshot(self, share_id, snapshot_id): + """Reverts a share to the specified snapshot, which must be + the most recent one known to manila. 
+ + :param share_id: The ID of the share to revert + :param snapshot_id: The ID of the snapshot to revert to + :returns: Result of the ``revert`` + :rtype: ``None`` + """ + res = self._get(_share.Share, share_id) + res.revert_to_snapshot(self, snapshot_id) + + def manage_share(self, protocol, export_path, service_host, **params): + """Manage a share. + + :param str protocol: The shared file systems protocol of this share. + :param str export_path: The export path formatted according to the + protocol. + :param str service_host: The manage-share service host. + :param kwargs params: Optional parameters to be sent. Available + parameters include: + * name: The user defined name of the resource. + * share_type: The name or ID of the share type to be used to create + the resource. + * driver_options: A set of one or more key and value pairs, as a + dictionary of strings, that describe driver options. + * is_public: The level of visibility for the share. + * description: The user defiend description of the resource. + * share_server_id: The UUID of the share server. + + :returns: The share that was managed. + """ + + share = _share.Share() + return share.manage( + self, protocol, export_path, service_host, **params + ) + + def unmanage_share(self, share_id): + """Unmanage the share with the given share ID. + + :param share_id: The ID of the share to unmanage. + :returns: ``None`` + """ + + share_to_unmanage = self._get(_share.Share, share_id) + share_to_unmanage.unmanage(self) + + def resize_share( + self, share_id, new_size, no_shrink=False, no_extend=False, force=False + ): + """Resizes a share, extending/shrinking the share as needed. + + :param share_id: The ID of the share to resize + :param new_size: The new size of the share in GiBs. If new_size is + the same as the current size, then nothing is done. + :param bool no_shrink: If set to True, the given share is not shrunk, + even if shrinking the share is required to get the share to the + given size. 
This could be useful for extending shares to a minimum + size, while not shrinking shares to the given size. This defaults + to False. + :param bool no_extend: If set to True, the given share is not + extended, even if extending the share is required to get the share + to the given size. This could be useful for shrinking shares to a + maximum size, while not extending smaller shares to that maximum + size. This defaults to False. + :param bool force: Whether or not force should be used, + in the case where the share should be extended. + :returns: ``None`` + """ + + res = self._get(_share.Share, share_id) + + if new_size > res.size and no_extend is not True: + res.extend_share(self, new_size, force) + elif new_size < res.size and no_shrink is not True: + res.shrink_share(self, new_size) + + def share_groups(self, **query): + """Lists all share groups. + + :param kwargs query: Optional query parameters to be sent to limit + the share groups being returned. Available parameters include: + + * status: Filters by a share group status. + * name: The user defined name of the resource to filter resources + by. + * description: The user defined description text that can be used + to filter resources. + * project_id: The project ID of the user or service. + * share_server_id: The UUID of the share server. + * snapshot_id: The UUID of the share's base snapshot to filter + the request based on. + * host: The host name for the back end. + * share_network_id: The UUID of the share network to filter + resources by. + * share_group_type_id: The share group type ID to filter + share groups. + * share_group_snapshot_id: The source share group snapshot ID to + list the share group. + * share_types: A list of one or more share type IDs. Allows + filtering share groups. + * limit: The maximum number of share groups members to return. + * offset: The offset to define start point of share or share + group listing. + * sort_key: The key to sort a list of shares. 
+ * sort_dir: The direction to sort a list of shares + * name~: The name pattern that can be used to filter shares, + share snapshots, share networks or share groups. + * description~: The description pattern that can be used to + filter shares, share snapshots, share networks or share groups. + + :returns: A generator of manila share group resources + :rtype: :class:`~openstack.shared_file_system.v2. + share_group.ShareGroup` + """ + return self._list(_share_group.ShareGroup, **query) + + def get_share_group(self, share_group_id): + """Lists details for a share group. + + :param share: The ID of the share group to get + :returns: Details of the identified share group + :rtype: :class:`~openstack.shared_file_system.v2. + share_group.ShareGroup` + """ + return self._get(_share_group.ShareGroup, share_group_id) + + def find_share_group(self, name_or_id, ignore_missing=True): + """Finds a single share group + + :param name_or_id: The name or ID of a share group. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.shared_file_system.v2. + share_group.ShareGroup` + or None + """ + return self._find( + _share_group.ShareGroup, name_or_id, ignore_missing=ignore_missing + ) + + def create_share_group(self, **attrs): + """Creates a share group from attributes + + :returns: Details of the new share group + :rtype: :class:`~openstack.shared_file_system.v2. + share_group.ShareGroup` + """ + return self._create(_share_group.ShareGroup, **attrs) + + def update_share_group(self, share_group_id, **kwargs): + """Updates details of a single share group + + :param share: The ID of the share group + :returns: Updated details of the identified share group + :rtype: :class:`~openstack.shared_file_system.v2. 
+ share_group.ShareGroup` + """ + return self._update(_share_group.ShareGroup, share_group_id, **kwargs) + + def delete_share_group(self, share_group_id, ignore_missing=True): + """Deletes a single share group + + :param share: The ID of the share group + :returns: Result of the "delete" on share group + :rtype: :class:`~openstack.shared_file_system.v2. + share_group.ShareGroup` + """ + return self._delete( + _share_group.ShareGroup, + share_group_id, + ignore_missing=ignore_missing, + ) + + def storage_pools(self, details=True, **query): + """Lists all back-end storage pools with details + + :param kwargs query: Optional query parameters to be sent to limit + the storage pools being returned. Available parameters include: + + * pool_name: The pool name for the back end. + * host_name: The host name for the back end. + * backend_name: The name of the back end. + * capabilities: The capabilities for the storage back end. + * share_type: The share type name or UUID. + :returns: A generator of manila storage pool resources + :rtype: + :class:`~openstack.shared_file_system.v2.storage_pool.StoragePool` + """ + base_path = '/scheduler-stats/pools/detail' if details else None + return self._list( + _storage_pool.StoragePool, base_path=base_path, **query + ) + + def user_messages(self, **query): + """List shared file system user messages + + :param kwargs query: Optional query parameters to be sent to limit + the messages being returned. Available parameters include: + + * action_id: The ID of the action during which the message + was created. + * detail_id: The ID of the message detail. + * limit: The maximum number of shares to return. + * message_level: The message level. + * offset: The offset to define start point of share or share + group listing. + * sort_key: The key to sort a list of messages. + * sort_dir: The direction to sort a list of shares. + * project_id: The ID of the project for which the message + was created. 
+ * request_id: The ID of the request during which the message + was created. + * resource_id: The UUID of the resource for which the message + was created. + * resource_type: The type of the resource for which the message + was created. + + :returns: A generator of user message resources + :rtype: + :class:`~openstack.shared_file_system.v2.user_message.UserMessage` + """ + return self._list(_user_message.UserMessage, **query) + + def get_user_message(self, message_id): + """List details of a single user message + + :param message_id: The ID of the user message + :returns: Details of the identified user message + :rtype: + :class:`~openstack.shared_file_system.v2.user_message.UserMessage` + """ + return self._get(_user_message.UserMessage, message_id) + + def delete_user_message(self, message_id, ignore_missing=True): + """Deletes a single user message + + :param message_id: The ID of the user message + :returns: Result of the "delete" on the user message + :rtype: + :class:`~openstack.shared_file_system.v2.user_message.UserMessage` + """ + return self._delete( + _user_message.UserMessage, + message_id, + ignore_missing=ignore_missing, + ) + + def limits(self, **query): + """Lists all share limits. + + :param kwargs query: Optional query parameters to be sent to limit + the share limits being returned. + + :returns: A generator of manila share limits resources + :rtype: :class:`~openstack.shared_file_system.v2.limit.Limit` + """ + return self._list(_limit.Limit, **query) + + def share_snapshots(self, details=True, **query): + """Lists all share snapshots with details. + + :param kwargs query: Optional query parameters to be sent to limit + the snapshots being returned. Available parameters include: + + * project_id: The ID of the user or service making the API request. 
+ + :returns: A generator of manila share snapshot resources + :rtype: + :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` + """ + base_path = '/snapshots/detail' if details else None + return self._list( + _share_snapshot.ShareSnapshot, base_path=base_path, **query + ) + + def get_share_snapshot(self, snapshot_id): + """Lists details of a single share snapshot + + :param snapshot_id: The ID of the snapshot to get + :returns: Details of the identified share snapshot + :rtype: + :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` + """ + return self._get(_share_snapshot.ShareSnapshot, snapshot_id) + + def create_share_snapshot(self, **attrs): + """Creates a share snapshot from attributes + + :returns: Details of the new share snapshot + :rtype: + :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` + """ + return self._create(_share_snapshot.ShareSnapshot, **attrs) + + def update_share_snapshot(self, snapshot_id, **attrs): + """Updates details of a single share snapshot. + + :param snapshot_id: The ID of the snapshot to update + :param dict attrs: The attributes to update on the snapshot + :returns: the updated share snapshot + :rtype: + :class:`~openstack.shared_file_system.v2.share_snapshot.ShareSnapshot` + """ + return self._update( + _share_snapshot.ShareSnapshot, snapshot_id, **attrs + ) + + def delete_share_snapshot(self, snapshot_id, ignore_missing=True): + """Deletes a single share snapshot + + :param snapshot_id: The ID of the snapshot to delete + :returns: Result of the ``delete`` + :rtype: ``None`` + """ + self._delete( + _share_snapshot.ShareSnapshot, + snapshot_id, + ignore_missing=ignore_missing, + ) + + # ========= Network Subnets ========== + def share_network_subnets(self, share_network_id): + """Lists all share network subnets with details. + + :param share_network_id: The id of the share network for which + Share Network Subnets should be listed. 
+ :returns: A generator of manila share network subnets + :rtype: + :class:`~openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet` + """ + return self._list( + _share_network_subnet.ShareNetworkSubnet, + share_network_id=share_network_id, + ) + + def get_share_network_subnet( + self, + share_network_id, + share_network_subnet_id, + ): + """Lists details of a single share network subnet. + + :param share_network_id: The id of the share network associated + with the Share Network Subnet. + :param share_network_subnet_id: The id of the Share Network Subnet + to retrieve. + :returns: Details of the identified share network subnet + :rtype: + :class:`~openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet` + """ + + return self._get( + _share_network_subnet.ShareNetworkSubnet, + share_network_subnet_id, + share_network_id=share_network_id, + ) + + def create_share_network_subnet(self, share_network_id, **attrs): + """Creates a share network subnet from attributes + + :param share_network_id: The id of the share network within which the + Share Network Subnet should be created. + :param dict attrs: Attributes which will be used to create + a share network subnet. + :returns: Details of the new share network subnet. + :rtype: + :class:`~openstack.shared_file_system.v2.share_network_subnet.ShareNetworkSubnet` + """ + return self._create( + _share_network_subnet.ShareNetworkSubnet, + **attrs, + share_network_id=share_network_id, + ) + + def delete_share_network_subnet( + self, share_network_id, share_network_subnet, ignore_missing=True + ): + """Deletes a share network subnet. + + :param share_network_id: The id of the Share Network associated with + the Share Network Subnet. + :param share_network_subnet: The id of the Share Network Subnet + which should be deleted. 
+ :returns: Result of the ``delete`` + :rtype: None + """ + + self._delete( + _share_network_subnet.ShareNetworkSubnet, + share_network_subnet, + share_network_id=share_network_id, + ignore_missing=ignore_missing, + ) + + def share_snapshot_instances(self, details=True, **query): + """Lists all share snapshot instances with details. + + :param bool details: Whether to fetch detailed resource + descriptions. Defaults to True. + :param kwargs query: Optional query parameters to be sent to limit + the share snapshot instance being returned. + Available parameters include: + + * snapshot_id: The UUID of the share's base snapshot to filter + the request based on. + * project_id: The project ID of the user or service making the + request. + + :returns: A generator of share snapshot instance resources + :rtype: :class:`~openstack.shared_file_system.v2. + share_snapshot_instance.ShareSnapshotInstance` + """ + base_path = '/snapshot-instances/detail' if details else None + return self._list( + _share_snapshot_instance.ShareSnapshotInstance, + base_path=base_path, + **query, + ) + + def get_share_snapshot_instance(self, snapshot_instance_id): + """Lists details of a single share snapshot instance + + :param snapshot_instance_id: The ID of the snapshot instance to get + :returns: Details of the identified snapshot instance + :rtype: :class:`~openstack.shared_file_system.v2. + share_snapshot_instance.ShareSnapshotInstance` + """ + return self._get( + _share_snapshot_instance.ShareSnapshotInstance, + snapshot_instance_id, + ) + + def share_networks(self, details=True, **query): + """Lists all share networks with details. + + :param dict query: Optional query parameters to be sent to limit the + resources being returned. Available parameters include: + + * name~: The user defined name of the resource to filter resources + by. + * project_id: The ID of the user or service making the request. 
+ * description~: The description pattern that can be used to filter + shares, share snapshots, share networks or share groups. + * all_projects: (Admin only). Defines whether to list the requested + resources for all projects. + + :returns: Details of share networks + :rtype: :class:`~openstack.shared_file_system.v2. + share_network.ShareNetwork` + """ + base_path = '/share-networks/detail' if details else None + return self._list( + _share_network.ShareNetwork, base_path=base_path, **query + ) + + def get_share_network(self, share_network_id): + """Lists details of a single share network + + :param share_network: The ID of the share network to get + :returns: Details of the identified share network + :rtype: :class:`~openstack.shared_file_system.v2. + share_network.ShareNetwork` + """ + return self._get(_share_network.ShareNetwork, share_network_id) + + def delete_share_network(self, share_network_id, ignore_missing=True): + """Deletes a single share network + + :param share_network_id: The ID of the share network to delete + :rtype: ``None`` + """ + self._delete( + _share_network.ShareNetwork, + share_network_id, + ignore_missing=ignore_missing, + ) + + def update_share_network(self, share_network_id, **attrs): + """Updates details of a single share network. + + :param share_network_id: The ID of the share network to update + :param dict attrs: The attributes to update on the share network + :returns: the updated share network + :rtype: :class:`~openstack.shared_file_system.v2. + share_network.ShareNetwork` + """ + return self._update( + _share_network.ShareNetwork, share_network_id, **attrs + ) + + def create_share_network(self, **attrs): + """Creates a share network from attributes + + :returns: Details of the new share network + :param dict attrs: Attributes which will be used to create + a :class:`~openstack.shared_file_system.v2. + share_network.ShareNetwork`, comprised of the properties + on the ShareNetwork class. 
+ :rtype: :class:`~openstack.shared_file_system.v2. + share_network.ShareNetwork` + """ + return self._create(_share_network.ShareNetwork, **attrs) + + def share_instances(self, **query): + """Lists all share instances. + + :param kwargs query: Optional query parameters to be sent to limit + the share instances being returned. Available parameters include: + + * export_location_id: The export location UUID that can be used + to filter share instances. + * export_location_path: The export location path that can be used + to filter share instances. + + :returns: Details of share instances resources + :rtype: :class:`~openstack.shared_file_system.v2. + share_instance.ShareInstance` + """ + return self._list(_share_instance.ShareInstance, **query) + + def get_share_instance(self, share_instance_id): + """Shows details for a single share instance + + :param share_instance_id: The UUID of the share instance to get + + :returns: Details of the identified share instance + :rtype: :class:`~openstack.shared_file_system.v2. + share_instance.ShareInstance` + """ + return self._get(_share_instance.ShareInstance, share_instance_id) + + def reset_share_instance_status(self, share_instance_id, status): + """Explicitly updates the state of a share instance. + + :param share_instance_id: The UUID of the share instance to reset. + :param status: The share or share instance status to be set. 
+ + :returns: ``None`` + """ + res = self._get_resource( + _share_instance.ShareInstance, share_instance_id + ) + res.reset_status(self, status) + + def delete_share_instance(self, share_instance_id): + """Force-deletes a share instance + + :param share_instance: The ID of the share instance to delete + + :returns: ``None`` + """ + res = self._get_resource( + _share_instance.ShareInstance, share_instance_id + ) + res.force_delete(self) + + def export_locations(self, share_id): + """List all export locations with details + + :param share_id: The ID of the share to list export locations from + :returns: List of export locations + :rtype: List of :class:`~openstack.shared_filesystem_storage.v2. + share_export_locations.ShareExportLocations` + """ + return self._list( + _share_export_locations.ShareExportLocation, share_id=share_id + ) + + def get_export_location(self, export_location, share_id): + """List details of export location + + :param export_location: The export location resource to get + :param share_id: The ID of the share to get export locations from + :returns: Details of identified export location + :rtype: :class:`~openstack.shared_filesystem_storage.v2. + share_export_locations.ShareExportLocations` + """ + + export_location_id = resource.Resource._get_id(export_location) + return self._get( + _share_export_locations.ShareExportLocation, + export_location_id, + share_id=share_id, + ) + + def access_rules(self, share, **query): + """Lists the access rules on a share. + + :returns: A generator of the share access rules. + :rtype: :class:`~openstack.shared_file_system.v2. + share_access_rules.ShareAccessRules` + """ + share = self._get_resource(_share.Share, share) + return self._list( + _share_access_rule.ShareAccessRule, share_id=share.id, **query + ) + + def get_access_rule(self, access_id): + """List details of an access rule. + + :param access_id: The id of the access rule to get + :returns: Details of the identified access rule. 
+ :rtype: :class:`~openstack.shared_file_system.v2. + share_access_rules.ShareAccessRules` + """ + return self._get(_share_access_rule.ShareAccessRule, access_id) + + def create_access_rule(self, share_id, **attrs): + """Creates an access rule from attributes + + :returns: Details of the new access rule + :param share_id: The ID of the share + :param dict attrs: Attributes which will be used to create + a :class:`~openstack.shared_file_system.v2. + share_access_rules.ShareAccessRules`, comprised of the + properties on the ShareAccessRules class. + :rtype: :class:`~openstack.shared_file_system.v2. + share_access_rules.ShareAccessRules` + """ + base_path = f"/shares/{share_id}/action" + return self._create( + _share_access_rule.ShareAccessRule, base_path=base_path, **attrs + ) + + def delete_access_rule( + self, access_id, share_id, ignore_missing=True, *, unrestrict=False + ): + """Deletes an access rule + + :param access_id: The id of the access rule to get + :param share_id: The ID of the share + :param unrestrict: If Manila must attempt removing locks while deleting + + :rtype: ``requests.models.Response`` HTTP response from internal + requests client + """ + res = self._get_resource( + _share_access_rule.ShareAccessRule, access_id, share_id=share_id + ) + try: + return res.delete( + self, + unrestrict=unrestrict, + ) + except exceptions.NotFoundException: + if ignore_missing: + return None + raise + + def share_group_snapshots(self, details=True, **query): + """Lists all share group snapshots. + + :param kwargs query: Optional query parameters to be sent + to limit the share group snapshots being returned. + Available parameters include: + + * project_id: The ID of the project that owns the resource. + * name: The user defined name of the resource to filter resources. + * description: The user defined description text that can be used + to filter resources. + * status: Filters by a share status + * share_group_id: The UUID of a share group to filter resource. 
+ * limit: The maximum number of share group snapshot members + to return. + * offset: The offset to define start point of share or + share group listing. + * sort_key: The key to sort a list of shares. + * sort_dir: The direction to sort a list of shares. A valid + value is asc, or desc. + + :returns: Details of share group snapshots resources + :rtype: :class:`~openstack.shared_file_system.v2. + share_group_snapshot.ShareGroupSnapshot` + """ + base_path = '/share-group-snapshots/detail' if details else None + return self._list( + _share_group_snapshot.ShareGroupSnapshot, + base_path=base_path, + **query, + ) + + def share_group_snapshot_members(self, group_snapshot_id): + """Lists all share group snapshots members. + + :param group_snapshot_id: The ID of the group snapshot to get + :returns: List of the share group snapshot members, which are + share snapshots. + :rtype: dict containing attributes of the share snapshot members. + """ + res = self._get( + _share_group_snapshot.ShareGroupSnapshot, + group_snapshot_id, + ) + response = res.get_members(self) + return response + + def get_share_group_snapshot(self, group_snapshot_id): + """Show share group snapshot details + + :param group_snapshot_id: The ID of the group snapshot to get + :returns: Details of the group snapshot + :rtype: :class:`~openstack.shared_file_system.v2. + share_group_snapshot.ShareGroupSnapshot` + """ + return self._get( + _share_group_snapshot.ShareGroupSnapshot, group_snapshot_id + ) + + def create_share_group_snapshot(self, share_group_id, **attrs): + """Creates a point-in-time snapshot copy of a share group. + + :returns: Details of the new snapshot + :param dict attrs: Attributes which will be used to create + a :class:`~openstack.shared_file_system.v2. + share_group_snapshots.ShareGroupSnapshots`, + :param 'share_group_id': ID of the share group to have the snapshot + taken. + :rtype: :class:`~openstack.shared_file_system.v2. 
+ share_group_snapshot.ShareGroupSnapshot` + """ + return self._create( + _share_group_snapshot.ShareGroupSnapshot, + share_group_id=share_group_id, + **attrs, + ) + + def reset_share_group_snapshot_status(self, group_snapshot_id, status): + """Reset share group snapshot state. + + :param group_snapshot_id: The ID of the share group snapshot to reset + :param status: The state of the share group snapshot to be set, A + valid value is "creating", "error", "available", "deleting", + "error_deleting". + :rtype: ``None`` + """ + res = self._get( + _share_group_snapshot.ShareGroupSnapshot, group_snapshot_id + ) + res.reset_status(self, status) + + def update_share_group_snapshot(self, group_snapshot_id, **attrs): + """Updates a share group snapshot. + + :param group_snapshot_id: The ID of the share group snapshot to update + :param dict attrs: The attributes to update on the share group snapshot + :returns: the updated share group snapshot + :rtype: :class:`~openstack.shared_file_system.v2. + share_group_snapshot.ShareGroupSnapshot` + """ + return self._update( + _share_group_snapshot.ShareGroupSnapshot, + group_snapshot_id, + **attrs, + ) + + def delete_share_group_snapshot( + self, group_snapshot_id, ignore_missing=True + ): + """Deletes a share group snapshot. + + :param group_snapshot_id: The ID of the share group snapshot to delete + :rtype: ``None`` + """ + self._delete( + _share_group_snapshot.ShareGroupSnapshot, + group_snapshot_id, + ignore_missing=ignore_missing, + ) + + # ========= Share Metadata ========== + def get_share_metadata(self, share_id): + """Lists all metadata for a share. + + :param share_id: The ID of the share + + :returns: A :class:`~openstack.shared_file_system.v2.share.Share` + with the share's metadata. 
+ :rtype: + :class:`~openstack.shared_file_system.v2.share.Share` + """ + share = self._get_resource(_share.Share, share_id) + return share.fetch_metadata(self) + + def get_share_metadata_item(self, share_id, key): + """Retrieves a specific metadata item from a share by its key. + + :param share_id: The ID of the share + :param key: The key of the share metadata + + :returns: A :class:`~openstack.shared_file_system.v2.share.Share` + with the share's metadata. + :rtype: + :class:`~openstack.shared_file_system.v2.share.Share` + """ + share = self._get_resource(_share.Share, share_id) + return share.get_metadata_item(self, key) + + def create_share_metadata(self, share_id, **metadata): + """Creates share metadata as key-value pairs. + + :param share_id: The ID of the share + :param metadata: The metadata to be created + + :returns: A :class:`~openstack.shared_file_system.v2.share.Share` + with the share's metadata. + :rtype: + :class:`~openstack.shared_file_system.v2.share.Share` + """ + share = self._get_resource(_share.Share, share_id) + return share.set_metadata(self, metadata=metadata) + + def update_share_metadata(self, share_id, metadata, replace=False): + """Updates metadata of given share. + + :param share_id: The ID of the share + :param metadata: The metadata to be created + :param replace: Boolean for whether the preexisting metadata + should be replaced + + :returns: A :class:`~openstack.shared_file_system.v2.share.Share` + with the share's updated metadata. + :rtype: + :class:`~openstack.shared_file_system.v2.share.Share` + """ + share = self._get_resource(_share.Share, share_id) + return share.set_metadata(self, metadata=metadata, replace=replace) + + def delete_share_metadata(self, share_id, keys, ignore_missing=True): + """Deletes metadata items on a share, identified by their keys. 
+ + :param share_id: The ID of the share + :param keys: The list of share metadata keys to be deleted + :param ignore_missing: Boolean indicating if missing keys should be + ignored. + + :returns: None + :rtype: None + """ + share = self._get_resource(_share.Share, share_id) + keys_failed_to_delete = [] + for key in keys: + try: + share.delete_metadata_item(self, key) + except exceptions.NotFoundException: + if not ignore_missing: + self._connection.log.info("Key %s not found.", key) + keys_failed_to_delete.append(key) + except exceptions.ForbiddenException: + self._connection.log.info("Key %s cannot be deleted.", key) + keys_failed_to_delete.append(key) + except exceptions.SDKException: + self._connection.log.info("Failed to delete key %s.", key) + keys_failed_to_delete.append(key) + if keys_failed_to_delete: + raise exceptions.SDKException( + f"Some keys failed to be deleted {keys_failed_to_delete}" + ) + + def resource_locks(self, **query): + """Lists all resource locks. + + :param kwargs query: Optional query parameters to be sent to limit + the resource locks being returned. Available parameters include: + + * project_id: The project ID of the user that the lock is + created for. + * user_id: The ID of a user to filter resource locks by. + * all_projects: list locks from all projects (Admin Only) + * resource_id: The ID of the resource that the locks pertain to + filter resource locks by. + * resource_action: The action prevented by the filtered resource + locks. + * resource_type: The type of the resource that the locks pertain + to filter resource locks by. + * lock_context: The lock creator's context to filter locks by. + * lock_reason: The lock reason that can be used to filter resource + locks. (Inexact search is also available with lock_reason~) + * created_since: Search for the list of resources that were created + after the specified date. The date is in 'yyyy-mm-dd' format. 
+ * created_before: Search for the list of resources that were + created prior to the specified date. The date is in + 'yyyy-mm-dd' format. + * limit: The maximum number of resource locks to return. + * offset: The offset to define start point of resource lock + listing. + * sort_key: The key to sort a list of shares. + * sort_dir: The direction to sort a list of shares + * with_count: Whether to show count in API response or not, + default is False. This query parameter is useful with + pagination. + + :returns: A generator of manila resource locks + :rtype: :class:`~openstack.shared_file_system.v2. + resource_locks.ResourceLock` + """ + + if query.get('resource_type'): + # The _create method has a parameter named resource_type, which + # refers to the type of resource to be created, so we need to avoid + # a conflict of parameters we are sending to the method. + query['__conflicting_attrs'] = { + 'resource_type': query.get('resource_type') + } + query.pop('resource_type') + return self._list(_resource_locks.ResourceLock, **query) + + def get_resource_lock(self, resource_lock): + """Show details of a resource lock. + + :param resource_lock: The ID of a resource lock or a + :class:`~openstack.shared_file_system.v2. + resource_locks.ResourceLock` instance. + :returns: Details of the identified resource lock. + :rtype: :class:`~openstack.shared_file_system.v2. + resource_locks.ResourceLock` + """ + return self._get(_resource_locks.ResourceLock, resource_lock) + + def update_resource_lock(self, resource_lock, **attrs): + """Updates details of a single resource lock. + + :param resource_lock: The ID of a resource lock or a + :class:`~openstack.shared_file_system.v2. + resource_locks.ResourceLock` instance. + :param dict attrs: The attributes to update on the resource lock + :returns: the updated resource lock + :rtype: :class:`~openstack.shared_file_system.v2. 
+ resource_locks.ResourceLock` + """ + return self._update( + _resource_locks.ResourceLock, resource_lock, **attrs + ) + + def delete_resource_lock(self, resource_lock, ignore_missing=True): + """Deletes a single resource lock + + :param resource_lock: The ID of a resource lock or a + :class:`~openstack.shared_file_system.v2. + resource_locks.ResourceLock` instance. + :returns: Result of the ``delete`` + :rtype: ``None`` + """ + return self._delete( + _resource_locks.ResourceLock, + resource_lock, + ignore_missing=ignore_missing, + ) + + def create_resource_lock(self, **attrs): + """Locks a resource. + + :param dict attrs: Attributes which will be used to create + a :class:`~openstack.shared_file_system.v2. + resource_locks.ResourceLock`, comprised of the properties + on the ResourceLock class. Available parameters include: + + * ``resource_id``: ID of the resource to be locked. + * ``resource_type``: type of the resource (share, access_rule). + * ``resource_action``: action to be locked (delete, show). + * ``lock_reason``: reason why you're locking the resource + (Optional). + :returns: Details of the lock + :rtype: :class:`~openstack.shared_file_system.v2. + resource_locks.ResourceLock` + """ + + if attrs.get('resource_type'): + # The _create method has a parameter named resource_type, which + # refers to the type of resource to be created, so we need to avoid + # a conflict of parameters we are sending to the method. + attrs['__conflicting_attrs'] = { + 'resource_type': attrs.get('resource_type') + } + attrs.pop('resource_type') + return self._create(_resource_locks.ResourceLock, **attrs) + + def get_quota_class_set(self, quota_class_name): + """Get quota class set. 
+ + :param quota_class_name: The name of the quota class + :returns: A :class:`~openstack.shared_file_system.v2 + .quota_class_set.QuotaClassSet` + """ + return self._get(_quota_class_set.QuotaClassSet, quota_class_name) + + def update_quota_class_set(self, quota_class_name, **attrs): + """Update quota class set. + + :param quota_class_name: The name of the quota class + :param attrs: The attributes to update on the quota class set + :returns: the updated quota class set + :rtype: :class:`~openstack.shared_file_system.v2 + .quota_class_set.QuotaClassSet` + """ + + return self._update( + _quota_class_set.QuotaClassSet, quota_class_name, **attrs + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. + :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. 
+ :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. + """ + return resource.wait_for_delete(self, res, interval, wait, callback) diff --git a/openstack/shared_file_system/v2/availability_zone.py b/openstack/shared_file_system/v2/availability_zone.py new file mode 100644 index 0000000000..d05e188b33 --- /dev/null +++ b/openstack/shared_file_system/v2/availability_zone.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class AvailabilityZone(resource.Resource): + resource_key = "availability_zone" + resources_key = "availability_zones" + base_path = "/availability-zones" + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True + + #: Properties + #: The ID of the availability zone + id = resource.Body("id", type=str) + #: The name of the availability zone. + name = resource.Body("name", type=str) + #: Date and time the availability zone was created at. + created_at = resource.Body("created_at", type=str) + #: Date and time the availability zone was last updated at. + updated_at = resource.Body("updated_at", type=str) diff --git a/openstack/shared_file_system/v2/limit.py b/openstack/shared_file_system/v2/limit.py new file mode 100644 index 0000000000..3991dcb0a4 --- /dev/null +++ b/openstack/shared_file_system/v2/limit.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class Limit(resource.Resource): + resources_key = "limits" + base_path = "/limits" + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True + allow_head = False + + #: Properties + #: The maximum number of replica gigabytes that are allowed + #: in a project. 
+ maxTotalReplicaGigabytes = resource.Body( + "maxTotalReplicaGigabytes", type=int + ) + #: The total maximum number of shares that are allowed in a project. + maxTotalShares = resource.Body("maxTotalShares", type=int) + #: The total maximum number of share gigabytes that are allowed in a + #: project. + maxTotalShareGigabytes = resource.Body("maxTotalShareGigabytes", type=int) + #: The total maximum number of share-networks that are allowed in a + #: project. + maxTotalShareNetworks = resource.Body("maxTotalShareNetworks", type=int) + #: The total maximum number of share snapshots that are allowed in a + #: project. + maxTotalShareSnapshots = resource.Body("maxTotalShareSnapshots", type=int) + #: The maximum number of share replicas that is allowed. + maxTotalShareReplicas = resource.Body("maxTotalShareReplicas", type=int) + #: The total maximum number of snapshot gigabytes that are allowed + #: in a project. + maxTotalSnapshotGigabytes = resource.Body( + "maxTotalSnapshotGigabytes", type=int + ) + #: The total number of replica gigabytes used in a project by + #: share replicas. + totalReplicaGigabytesUsed = resource.Body( + "totalReplicaGigabytesUsed", type=int + ) + #: The total number of gigabytes used in a project by shares. + totalShareGigabytesUsed = resource.Body( + "totalShareGigabytesUsed", type=int + ) + #: The total number of created shares in a project. + totalSharesUsed = resource.Body("totalSharesUsed", type=int) + #: The total number of created share-networks in a project. + totalShareNetworksUsed = resource.Body("totalShareNetworksUsed", type=int) + #: The total number of created share snapshots in a project. + totalShareSnapshotsUsed = resource.Body( + "totalShareSnapshotsUsed", type=int + ) + #: The total number of gigabytes used in a project by snapshots. + totalSnapshotGigabytesUsed = resource.Body( + "totalSnapshotGigabytesUsed", type=int + ) + #: The total number of created share replicas in a project. 
+ totalShareReplicasUsed = resource.Body("totalShareReplicasUsed", type=int) diff --git a/openstack/shared_file_system/v2/quota_class_set.py b/openstack/shared_file_system/v2/quota_class_set.py new file mode 100644 index 0000000000..325f00b916 --- /dev/null +++ b/openstack/shared_file_system/v2/quota_class_set.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class QuotaClassSet(resource.Resource): + base_path = '/quota-class-sets' + resource_key = 'quota_class_set' + + allow_create = False + allow_fetch = True + allow_commit = True + allow_delete = False + allow_list = False + allow_head = False + + _query_mapping = resource.QueryParameters("quota_class_name", "project_id") + #: Properties + #: A quota_class_set id. + id = resource.Body("id", type=str) + #: The maximum number of share groups. + share_groups = resource.Body("share_groups", type=int) + #: The maximum number of share group snapshots. + share_group_snapshots = resource.Body("share_group_snapshots", type=int) + #: The total maximum number of shares that are allowed in a project. + snapshots = resource.Body("snapshots", type=int) + #: The maximum number of snapshot gigabytes that are allowed in a project. + snapshot_gigabytes = resource.Body("snapshot_gigabytes", type=int) + #: The total maximum number of snapshot gigabytes that are allowed in a + #: project. 
+ shares = resource.Body("shares", type=int) + #: The maximum number of share-networks that are allowed in a project. + share_networks = resource.Body("share_networks", type=int) + #: The maximum number of share replicas that is allowed. + share_replicas = resource.Body("share_replicas", type=int) + #: The total maximum number of share gigabytes that are allowed in a + #: project. You cannot request a share that exceeds the allowed gigabytes + #: quota. + gigabytes = resource.Body("gigabytes", type=int) + #: The maximum number of replica gigabytes that are allowed in a project. + #: You cannot create a share, share replica, manage a share or extend a + #: share if it is going to exceed the allowed replica gigabytes quota. + replica_gigabytes = resource.Body("replica_gigabytes", type=int) + #: The number of gigabytes per share allowed in a project. + per_share_gigabytes = resource.Body("per_share_gigabytes", type=int) + #: The total maximum number of share backups that are allowed in a project. + backups = resource.Body("backups", type=int) + #: The total maximum number of backup gigabytes that are allowed in a + #: project. + backup_gigabytes = resource.Body("backup_gigabytes", type=int) diff --git a/openstack/shared_file_system/v2/resource_locks.py b/openstack/shared_file_system/v2/resource_locks.py new file mode 100644 index 0000000000..b7939eb681 --- /dev/null +++ b/openstack/shared_file_system/v2/resource_locks.py @@ -0,0 +1,73 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ResourceLock(resource.Resource): + resource_key = "resource_lock" + resources_key = "resource_locks" + base_path = "/resource-locks" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = False + + _query_mapping = resource.QueryParameters( + "project_id", + "created_since", + "created_before", + "limit", + "offset", + "id", + "resource_id", + "resource_type", + "resource_action", + "user_id", + "lock_context", + "lock_reason", + "lock_reason~", + "sort_key", + "sort_dir", + "with_count", + "all_projects", + ) + # The resource was introduced in this microversion, so it is the minimum + # version to use it. Openstacksdk currently doesn't allow to set + # minimum microversions. + _max_microversion = '2.81' + + #: Properties + #: The date and time stamp when the resource was created within the + #: services's database. + created_at = resource.Body("created_at", type=str) + #: The date and time stamp when the resource was last modified within the + #: services's database. + updated_at = resource.Body("updated_at", type=str) + #: The ID of the user that owns the lock + user_id = resource.Body("user_id", type=str) + #: The ID of the project that owns the lock. + project_id = resource.Body("project_id", type=str) + #: The type of the resource that is locked, i.e.: share, access rule. + resource_type = resource.Body("resource_type", type=str) + #: The UUID of the resource that is locked. + resource_id = resource.Body("resource_id", type=str) + #: What action is currently locked, i.e.: deletion, visibility of fields. + resource_action = resource.Body("resource_action", type=str) + #: The reason specified while the lock was being placed. 
+ lock_reason = resource.Body("lock_reason", type=str) + #: The context that placed the lock (user, admin or service). + lock_context = resource.Body("lock_context", type=str) diff --git a/openstack/shared_file_system/v2/share.py b/openstack/shared_file_system/v2/share.py new file mode 100644 index 0000000000..c395144fa0 --- /dev/null +++ b/openstack/shared_file_system/v2/share.py @@ -0,0 +1,234 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.common import metadata +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class Share(resource.Resource, metadata.MetadataMixin): + resource_key = "share" + resources_key = "shares" + base_path = "/shares" + + _query_mapping = resource.QueryParameters( + "project_id", + "name", + "status", + "share_server_id", + "metadata", + "share_type_id", + "snapshot_id", + "host", + "share_network_id", + "is_public", + "share_group_id", + "export_location_id", + "export_location_path", + "limit", + "offset", + "sort_key", + "sort_dir", + all_projects="all_tenants", + ) + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_list = True + allow_head = False + allow_delete = True + + #: Properties + #: The share instance access rules status. A valid value is active, + #: error, or syncing. + access_rules_status = resource.Body("access_rules_status", type=str) + #: The availability zone. 
+ availability_zone = resource.Body("availability_zone", type=str) + #: The date and time stamp when the resource was created within the + #: services's database. + created_at = resource.Body("created_at", type=str) + #: The user defined description of the resource. + description = resource.Body("description", type=str) + #: The share host name. + host = resource.Body("host", type=str) + #: The level of visibility for the share. + is_public = resource.Body("is_public", type=bool) + #: Whether or not this share supports snapshots that can be + #: cloned into new shares. + is_creating_new_share_from_snapshot_supported = resource.Body( + "create_share_from_snapshot_support", type=bool + ) + #: Whether the share's snapshots can be mounted directly and access + #: controlled independently or not. + is_mounting_snapshot_supported = resource.Body( + "mount_snapshot_support", type=bool + ) + #: Whether the share can be reverted to its latest snapshot or not. + is_reverting_to_snapshot_supported = resource.Body( + "revert_to_snapshot_support", type=bool + ) + #: An extra specification that filters back ends by whether the share + #: supports snapshots or not. + is_snapshot_supported = resource.Body("snapshot_support", type=bool) + #: Indicates whether the share has replicas or not. + is_replicated = resource.Body("has_replicas", type=bool) + #: One or more metadata key and value pairs as a dictionary of strings. + metadata = resource.Body("metadata", type=dict) + #: The progress of the share creation. + progress = resource.Body("progress", type=str) + #: The ID of the project that owns the resource. + project_id = resource.Body("project_id", type=str) + #: The share replication type. Valid values are none, readable, + #: writable and dr. + replication_type = resource.Body("replication_type", type=str) + #: The UUID of the share group that this shares belongs to. + share_group_id = resource.Body("share_group_id", type=str) + #: The share network ID. 
+ share_network_id = resource.Body("share_network_id", type=str) + #: The Shared File Systems protocol. A valid value is NFS, + #: CIFS, GlusterFS, HDFS, CephFS, MAPRFS + share_protocol = resource.Body("share_proto", type=str) + #: The UUID of the share server. + share_server_id = resource.Body("share_server_id", type=str) + #: The UUID of the share type. In minor versions, this parameter is a + #: share type name, as a string. + share_type = resource.Body("share_type", type=str) + #: Name of the share type. + share_type_name = resource.Body("share_type_name", type=str) + #: The share size, in GiBs. + size = resource.Body("size", type=int) + #: The UUID of the snapshot that was used to create the + #: share. + snapshot_id = resource.Body("snapshot_id", type=str) + #: The ID of the group snapshot instance that was used to create + #: this share. + source_share_group_snapshot_member_id = resource.Body( + "source_share_group_snapshot_member_id", type=str + ) + #: The share status + status = resource.Body("status", type=str) + #: For the share migration, the migration task state. + task_state = resource.Body("task_state", type=str) + #: ID of the user that the share was created by. + user_id = resource.Body("user_id", type=str) + #: Display name for updating name + display_name = resource.Body("display_name", type=str) + #: Display description for updating description + display_description = resource.Body("display_description", type=str) + + def _action(self, session, body, microversion=None): + """Perform share instance actions given the message body""" + url = utils.urljoin(self.base_path, self.id, 'action') + headers = {'Accept': ''} + + if microversion is None: + microversion = self._get_microversion(session) + + response = session.post( + url, json=body, headers=headers, microversion=microversion + ) + + exceptions.raise_from_response(response) + return response + + def extend_share(self, session, new_size, force=False): + """Extend the share size. 
+ + :param float new_size: The new size of the share + in GiB. + :param bool force: Whether or not to use force, bypassing + the scheduler. Requires admin privileges. Defaults to False. + :returns: The result of the action. + :rtype: ``None`` + """ + + extend_body = {"new_size": new_size} + + if force is True: + extend_body['force'] = True + + body = {"extend": extend_body} + self._action(session, body) + + def shrink_share(self, session, new_size): + """Shrink the share size. + + :param float new_size: The new size of the share + in GiB. + :returns: ``None`` + """ + + body = {"shrink": {'new_size': new_size}} + self._action(session, body) + + def revert_to_snapshot(self, session, snapshot_id): + """Revert the share to the given snapshot. + + :param str snapshot_id: The id of the snapshot to revert to. + :returns: ``None`` + """ + body = {"revert": {"snapshot_id": snapshot_id}} + self._action(session, body) + + def manage(self, session, protocol, export_path, service_host, **params): + """Manage a share. + + :param session: A session object used for sending request. + :param str protocol: The shared file systems protocol of this share. + :param str export_path: The export path formatted according to the + protocol. + :param str service_host: The manage-share service host. + :param kwargs params: Optional parameters to be sent. Available + parameters include: + + * name: The user defined name of the resource. + * share_type: The name or ID of the share type to be used to create + the resource. + * driver_options: A set of one or more key and value pairs, as a + dictionary of strings, that describe driver options. + * is_public: The level of visibility for the share. + * description: The user defiend description of the resource. + * share_server_id: The UUID of the share server. + + :returns: The share that was managed. 
+ """ + + path = 'manage' + attrs = { + 'share': { + 'protocol': protocol, + 'export_path': export_path, + 'service_host': service_host, + } + } + + attrs['share'].update(params) + + url = utils.urljoin(self.base_path, path) + resp = session.post(url, json=attrs) + + self._translate_response(resp) + return self + + def unmanage(self, session): + """Unmanage a share. + + :param session: A session object used for sending request. + :returns: ``None`` + """ + + body = {'unmanage': None} + + self._action(session, body) diff --git a/openstack/shared_file_system/v2/share_access_rule.py b/openstack/shared_file_system/v2/share_access_rule.py new file mode 100644 index 0000000000..64fdbb9572 --- /dev/null +++ b/openstack/shared_file_system/v2/share_access_rule.py @@ -0,0 +1,102 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import typing as ty + +from openstack import resource +from openstack import utils + + +class ShareAccessRule(resource.Resource): + resource_key = "access" + resources_key = "access_list" + base_path = "/share-access-rules" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + allow_head = False + + _query_mapping = resource.QueryParameters("share_id") + + # Restricted access rules became available in 2.82 + _max_microversion = '2.82' + + #: Properties + #: The access credential of the entity granted share access. 
+ access_key = resource.Body("access_key", type=str) + #: The access level to the share. + access_level = resource.Body("access_level", type=str) + #: The object of the access rule. + access_list = resource.Body("access_list", type=str) + #: The value that defines the access. + access_to = resource.Body("access_to", type=str) + #: The access rule type. + access_type = resource.Body("access_type", type=str) + #: The date and time stamp when the resource was created within the + #: services's database. + created_at = resource.Body("created_at", type=str) + #: One or more access rule metadata key and value pairs as a dictionary + #: of strings. + metadata = resource.Body("metadata", type=dict) + #: The UUID of the share to which you are granted or denied access. + share_id = resource.Body("share_id", type=str) + #: The state of the access rule. + state = resource.Body("state", type=str) + #: The date and time stamp when the resource was last updated within + #: the services's database. + updated_at = resource.Body("updated_at", type=str) + #: Whether the visibility of some sensitive fields is restricted or not + lock_visibility = resource.Body("lock_visibility", type=bool) + #: Whether the deletion of the access rule should be restricted or not + lock_deletion = resource.Body("lock_deletion", type=bool) + #: Reason for placing the loc + lock_reason = resource.Body("lock_reason", type=bool) + + def _action(self, session, body, url, microversion=None): + headers = {'Accept': ''} + + if microversion is None: + microversion = self._get_microversion(session) + + return session.post( + url, json=body, headers=headers, microversion=microversion + ) + + def create(self, session, *args, **kwargs): + return super().create( + session, + *args, + resource_request_key='allow_access', + resource_response_key='access', + **kwargs, + ) + + def delete( + self, + session, + error_message=None, + *, + microversion=None, + unrestrict=False, + **kwargs, + ): + body: dict[str, ty.Any] = 
{'deny_access': {'access_id': self.id}} + if unrestrict: + body['deny_access']['unrestrict'] = True + url = utils.urljoin("/shares", self.share_id, "action") + response = self._action(session, body, url) + self._translate_response(response) + return response diff --git a/openstack/shared_file_system/v2/share_export_locations.py b/openstack/shared_file_system/v2/share_export_locations.py new file mode 100644 index 0000000000..71eb625c7b --- /dev/null +++ b/openstack/shared_file_system/v2/share_export_locations.py @@ -0,0 +1,46 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import resource + + +class ShareExportLocation(resource.Resource): + resource_key = "export_location" + resources_key = "export_locations" + base_path = "/shares/%(share_id)s/export_locations" + + # capabilities + allow_list = True + allow_fetch = True + allow_create = False + allow_commit = False + allow_delete = False + allow_head = False + + _max_microversion = '2.47' + + #: Properties + # The share ID, part of the URI for export locations + share_id = resource.URI("share_id", type=str) + #: The path of the export location. + path = resource.Body("path", type=str) + #: Indicate if export location is preferred. + is_preferred = resource.Body("preferred", type=bool) + #: The share instance ID of the export location. + share_instance_id = resource.Body("share_instance_id", type=str) + #: Indicate if export location is admin only. 
+ is_admin = resource.Body("is_admin_only", type=bool) + #: Indicate when the export location is created at + created_at = resource.Body("created_at", type=str) + #: Indicate when the export location is updated at + updated_at = resource.Body("updated_at", type=str) diff --git a/openstack/shared_file_system/v2/share_group.py b/openstack/shared_file_system/v2/share_group.py new file mode 100644 index 0000000000..78a5df6a12 --- /dev/null +++ b/openstack/shared_file_system/v2/share_group.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ShareGroup(resource.Resource): + resource_key = "share_group" + resources_key = "share_groups" + base_path = "/share-groups" + + _query_mapping = resource.QueryParameters("share_group_id") + + # The share group API is experimental until 2.55. + _max_microversion = "2.55" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = False + + #: Properties + #: The availability zone ID that the share group exists within. + availability_zone = resource.Body("availability_zone", type=str) + #: The consistency snapshot support. + consistent_snapshot_support = resource.Body( + "consistent_snapshot_support", type=str + ) + #: The date and time stamp when the resource was created within the + #: services's database. 
+ created_at = resource.Body("created_at", type=str) + #: The user defined description of the resource. + description = resource.Body("description", type=str) + #: The ID of the project that owns the resource. + project_id = resource.Body("project_id", type=str) + #: The share group snapshot ID. + share_group_snapshot_id = resource.Body( + "share_group_snapshot_id", type=str + ) + #: The share group type ID. + share_group_type_id = resource.Body("share_group_type_id", type=str) + #: The share network ID where the resource is exported to. + share_network_id = resource.Body("share_network_id", type=str) + #: A list of share type IDs. + share_types = resource.Body("share_types", type=list) + #: The share status + status = resource.Body("status", type=str) diff --git a/openstack/shared_file_system/v2/share_group_snapshot.py b/openstack/shared_file_system/v2/share_group_snapshot.py new file mode 100644 index 0000000000..504efc38ec --- /dev/null +++ b/openstack/shared_file_system/v2/share_group_snapshot.py @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource +from openstack import utils + + +class ShareGroupSnapshot(resource.Resource): + resource_key = "share_group_snapshot" + resources_key = "share_group_snapshots" + base_path = "/share-group-snapshots" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = False + + _query_mapping = resource.QueryParameters( + 'project_id', + 'all_tenants', + 'name', + 'description', + 'status', + 'share_group_id', + 'limit', + 'offset', + 'sort_key', + 'sort_dir', + ) + + #: Properties + #: The ID of the project that owns the resource. + project_id = resource.Body("project_id", type=str) + #: Filters by a share group snapshot status. A valid value is creating, + #: error, available, deleting, error_deleting. + status = resource.Body("status", type=str) + #: The UUID of the share group. + share_group_id = resource.Body("share_group_id", type=str) + #: The user defined description of the resource. + description = resource.Body("description", type=str) + #: The date and time stamp when the resource was created. + created_at = resource.Body("created_at", type=str) + #: The share group snapshot members. + members = resource.Body("members", type=str) + #: The snapshot size, in GiBs. + size = resource.Body("size", type=int) + #: NFS, CIFS, GlusterFS, HDFS, CephFS or MAPRFS. + share_protocol = resource.Body("share_proto", type=str) + + def _action(self, session, body, microversion=None): + """Perform ShareGroupSnapshot actions given the message body.""" + # NOTE: This is using ShareGroupSnapshot.base_path instead of + # self.base_path as ShareGroupSnapshot instances can be acted on, + # but the URL used is sans any additional /detail/ part. 
+ url = utils.urljoin(self.base_path, self.id, 'action') + headers = {'Accept': ''} + microversion = microversion or self._get_microversion(session) + extra_attrs = {'microversion': microversion} + session.post(url, json=body, headers=headers, **extra_attrs) + + def reset_status(self, session, status): + body = {"reset_status": {"status": status}} + self._action(session, body) + + def get_members(self, session, microversion=None): + url = utils.urljoin(self.base_path, self.id, 'members') + microversion = microversion or self._get_microversion(session) + headers = {'Accept': ''} + response = session.get(url, headers=headers, microversion=microversion) + return response.json() diff --git a/openstack/shared_file_system/v2/share_instance.py b/openstack/shared_file_system/v2/share_instance.py new file mode 100644 index 0000000000..37e378caba --- /dev/null +++ b/openstack/shared_file_system/v2/share_instance.py @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack import resource +from openstack import utils + + +class ShareInstance(resource.Resource): + resource_key = "share_instance" + resources_key = "share_instances" + base_path = "/share_instances" + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + allow_head = False + + #: Properties + #: The share instance access rules status. A valid value is active, + #: error, or syncing. 
+ access_rules_status = resource.Body("access_rules_status", type=str) + #: The name of the availability zone the share exists within. + availability_zone = resource.Body("availability_zone", type=str) + #: If the share instance has its cast_rules_to_readonly attribute + #: set to True, all existing access rules be cast to read/only. + cast_rules_to_readonly = resource.Body("cast_rules_to_readonly", type=bool) + #: The date and time stamp when the resource was created within the + #: services's database. + created_at = resource.Body("created_at", type=str) + #: The host name of the service back end that the resource is + #: contained within. + host = resource.Body("host", type=str) + #: The progress of the share creation. + progress = resource.Body("progress", type=str) + #: The share replica state. Has set value only when replication is used. + #: List of possible values: active, in_sync, out_of_sync, error + replica_state = resource.Body("replica_state", type=str) + #: The UUID of the share to which the share instance belongs to. + share_id = resource.Body("share_id", type=str) + #: The share network ID where the resource is exported to. + share_network_id = resource.Body("share_network_id", type=str) + #: The UUID of the share server. + share_server_id = resource.Body("share_server_id", type=str) + #: The share or share instance status. 
+ status = resource.Body("status", type=str) + + def _action(self, session, body, microversion=None): + """Perform share instance actions given the message body""" + url = utils.urljoin(self.base_path, self.id, 'action') + headers = {'Accept': ''} + extra_attrs = {} + if microversion: + # Set microversion override + extra_attrs['microversion'] = microversion + else: + extra_attrs['microversion'] = self._get_microversion(session) + response = session.post(url, json=body, headers=headers, **extra_attrs) + exceptions.raise_from_response(response) + return response + + def reset_status(self, session, reset_status): + """Reset share instance to given status""" + body = {"reset_status": {"status": reset_status}} + self._action(session, body) + + def force_delete(self, session): + """Force delete share instance""" + body = {"force_delete": None} + self._action(session, body) diff --git a/openstack/shared_file_system/v2/share_network.py b/openstack/shared_file_system/v2/share_network.py new file mode 100644 index 0000000000..91463b0f38 --- /dev/null +++ b/openstack/shared_file_system/v2/share_network.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource +from openstack.shared_file_system.v2 import share_network_subnet + + +class ShareNetwork(resource.Resource): + resource_key = "share_network" + resources_key = "share_networks" + base_path = "/share-networks" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = False + + _query_mapping = resource.QueryParameters( + "project_id", + "name", + "description", + "created_since", + "created_before", + "security_service_id", + "limit", + "offset", + all_projects="all_tenants", + ) + + #: Properties + #: The date and time stamp when the resource was created within the + #: services's database. + created_at = resource.Body("created_at") + #: The user defined description of the resource. + description = resource.Body("description", type=str) + #: The ID of the project that owns the resource. + project_id = resource.Body("project_id", type=str) + #: A list of share network subnets that pertain to the related share + #: network. + share_network_subnets = resource.Body( + "share_network_subnets", + type=list, + list_type=share_network_subnet.ShareNetworkSubnet, + ) + #: The UUID of a neutron network when setting up or + #: updating a share network subnet with neutron. + neutron_net_id = resource.Body("neutron_net_id", type=str) + #: The UUID of the neutron subnet when setting up or updating + #: a share network subnet with neutron. + neutron_subnet_id = resource.Body("neutron_subnet_id", type=str) + #: The date and time stamp when the resource was last updated within + #: the services's database. 
+ updated_at = resource.Body("updated_at", type=str) diff --git a/openstack/shared_file_system/v2/share_network_subnet.py b/openstack/shared_file_system/v2/share_network_subnet.py new file mode 100644 index 0000000000..06e843a77f --- /dev/null +++ b/openstack/shared_file_system/v2/share_network_subnet.py @@ -0,0 +1,67 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ShareNetworkSubnet(resource.Resource): + resource_key = "share_network_subnet" + resources_key = "share_network_subnets" + base_path = "/share-networks/%(share_network_id)s/subnets" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + + #: Properties + #: The share network ID, part of the URI for share network subnets. + share_network_id = resource.URI("share_network_id", type=str) + + #: The name of the availability zone that the share network + #: subnet belongs to. + availability_zone = resource.Body("availability_zone", type=str) + #: The IP block from which to allocate the network, in CIDR notation. + cidr = resource.Body("cidr", type=str) + #: Date and time the share network subnet was created at. + created_at = resource.Body("created_at") + #: The gateway of a share network subnet. + gateway = resource.Body("gateway", type=str) + #: The IP version of the network. + ip_version = resource.Body("ip_version", type=int) + #: The MTU of a share network subnet. 
+ mtu = resource.Body("mtu", type=str) + #: The network type. A valid value is VLAN, VXLAN, GRE, or flat + network_type = resource.Body("network_type", type=str) + #: The name of the neutron network. + neutron_net_id = resource.Body("neutron_net_id", type=str) + #: The ID of the neutron subnet. + neutron_subnet_id = resource.Body("neutron_subnet_id", type=str) + #: The segmentation ID. + segmentation_id = resource.Body('segmentation_id', type=int) + #: The name of the share network that the share network subnet belongs to. + share_network_name = resource.Body("share_network_name", type=str) + #: Date and time the share network subnet was last updated at. + updated_at = resource.Body("updated_at", type=str) + + def create( + self, + session, + *args, + resource_request_key='share-network-subnet', + **kwargs, + ): + return super().create( + session, resource_request_key=resource_request_key, *args, **kwargs + ) diff --git a/openstack/shared_file_system/v2/share_snapshot.py b/openstack/shared_file_system/v2/share_snapshot.py new file mode 100644 index 0000000000..18d8fea3fa --- /dev/null +++ b/openstack/shared_file_system/v2/share_snapshot.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import resource + + +class ShareSnapshot(resource.Resource): + resource_key = "snapshot" + resources_key = "snapshots" + base_path = "/snapshots" + + # capabilities + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = False + + _query_mapping = resource.QueryParameters("snapshot_id") + + #: Properties + #: The date and time stamp when the resource was + #: created within the services's database. + created_at = resource.Body("created_at") + #: The user defined description of the resource. + description = resource.Body("description", type=str) + #: The user defined name of the resource. + display_name = resource.Body("display_name", type=str) + #: The user defined description of the resource + display_description = resource.Body("display_description", type=str) + #: ID of the project that the snapshot belongs to. + project_id = resource.Body("project_id", type=str) + #: The UUID of the source share that was used to + #: create the snapshot. + share_id = resource.Body("share_id", type=str) + #: The file system protocol of a share snapshot + share_proto = resource.Body("share_proto", type=str) + #: The snapshot's source share's size, in GiBs. + share_size = resource.Body("share_size", type=int) + #: The snapshot size, in GiBs. + size = resource.Body("size", type=int) + #: The snapshot status + status = resource.Body("status", type=str) + #: ID of the user that the snapshot was created by. + user_id = resource.Body("user_id", type=str) diff --git a/openstack/shared_file_system/v2/share_snapshot_instance.py b/openstack/shared_file_system/v2/share_snapshot_instance.py new file mode 100644 index 0000000000..13f53d6b73 --- /dev/null +++ b/openstack/shared_file_system/v2/share_snapshot_instance.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class ShareSnapshotInstance(resource.Resource): + resource_key = "snapshot_instance" + resources_key = "snapshot_instances" + base_path = "/snapshot-instances" + + # capabilities + allow_create = False + allow_fetch = True + allow_commit = False + allow_delete = False + allow_list = True + allow_head = False + + #: Properties + #: The date and time stamp when the resource was created within the + #: services's database. + created_at = resource.Body("created_at", type=str) + #: The progress of the snapshot creation. + progress = resource.Body("progress", type=str) + #: Provider location of the snapshot on the backend. + provider_location = resource.Body("provider_location", type=str) + #: The UUID of the share. + share_id = resource.Body("share_id", type=str) + #: The UUID of the share instance. + share_instance_id = resource.Body("share_instance_id", type=str) + #: The UUID of the snapshot. + snapshot_id = resource.Body("snapshot_id", type=str) + #: The snapshot instance status. + status = resource.Body("status", type=str) + #: The date and time stamp when the resource was updated within the + #: services's database. 
+ updated_at = resource.Body("updated_at", type=str) diff --git a/openstack/shared_file_system/v2/storage_pool.py b/openstack/shared_file_system/v2/storage_pool.py new file mode 100644 index 0000000000..49ebe399d6 --- /dev/null +++ b/openstack/shared_file_system/v2/storage_pool.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class StoragePool(resource.Resource): + resources_key = "pools" + base_path = "/scheduler-stats/pools" + + # capabilities + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_list = True + allow_head = False + + _query_mapping = resource.QueryParameters( + 'pool', + 'backend', + 'host', + 'capabilities', + 'share_type', + ) + + #: Properties + #: The name of the back end. + backend = resource.Body("backend", type=str) + #: The host of the back end. + host = resource.Body("host", type=str) + #: The pool for the back end + pool = resource.Body("pool", type=str) + #: The back end capabilities. + capabilities = resource.Body("capabilities", type=dict) diff --git a/openstack/shared_file_system/v2/user_message.py b/openstack/shared_file_system/v2/user_message.py new file mode 100644 index 0000000000..01b9da714a --- /dev/null +++ b/openstack/shared_file_system/v2/user_message.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource + + +class UserMessage(resource.Resource): + resource_key = "message" + resources_key = "messages" + base_path = "/messages" + + # capabilities + allow_fetch = True + allow_commit = False + allow_delete = True + allow_list = True + allow_head = False + + _query_mapping = resource.QueryParameters("message_id") + + _max_microversion = '2.37' + + #: Properties + #: The action ID of the user message + action_id = resource.Body("action_id", type=str) + #: Indicate when the user message was created + created_at = resource.Body("created_at", type=str) + #: The detail ID of the user message + detail_id = resource.Body("detail_id", type=str) + #: Indicate when the share message expires + expires_at = resource.Body("expires_at", type=str) + #: The message level of the user message + message_level = resource.Body("message_level", type=str) + #: The project ID of the user message + project_id = resource.Body("project_id", type=str) + #: The request ID of the user message + request_id = resource.Body("request_id", type=str) + #: The resource ID of the user message + resource_id = resource.Body("resource_id", type=str) + #: The resource type of the user message + resource_type = resource.Body("resource_type", type=str) + #: The message for the user message + user_message = resource.Body("user_message", type=str) diff --git a/openstack/telemetry/alarm/alarm_service.py b/openstack/telemetry/alarm/alarm_service.py deleted file mode 100644 index a23cc3a64c..0000000000 --- a/openstack/telemetry/alarm/alarm_service.py +++ /dev/null @@ -1,24 
+0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import service_filter - - -class AlarmService(service_filter.ServiceFilter): - """The alarm service.""" - - valid_versions = [service_filter.ValidVersion('v2')] - - def __init__(self, version=None): - """Create an alarm service.""" - super(AlarmService, self).__init__(service_type='alarming', - version=version) diff --git a/openstack/telemetry/alarm/v2/_proxy.py b/openstack/telemetry/alarm/v2/_proxy.py deleted file mode 100644 index abadcd2681..0000000000 --- a/openstack/telemetry/alarm/v2/_proxy.py +++ /dev/null @@ -1,129 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import proxy -from openstack.telemetry.alarm.v2 import alarm as _alarm -from openstack.telemetry.alarm.v2 import alarm_change as _alarm_change - - -class Proxy(proxy.BaseProxy): - """.. 
caution:: This API is a work in progress and is subject to change.""" - - def create_alarm(self, **attrs): - """Create a new alarm from attributes - - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.telemetry.v2.alarm.Alarm`, - comprised of the properties on the Alarm class. - - :returns: The results of alarm creation - :rtype: :class:`~openstack.telemetry.v2.alarm.Alarm` - """ - return self._create(_alarm.Alarm, **attrs) - - def delete_alarm(self, alarm, ignore_missing=True): - """Delete an alarm - - :param alarm: The value can be either the ID of an alarm or a - :class:`~openstack.telemetry.v2.alarm.Alarm` instance. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the alarm does not exist. - When set to ``True``, no exception will be set when - attempting to delete a nonexistent alarm. - - :returns: ``None`` - """ - self._delete(_alarm.Alarm, alarm, ignore_missing=ignore_missing) - - def find_alarm(self, name_or_id, ignore_missing=True): - """Find a single alarm - - :param name_or_id: The name or ID of a alarm. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.telemetry.v2.alarm.Alarm` or None - """ - return self._find(_alarm.Alarm, name_or_id, - ignore_missing=ignore_missing) - - def get_alarm(self, alarm): - """Get a single alarm - - :param alarm: The value can be the ID of an alarm or a - :class:`~openstack.telemetry.v2.alarm.Alarm` instance. - - :returns: One :class:`~openstack.telemetry.v2.alarm.Alarm` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. 
- """ - return self._get(_alarm.Alarm, alarm) - - def alarms(self, **query): - """Return a generator of alarms - - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. - - :returns: A generator of alarm objects - :rtype: :class:`~openstack.telemetry.v2.alarm.Alarm` - """ - # TODO(Qiming): Check the alarm service API docs/code to verify if - # the parameters need a change. - return self._list(_alarm.Alarm, paginated=False, **query) - - def update_alarm(self, alarm, **attrs): - """Update a alarm - - :param alarm: Either the id of a alarm or a - :class:`~openstack.telemetry.v2.alarm.Alarm` instance. - :attrs kwargs: The attributes to update on the alarm represented - by ``value``. - - :returns: The updated alarm - :rtype: :class:`~openstack.telemetry.v2.alarm.Alarm` - """ - return self._update(_alarm.Alarm, alarm, **attrs) - - def find_alarm_change(self, name_or_id, ignore_missing=True): - """Find a single alarm change - - :param name_or_id: The name or ID of a alarm change. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.telemetry.v2.alarm_change.AlarmChange` - or None - """ - return self._find(_alarm_change.AlarmChange, name_or_id, - ignore_missing=ignore_missing) - - def alarm_changes(self, alarm, **query): - """Return a generator of alarm changes - - :param alarm: Alarm resource or id for alarm. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. - - :returns: A generator of alarm change objects - :rtype: :class:`~openstack.telemetry.v2.alarm_change.AlarmChange` - """ - # TODO(Qiming): Check the alarm service API docs/code to verify if - # the parameters need a change. 
- alarm_id = _alarm.Alarm.from_id(alarm).id - return self._list(_alarm_change.AlarmChange, paginated=False, - path_args={'alarm_id': alarm_id}, **query) diff --git a/openstack/telemetry/alarm/v2/alarm.py b/openstack/telemetry/alarm/v2/alarm.py deleted file mode 100644 index 095d44aba7..0000000000 --- a/openstack/telemetry/alarm/v2/alarm.py +++ /dev/null @@ -1,91 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import resource -from openstack.telemetry.alarm import alarm_service -from openstack import utils - - -class Alarm(resource.Resource): - """.. caution:: This API is a work in progress and is subject to change.""" - id_attribute = 'alarm_id' - base_path = '/alarms' - service = alarm_service.AlarmService() - - # Supported Operations - allow_create = True - allow_retrieve = True - allow_update = True - allow_delete = True - allow_list = True - - # Properties - #: The actions to do when alarm state changes to alarm - alarm_actions = resource.prop('alarm_actions') - #: The ID of the alarm - alarm_id = resource.prop('alarm_id') - # TODO(briancurtin): undocumented - combination_rule = resource.prop('combination_rule') - #: The description of the alarm - description = resource.prop('description') - #: ``True`` if this alarm is enabled. 
*Type: bool* - is_enabled = resource.prop('enabled', type=bool) - #: The actions to do when alarm state changes to insufficient data - insufficient_data_actions = resource.prop('insufficient_data_actions') - #: The actions should be re-triggered on each evaluation cycle. - #: *Type: bool* - is_repeat_actions = resource.prop('repeat_actions', type=bool) - #: The name for the alarm - name = resource.prop('name') - #: The actions to do when alarm state change to ok - ok_actions = resource.prop('ok_actions') - #: The ID of the project that owns the alarm - project_id = resource.prop('project_id') - #: The severity of the alarm - severity = resource.prop('severity') - #: The state off the alarm - state = resource.prop('state') - #: The timestamp of the last alarm state change. - #: *Type: ISO 8601 formatted string* - state_changed_at = resource.prop('state_timestamp') - # TODO(briancurtin): undocumented - threshold_rule = resource.prop('threshold_rule', type=dict) - #: Describe time constraints for the alarm - time_constraints = resource.prop('time_constraints') - #: Explicit type specifier to select which rule to follow - type = resource.prop('type') - #: The timestamp of the last alarm definition update. - #: *Type: ISO 8601 formatted string* - updated_at = resource.prop('timestamp') - #: The ID of the user who created the alarm - user_id = resource.prop('user_id') - - def change_state(self, session, next_state): - """Set the state of an alarm. - - :param next_state: The valid values can be one of: ``ok``, ``alarm``, - ``insufficient data``. - """ - url = utils.urljoin(self.base_path, self.id, 'state') - resp = session.put(url, endpoint_filter=self.service, json=next_state) - return resp.json() - - def check_state(self, session): - """Retrieve the current state of an alarm from the service. - - The properties of the alarm are not modified. 
- """ - url = utils.urljoin(self.base_path, self.id, 'state') - resp = session.get(url, endpoint_filter=self.service) - resp = resp.json() - current_state = resp.replace('\"', '') - return current_state diff --git a/openstack/telemetry/alarm/v2/alarm_change.py b/openstack/telemetry/alarm/v2/alarm_change.py deleted file mode 100644 index ecc3e42d05..0000000000 --- a/openstack/telemetry/alarm/v2/alarm_change.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import resource -from openstack.telemetry.alarm import alarm_service - - -class AlarmChange(resource.Resource): - """.. caution:: This API is a work in progress and is subject to change.""" - id_attribute = 'event_id' - resource_key = 'alarm_change' - base_path = '/alarms/%(alarm_id)s/history' - service = alarm_service.AlarmService() - - # Supported Operations - allow_list = True - - # Properties - #: The ID of the alarm - alarm_id = resource.prop('alarm_id') - #: Data describing the change - detail = resource.prop('detail') - #: The ID of the change event - event_id = resource.prop('event_id') - #: The project ID on behalf of which the change is being made - on_behalf_of_id = resource.prop('on_behalf_of') - #: The project ID of the initiating identity - project_id = resource.prop('project_id') - #: The time/date of the alarm change. 
- #: *Type: ISO 8601 formatted string* - triggered_at = resource.prop('timestamp') - #: The type of change - type = resource.prop('type') - #: The user ID of the initiating identity - user_id = resource.prop('user_id') - - @classmethod - def list(cls, session, limit=None, marker=None, path_args=None, - paginated=False, **params): - url = cls._get_url(path_args) - resp = session.get(url, endpoint_filter=cls.service, params=params) - for item in resp.json(): - yield cls.existing(**item) diff --git a/openstack/telemetry/telemetry_service.py b/openstack/telemetry/telemetry_service.py deleted file mode 100644 index 6b0a9cc92b..0000000000 --- a/openstack/telemetry/telemetry_service.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import service_filter - - -class TelemetryService(service_filter.ServiceFilter): - """The telemetry service.""" - - valid_versions = [service_filter.ValidVersion('v2')] - - def __init__(self, version=None): - """Create a telemetry service.""" - super(TelemetryService, self).__init__(service_type='metering', - version=version) diff --git a/openstack/telemetry/v2/_proxy.py b/openstack/telemetry/v2/_proxy.py deleted file mode 100644 index b50d37f8f5..0000000000 --- a/openstack/telemetry/v2/_proxy.py +++ /dev/null @@ -1,179 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import proxy2 -from openstack.telemetry.v2 import capability -from openstack.telemetry.v2 import meter as _meter -from openstack.telemetry.v2 import resource as _resource -from openstack.telemetry.v2 import sample -from openstack.telemetry.v2 import statistics - - -class Proxy(proxy2.BaseProxy): - """.. caution:: This API is a work in progress and is subject to change.""" - - def find_capability(self, name_or_id, ignore_missing=True): - """Find a single capability - - :param name_or_id: The name or ID of a capability. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.telemetry.v2.capability.Capability` - or None - """ - return self._find(capability.Capability, name_or_id, - ignore_missing=ignore_missing) - - def capabilities(self, **query): - """Return a generator of capabilities - - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. - - :returns: A generator of capability objects - :rtype: :class:`~openstack.telemetry.v2.capability.Capability` - """ - return self._list(capability.Capability, paginated=False, **query) - - def find_meter(self, name_or_id, ignore_missing=True): - """Find a single meter - - :param name_or_id: The name or ID of a meter. 
- :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.telemetry.v2.meter.Meter` or None - """ - return self._find(_meter.Meter, name_or_id, - ignore_missing=ignore_missing) - - def meters(self, **query): - """Return a generator of meters - - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. - - :returns: A generator of meter objects - :rtype: :class:`~openstack.telemetry.v2.meter.Meter` - """ - return self._list(_meter.Meter, paginated=False, **query) - - def find_resource(self, name_or_id, ignore_missing=True): - """Find a single resource - - :param name_or_id: The name or ID of a resource. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.telemetry.v2.resource.Resource` or - None - """ - return self._find(_resource.Resource, name_or_id, - ignore_missing=ignore_missing) - - def get_resource(self, resource): - """Get a single resource - - :param resource: The value can be the ID of a resource or a - :class:`~openstack.telemetry.v2.resource.Resource` - instance. - - :returns: One :class:`~openstack.telemetry.v2.resource.Resource` - :raises: :class:`~openstack.exceptions.ResourceNotFound` - when no resource can be found. - """ - return self._get(_resource.Resource, resource) - - def resources(self, **query): - """Return a generator of resources - - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. 
- - :returns: A generator of resource objects - :rtype: :class:`~openstack.telemetry.v2.resource.Resource` - """ - return self._list(_resource.Resource, paginated=False, **query) - - def create_sample(self, **attrs): - """Create a new sample from attributes - - :param dict attrs: Keyword arguments which will be used to create - a :class:`~openstack.telemetry.v2.sample.Sample`, - comprised of the properties on the Sample class. - - :returns: The results of sample creation - :rtype: :class:`~openstack.telemetry.v2.sample.Sample` - """ - return self._create(sample.Sample, **attrs) - - def find_sample(self, name_or_id, ignore_missing=True): - """Find a single sample - - :param name_or_id: The name or ID of a sample. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. - :returns: One :class:`~openstack.telemetry.v2.sample.Sample` or None - """ - return self._find(sample.Sample, name_or_id, - ignore_missing=ignore_missing) - - def samples(self, meter, **query): - """Return a generator of samples - - :param value: Meter resource or name for a meter. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. - - :returns: A generator of sample objects - :rtype: :class:`~openstack.telemetry.v2.sample.Sample` - """ - return self._list(sample.Sample, paginated=False, - counter_name=meter, **query) - - def find_statistics(self, name_or_id, ignore_missing=True): - """Find a single statistics - - :param name_or_id: The name or ID of a statistics. - :param bool ignore_missing: When set to ``False`` - :class:`~openstack.exceptions.ResourceNotFound` will be - raised when the resource does not exist. - When set to ``True``, None will be returned when - attempting to find a nonexistent resource. 
- :returns: One :class:`~openstack.telemetry.v2.statistics.Statistics` - or None - """ - return self._find(statistics.Statistics, name_or_id, - ignore_missing=ignore_missing) - - def statistics(self, meter, **query): - """Return a generator of statistics - - :param meter: Meter resource or name for a meter. - :param kwargs \*\*query: Optional query parameters to be sent to limit - the resources being returned. - - :returns: A generator of statistics objects - :rtype: :class:`~openstack.telemetry.v2.statistics.Statistics` - """ - return self._list(statistics.Statistics, paginated=False, - meter_name=meter, **query) diff --git a/openstack/telemetry/v2/capability.py b/openstack/telemetry/v2/capability.py deleted file mode 100644 index cfe11571a7..0000000000 --- a/openstack/telemetry/v2/capability.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import six - -from openstack import resource2 as resource -from openstack.telemetry import telemetry_service - - -class Capability(resource.Resource): - """.. 
caution:: This API is a work in progress and is subject to change.""" - resource_key = 'capability' - resources_key = 'capabilities' - base_path = '/capabilities' - service = telemetry_service.TelemetryService() - - # Supported Operations - allow_list = True - - # Properties - is_enabled = resource.Body('enabled', type=bool) - - @classmethod - def list(cls, session, paginated=False, **params): - resp = session.get(cls.base_path, endpoint_filter=cls.service, - params=params) - resp = resp.json() - for key, value in six.iteritems(resp['api']): - yield cls.existing(id=key, enabled=value) diff --git a/openstack/telemetry/v2/meter.py b/openstack/telemetry/v2/meter.py deleted file mode 100644 index 9e7e9504ed..0000000000 --- a/openstack/telemetry/v2/meter.py +++ /dev/null @@ -1,42 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import resource2 as resource -from openstack.telemetry import telemetry_service - - -class Meter(resource.Resource): - """.. 
caution:: This API is a work in progress and is subject to change.""" - resource_key = 'meter' - base_path = '/meters' - service = telemetry_service.TelemetryService() - - # Supported Operations - allow_list = True - - # Properties - #: The ID of the meter - meter_id = resource.Body('meter_id', alternate_id=True) - #: The unique name for the meter - name = resource.Body('name') - #: The ID of the project that owns the resource - project_id = resource.Body('project_id') - #: The ID of the resource for which the measurements are taken - resource_id = resource.Body('resource_id') - #: The name of the source where the meter comes from - source = resource.Body('source') - #: The meter type - type = resource.Body('type') - #: The unit of measure - unit = resource.Body('unit') - #: The ID of the user who last triggered an update to the resource - user_id = resource.Body('user_id') diff --git a/openstack/telemetry/v2/resource.py b/openstack/telemetry/v2/resource.py deleted file mode 100644 index f359a391b1..0000000000 --- a/openstack/telemetry/v2/resource.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import resource2 as resource -from openstack.telemetry import telemetry_service - - -class Resource(resource.Resource): - """.. 
caution:: This API is a work in progress and is subject to change.""" - base_path = '/resources' - service = telemetry_service.TelemetryService() - - # Supported Operations - allow_get = True - allow_list = True - - # Properties - #: UTC date & time not later than the first sample known - #: for this resource. - first_sample_at = resource.Body('first_sample_timestamp') - #: UTC date & time not earlier than the last sample known - #: for this resource. - last_sample_at = resource.Body('last_sample_timestamp') - #: A list containing a self link and associated meter links - links = resource.Body('links') - #: Arbitrary metadata associated with the resource - metadata = resource.Body('metadata') - #: The ID of the owning project - project_id = resource.Body('project_id') - #: The ID for the resource - resource_id = resource.Body('resource_id', alternate_id=True) - #: The name of the source where the resource comes from - source = resource.Body('source') - #: The ID of the user who created the resource or updated it last - user_id = resource.Body('user_id') diff --git a/openstack/telemetry/v2/sample.py b/openstack/telemetry/v2/sample.py deleted file mode 100644 index 0ff47f2871..0000000000 --- a/openstack/telemetry/v2/sample.py +++ /dev/null @@ -1,52 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import resource2 as resource -from openstack.telemetry import telemetry_service - - -class Sample(resource.Resource): - """.. 
caution:: This API is a work in progress and is subject to change.""" - base_path = '/meters/%(counter_name)s' - service = telemetry_service.TelemetryService() - - # Supported Operations - allow_get = True - allow_list = True - - # Properties - #: When the sample has been generated. - generated_at = resource.Body('timestamp') - #: The message ID - message_id = resource.Body('message_id', alternate_id=True) - #: Arbitrary metadata associated with the sample - metadata = resource.Body('metadata') - #: The meter name this sample is for - counter_name = resource.Body('counter_name') - #: The meter name this sample is for - counter_type = resource.Body('counter_type') - #: The ID of the project this sample was taken for - project_id = resource.Body('project_id') - #: When the sample has been recorded. - recorded_at = resource.Body('recorded_at') - #: The ID of the resource this sample was taken for - resource_id = resource.Body('resource_id') - #: The name of the source that identifies where the sample comes from - source = resource.Body('source') - #: The meter type - type = resource.Body('type') - #: The unit of measure - unit = resource.Body('unit') - #: The ID of the user this sample was taken for - user_id = resource.Body('user_id') - #: The metered value - volume = resource.Body('volume') diff --git a/openstack/telemetry/v2/statistics.py b/openstack/telemetry/v2/statistics.py deleted file mode 100644 index a295d51472..0000000000 --- a/openstack/telemetry/v2/statistics.py +++ /dev/null @@ -1,62 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack import resource2 as resource -from openstack.telemetry import telemetry_service - - -class Statistics(resource.Resource): - """.. caution:: This API is a work in progress and is subject to change.""" - resource_key = 'statistics' - base_path = '/meters/%(meter_name)s/statistics' - service = telemetry_service.TelemetryService() - - # Supported Operations - allow_list = True - - # Properties - #: The selectable aggregate value(s) - aggregate = resource.Body('aggregate') - #: The average of all of the volume values seen in the data - avg = resource.Body('avg') - #: The number of samples seen - count = resource.Body('count') - #: The difference, in seconds, between the oldest and newest timestamp - duration = resource.Body('duration') - #: UTC date and time of the oldest timestamp, or the query end time. - duration_end_at = resource.Body('duration_end') - #: UTC date and time of the earliest timestamp, or the query start time. - duration_start_at = resource.Body('duration_start') - #: Dictionary of field names for group, if groupby statistics are requested - group_by = resource.Body('groupby') - #: The maximum volume seen in the data - max = resource.Body('max') - #: The minimum volume seen in the data - min = resource.Body('min') - #: The difference, in seconds, between the period start and end - period = resource.Body('period') - #: UTC date and time of the period end. - period_end_at = resource.Body('period_end') - #: UTC date and time of the period start. 
- period_start_at = resource.Body('period_start') - #: The total of all of the volume values seen in the data - sum = resource.Body('sum') - #: The unit type of the data set - #: TODO(Qiming): This is still incorrect - unit = resource.Body('unit', alternate_id=True) - - @classmethod - def list(cls, session, paginated=False, **params): - url = cls.base_path % {'meter_name': params.pop('meter_name')} - resp = session.get(url, endpoint_filter=cls.service, params=params) - for stat in resp.json(): - yield cls.existing(**stat) diff --git a/openstack/tests/unit/telemetry/alarm/v2/__init__.py b/openstack/test/__init__.py similarity index 100% rename from openstack/tests/unit/telemetry/alarm/v2/__init__.py rename to openstack/test/__init__.py diff --git a/openstack/test/fakes.py b/openstack/test/fakes.py new file mode 100644 index 0000000000..f77148bcbe --- /dev/null +++ b/openstack/test/fakes.py @@ -0,0 +1,240 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +The :mod:`~openstack.test.fakes` module exists to help application developers +using the OpenStack SDK to unit test their applications. It provides a number +of helper utilities to generate fake :class:`~openstack.resource.Resource` and +:class:`~openstack.proxy.Proxy` instances. These fakes do not require an +established connection and allow you to validate that your application using +valid attributes and methods for both :class:`~openstack.resource.Resource` and +:class:`~openstack.proxy.Proxy` instances. 
+""" + +from collections.abc import Generator +import inspect +import random +import typing as ty +from unittest import mock +import uuid + +from openstack import fields +from openstack import format as _format +from openstack import proxy +from openstack import resource +from openstack import service_description + + +def generate_fake_resource( + resource_type: type[resource.ResourceT], + **attrs: ty.Any, +) -> resource.ResourceT: + """Generate a fake resource + + Example usage: + + .. code-block:: python + + >>> from openstack.compute.v2 import server + >>> from openstack.test import fakes + >>> fakes.generate_fake_resource(server.Server) + openstack.compute.v2.server.Server(...) + + :param resource_type: Object class + :param attrs: Optional attributes to be set on resource + :return: Instance of ``resource_type`` class populated with fake + values of expected types + :raises NotImplementedError: If a resource attribute specifies a ``type`` + or ``list_type`` that cannot be automatically generated + """ + base_attrs: dict[str, ty.Any] = {} + for name, value in inspect.getmembers( + resource_type, + predicate=lambda x: isinstance(x, fields.Body | fields.URI), + ): + if isinstance(value, fields.Body): + target_type = value.type + if target_type is None: + if ( + name == "properties" + and hasattr( + resource_type, "_store_unknown_attrs_as_properties" + ) + and resource_type._store_unknown_attrs_as_properties + ): + # virtual "properties" attr which hosts all unknown attrs + # (i.e. Image) + base_attrs[name] = dict() + else: + # Type not defined - string + base_attrs[name] = uuid.uuid4().hex + elif issubclass(target_type, resource.Resource): + # Attribute is of another Resource type + base_attrs[name] = generate_fake_resource(target_type) + elif issubclass(target_type, list) and value.list_type is not None: + # List of ... 
+ item_type = value.list_type + if issubclass(item_type, resource.Resource): + # item is of Resource type + base_attrs[name] = [generate_fake_resource(item_type)] + elif issubclass(item_type, dict): + base_attrs[name] = [{}] + elif issubclass(item_type, str): + base_attrs[name] = [uuid.uuid4().hex] + else: + # Everything else + msg = ( + f"Fake value for {resource_type.__name__}.{name} can " + f"not be generated" + ) + raise NotImplementedError(msg) + elif issubclass(target_type, list) and value.list_type is None: + # List of str + base_attrs[name] = [uuid.uuid4().hex] + elif issubclass(target_type, str): + # definitely string + base_attrs[name] = uuid.uuid4().hex + elif issubclass(target_type, int): + # int + base_attrs[name] = random.randint(1, 100) # noqa: S311 + elif issubclass(target_type, float): + # float + base_attrs[name] = random.random() # noqa: S311 + elif issubclass(target_type, bool) or issubclass( + target_type, _format.BoolStr + ): + # bool + base_attrs[name] = random.choice([True, False]) # noqa: S311 + elif issubclass(target_type, dict): + # some dict - without further details leave it empty + base_attrs[name] = dict() + else: + # Everything else + msg = ( + f"Fake value for {resource_type.__name__}.{name} can not " + f"be generated" + ) + raise NotImplementedError(msg) + + if isinstance(value, fields.URI): + # For URI we just generate something + base_attrs[name] = uuid.uuid4().hex + + base_attrs.update(**attrs) + fake = resource_type(**base_attrs) + return fake + + +def generate_fake_resources( + resource_type: type[resource.ResourceT], + count: int = 1, + attrs: dict[str, ty.Any] | None = None, +) -> Generator[resource.ResourceT, None, None]: + """Generate a given number of fake resource entities + + Example usage: + + .. 
code-block:: python + + >>> from openstack.compute.v2 import server + >>> from openstack.test import fakes + >>> fakes.generate_fake_resources(server.Server, count=3) + + + :param resource_type: Object class + :param count: Number of objects to return + :param attrs: Attribute values to set into each instance + :return: Generator of ``resource_type`` class instances populated with fake + values of expected types. + """ + if not attrs: + attrs = {} + for _ in range(count): + yield generate_fake_resource(resource_type, **attrs) + + +# TODO(stephenfin): It would be helpful to generate fake resources for the +# various proxy methods also, but doing so requires deep code introspection or +# (better) type annotations +def generate_fake_proxy( + service: type[service_description.ServiceDescription], + api_version: str | None = None, +) -> proxy.Proxy: + """Generate a fake proxy for the given service type + + Example usage: + + .. code-block:: python + + >>> import functools + >>> from openstack.compute import compute_service + >>> from openstack.compute.v2 import server + >>> from openstack.test import fakes + >>> # create the fake proxy + >>> fake_compute_proxy = fakes.generate_fake_proxy( + ... compute_service.ComputeService, + ... ) + >>> # configure return values for various proxy APIs + >>> # note that this will generate new fake resources on each invocation + >>> fake_compute_proxy.get_server.side_effect = functools.partial( + ... fakes.generate_fake_resource, + ... server.Server, + ... ) + >>> fake_compute_proxy.servers.side_effect = functools.partial( + ... fakes.generate_fake_resources, + ... server.Server, + ... ) + >>> fake_compute_proxy.servers() + + >>> fake_compute_proxy.serverssss() + Traceback (most recent call last): + File "", line 1, in + File "/usr/lib64/python3.11/unittest/mock.py", line 653, in __getattr__ + raise AttributeError("Mock object has no attribute %r" % name) + AttributeError: Mock object has no attribute 'serverssss'. 
Did you mean: 'server_ips'? + + :param service: The service to generate the fake proxy for. + :type service: :class:`~openstack.service_description.ServiceDescription` + :param api_version: The API version to generate the fake proxy for. + This should be a major version must be supported by openstacksdk, as + specified in the ``supported_versions`` attribute of the provided + ``service``. This is only required if openstacksdk supports multiple + API versions for the given service. + :type api_version: int or None + :raises ValueError: if the ``service`` is not a valid + :class:`~openstack.service_description.ServiceDescription` or if + ``api_version`` is not supported + :returns: An autospecced mock of the :class:`~openstack.proxy.Proxy` + implementation for the specified service type and API version + """ # noqa: E501 + if not issubclass(service, service_description.ServiceDescription): + raise ValueError( + f"Service {service.__name__} is not a valid ServiceDescription" + ) + + supported_versions = service.supported_versions + + if api_version is None: + if len(supported_versions) > 1: + raise ValueError( + f"api_version was not provided but service {service.__name__} " + f"provides multiple API versions" + ) + else: + api_version = next(iter(supported_versions)) + elif api_version not in supported_versions: + raise ValueError( + f"API version {api_version} is not supported by openstacksdk. " + f"Supported API versions are: {', '.join(supported_versions)}" + ) + + return mock.create_autospec(supported_versions[api_version]) diff --git a/openstack/tests/README.rst b/openstack/tests/README.rst new file mode 100644 index 0000000000..388a401388 --- /dev/null +++ b/openstack/tests/README.rst @@ -0,0 +1,7 @@ +Tests for openstacksdk +====================== + +For information on how to run and extend these tests, refer to the `contributor +guide`__. + +.. 
__: https://docs.openstack.org/openstacksdk/latest/contributor/testing.html diff --git a/openstack/tests/ansible/README.txt b/openstack/tests/ansible/README.txt new file mode 100644 index 0000000000..3931b4af9e --- /dev/null +++ b/openstack/tests/ansible/README.txt @@ -0,0 +1,26 @@ +This directory contains a testing infrastructure for the Ansible +OpenStack modules. You will need a clouds.yaml file in order to run +the tests. You must provide a value for the `cloud` variable for each +run (using the -e option) as a default is not currently provided. + +If you want to run these tests against devstack, it is easiest to use +the tox target. This assumes you have a devstack-admin cloud defined +in your clouds.yaml file that points to devstack. Some examples of +using tox: + + tox -e ansible + + tox -e ansible keypair security_group + +If you want to run these tests directly, or against different clouds, +then you'll need to use the ansible-playbook command that comes with +the Ansible distribution and feed it the run.yml playbook. Some examples: + + # Run all module tests against a provider + ansible-playbook run.yml -e "cloud=hp" + + # Run only the keypair and security_group tests + ansible-playbook run.yml -e "cloud=hp" --tags "keypair,security_group" + + # Run all tests except security_group + ansible-playbook run.yml -e "cloud=hp" --skip-tags "security_group" diff --git a/openstack/tests/ansible/hooks/post_test_hook.sh b/openstack/tests/ansible/hooks/post_test_hook.sh new file mode 100755 index 0000000000..bbda4af3b4 --- /dev/null +++ b/openstack/tests/ansible/hooks/post_test_hook.sh @@ -0,0 +1,40 @@ +#!/bin/sh + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# TODO(shade) Rework for Zuul v3 + +export OPENSTACKSDK_DIR="$BASE/new/openstacksdk" + +cd $OPENSTACKSDK_DIR +sudo chown -R jenkins:stack $OPENSTACKSDK_DIR + +echo "Running shade Ansible test suite" + +if [ ${OPENSTACKSDK_ANSIBLE_DEV:-0} -eq 1 ] +then + # Use the upstream development version of Ansible + set +e + sudo -E -H -u jenkins tox -eansible -- -d + EXIT_CODE=$? + set -e +else + # Use the release version of Ansible + set +e + sudo -E -H -u jenkins tox -eansible + EXIT_CODE=$? + set -e +fi + + +exit $EXIT_CODE diff --git a/openstack/tests/ansible/roles/auth/tasks/main.yml b/openstack/tests/ansible/roles/auth/tasks/main.yml new file mode 100644 index 0000000000..ca894e50a7 --- /dev/null +++ b/openstack/tests/ansible/roles/auth/tasks/main.yml @@ -0,0 +1,6 @@ +--- +- name: Authenticate to the cloud + os_auth: + cloud={{ cloud }} + +- debug: var=service_catalog diff --git a/openstack/tests/ansible/roles/client_config/tasks/main.yml b/openstack/tests/ansible/roles/client_config/tasks/main.yml new file mode 100644 index 0000000000..1506f6d697 --- /dev/null +++ b/openstack/tests/ansible/roles/client_config/tasks/main.yml @@ -0,0 +1,7 @@ +--- +- name: List all profiles + os_client_config: + register: list + +# WARNING: This will output sensitive authentication information!!!! 
+- debug: var=list diff --git a/openstack/tests/ansible/roles/group/defaults/main.yml b/openstack/tests/ansible/roles/group/defaults/main.yml new file mode 100644 index 0000000000..361c01190d --- /dev/null +++ b/openstack/tests/ansible/roles/group/defaults/main.yml @@ -0,0 +1 @@ +group_name: ansible_group diff --git a/openstack/tests/ansible/roles/group/tasks/main.yml b/openstack/tests/ansible/roles/group/tasks/main.yml new file mode 100644 index 0000000000..535ed43182 --- /dev/null +++ b/openstack/tests/ansible/roles/group/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Create group + os_group: + cloud: "{{ cloud }}" + state: present + name: "{{ group_name }}" + +- name: Update group + os_group: + cloud: "{{ cloud }}" + state: present + name: "{{ group_name }}" + description: "updated description" + +- name: Delete group + os_group: + cloud: "{{ cloud }}" + state: absent + name: "{{ group_name }}" diff --git a/openstack/tests/ansible/roles/image/defaults/main.yml b/openstack/tests/ansible/roles/image/defaults/main.yml new file mode 100644 index 0000000000..13efe71440 --- /dev/null +++ b/openstack/tests/ansible/roles/image/defaults/main.yml @@ -0,0 +1 @@ +image_name: ansible_image diff --git a/openstack/tests/ansible/roles/image/tasks/main.yml b/openstack/tests/ansible/roles/image/tasks/main.yml new file mode 100644 index 0000000000..587e887b8b --- /dev/null +++ b/openstack/tests/ansible/roles/image/tasks/main.yml @@ -0,0 +1,54 @@ +--- +- name: Create a test image file + shell: mktemp + register: tmp_file + +- name: Fill test image file to 1MB + shell: truncate -s 1048576 {{ tmp_file.stdout }} + +- name: Create raw image (defaults) + os_image: + cloud: "{{ cloud }}" + state: present + name: "{{ image_name }}" + filename: "{{ tmp_file.stdout }}" + disk_format: raw + register: image + +- debug: var=image + +- name: Delete raw image (defaults) + os_image: + cloud: "{{ cloud }}" + state: absent + name: "{{ image_name }}" + +- name: Create raw image (complex) + os_image: 
+ cloud: "{{ cloud }}" + state: present + name: "{{ image_name }}" + filename: "{{ tmp_file.stdout }}" + disk_format: raw + is_public: True + min_disk: 10 + min_ram: 1024 + kernel: cirros-vmlinuz + ramdisk: cirros-initrd + properties: + cpu_arch: x86_64 + distro: ubuntu + register: image + +- debug: var=image + +- name: Delete raw image (complex) + os_image: + cloud: "{{ cloud }}" + state: absent + name: "{{ image_name }}" + +- name: Delete test image file + file: + name: "{{ tmp_file.stdout }}" + state: absent diff --git a/openstack/tests/ansible/roles/keypair/defaults/main.yml b/openstack/tests/ansible/roles/keypair/defaults/main.yml new file mode 100644 index 0000000000..3956b56a2b --- /dev/null +++ b/openstack/tests/ansible/roles/keypair/defaults/main.yml @@ -0,0 +1 @@ +keypair_name: shade_keypair diff --git a/openstack/tests/ansible/roles/keypair/tasks/main.yml b/openstack/tests/ansible/roles/keypair/tasks/main.yml new file mode 100644 index 0000000000..636bf1acac --- /dev/null +++ b/openstack/tests/ansible/roles/keypair/tasks/main.yml @@ -0,0 +1,62 @@ +--- +- name: Create keypair (non-existing) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: present + register: + keypair + +# This assert verifies that Ansible is capable serializing data returned by SDK +- name: Ensure private key is returned + assert: + that: + - keypair.key.public_key is defined and keypair.key.public_key + +- name: Delete keypair (non-existing) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: absent + +- name: Generate test key file + user: + name: "{{ ansible_env.USER }}" + generate_ssh_key: yes + ssh_key_file: .ssh/shade_id_rsa + +- name: Create keypair (file) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: present + public_key_file: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa.pub" + +- name: Delete keypair (file) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: absent + +- name: Create 
keypair (key) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: present + public_key: "{{ lookup('file', '~/.ssh/shade_id_rsa.pub') }}" + +- name: Delete keypair (key) + os_keypair: + cloud: "{{ cloud }}" + name: "{{ keypair_name }}" + state: absent + +- name: Delete test key pub file + file: + name: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa.pub" + state: absent + +- name: Delete test key pvt file + file: + name: "{{ ansible_env.HOME }}/.ssh/shade_id_rsa" + state: absent diff --git a/openstack/tests/ansible/roles/keystone_domain/defaults/main.yml b/openstack/tests/ansible/roles/keystone_domain/defaults/main.yml new file mode 100644 index 0000000000..049e7c378b --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_domain/defaults/main.yml @@ -0,0 +1 @@ +domain_name: ansible_domain diff --git a/openstack/tests/ansible/roles/keystone_domain/tasks/main.yml b/openstack/tests/ansible/roles/keystone_domain/tasks/main.yml new file mode 100644 index 0000000000..d1ca1273b7 --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_domain/tasks/main.yml @@ -0,0 +1,19 @@ +--- +- name: Create keystone domain + os_keystone_domain: + cloud: "{{ cloud }}" + state: present + name: "{{ domain_name }}" + description: "test description" + +- name: Update keystone domain + os_keystone_domain: + cloud: "{{ cloud }}" + name: "{{ domain_name }}" + description: "updated description" + +- name: Delete keystone domain + os_keystone_domain: + cloud: "{{ cloud }}" + state: absent + name: "{{ domain_name }}" diff --git a/openstack/tests/ansible/roles/keystone_role/defaults/main.yml b/openstack/tests/ansible/roles/keystone_role/defaults/main.yml new file mode 100644 index 0000000000..d1ebe5d1c1 --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_role/defaults/main.yml @@ -0,0 +1 @@ +role_name: ansible_keystone_role diff --git a/openstack/tests/ansible/roles/keystone_role/tasks/main.yml b/openstack/tests/ansible/roles/keystone_role/tasks/main.yml new file mode 
100644 index 0000000000..110b4386bf --- /dev/null +++ b/openstack/tests/ansible/roles/keystone_role/tasks/main.yml @@ -0,0 +1,12 @@ +--- +- name: Create keystone role + os_keystone_role: + cloud: "{{ cloud }}" + state: present + name: "{{ role_name }}" + +- name: Delete keystone role + os_keystone_role: + cloud: "{{ cloud }}" + state: absent + name: "{{ role_name }}" diff --git a/openstack/tests/ansible/roles/network/defaults/main.yml b/openstack/tests/ansible/roles/network/defaults/main.yml new file mode 100644 index 0000000000..d5435ecb11 --- /dev/null +++ b/openstack/tests/ansible/roles/network/defaults/main.yml @@ -0,0 +1,3 @@ +network_name: shade_network +network_shared: false +network_external: false diff --git a/openstack/tests/ansible/roles/network/tasks/main.yml b/openstack/tests/ansible/roles/network/tasks/main.yml new file mode 100644 index 0000000000..8a85c25ccf --- /dev/null +++ b/openstack/tests/ansible/roles/network/tasks/main.yml @@ -0,0 +1,14 @@ +--- +- name: Create network + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: present + shared: "{{ network_shared }}" + external: "{{ network_external }}" + +- name: Delete network + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: absent diff --git a/openstack/tests/ansible/roles/nova_flavor/tasks/main.yml b/openstack/tests/ansible/roles/nova_flavor/tasks/main.yml new file mode 100644 index 0000000000..c034bfc706 --- /dev/null +++ b/openstack/tests/ansible/roles/nova_flavor/tasks/main.yml @@ -0,0 +1,53 @@ +--- +- name: Create public flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: present + name: ansible_public_flavor + is_public: True + ram: 1024 + vcpus: 1 + disk: 10 + ephemeral: 10 + swap: 1 + flavorid: 12345 + +- name: Delete public flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: absent + name: ansible_public_flavor + +- name: Create private flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: present + name: ansible_private_flavor 
+ is_public: False + ram: 1024 + vcpus: 1 + disk: 10 + ephemeral: 10 + swap: 1 + flavorid: 12345 + +- name: Delete private flavor + os_nova_flavor: + cloud: "{{ cloud }}" + state: absent + name: ansible_private_flavor + +- name: Create flavor (defaults) + os_nova_flavor: + cloud: "{{ cloud }}" + state: present + name: ansible_defaults_flavor + ram: 1024 + vcpus: 1 + disk: 10 + +- name: Delete flavor (defaults) + os_nova_flavor: + cloud: "{{ cloud }}" + state: absent + name: ansible_defaults_flavor diff --git a/openstack/tests/ansible/roles/object/tasks/main.yml b/openstack/tests/ansible/roles/object/tasks/main.yml new file mode 100644 index 0000000000..ae54b6ba28 --- /dev/null +++ b/openstack/tests/ansible/roles/object/tasks/main.yml @@ -0,0 +1,37 @@ +--- +- name: Create a test object file + shell: mktemp + register: tmp_file + +- name: Create container + os_object: + cloud: "{{ cloud }}" + state: present + container: ansible_container + container_access: private + +- name: Put object + os_object: + cloud: "{{ cloud }}" + state: present + name: ansible_object + filename: "{{ tmp_file.stdout }}" + container: ansible_container + +- name: Delete object + os_object: + cloud: "{{ cloud }}" + state: absent + name: ansible_object + container: ansible_container + +- name: Delete container + os_object: + cloud: "{{ cloud }}" + state: absent + container: ansible_container + +- name: Delete test object file + file: + name: "{{ tmp_file.stdout }}" + state: absent diff --git a/openstack/tests/ansible/roles/port/defaults/main.yml b/openstack/tests/ansible/roles/port/defaults/main.yml new file mode 100644 index 0000000000..de022001b3 --- /dev/null +++ b/openstack/tests/ansible/roles/port/defaults/main.yml @@ -0,0 +1,6 @@ +network_name: ansible_port_network +network_external: true +subnet_name: ansible_port_subnet +port_name: ansible_port +secgroup_name: ansible_port_secgroup +no_security_groups: True diff --git a/openstack/tests/ansible/roles/port/tasks/main.yml 
b/openstack/tests/ansible/roles/port/tasks/main.yml new file mode 100644 index 0000000000..1a39140e58 --- /dev/null +++ b/openstack/tests/ansible/roles/port/tasks/main.yml @@ -0,0 +1,101 @@ +--- +- name: Create network + os_network: + cloud: "{{ cloud }}" + state: present + name: "{{ network_name }}" + external: "{{ network_external }}" + +- name: Create subnet + os_subnet: + cloud: "{{ cloud }}" + state: present + name: "{{ subnet_name }}" + network_name: "{{ network_name }}" + cidr: 10.5.5.0/24 + +- name: Create port (no security group or default security group) + os_port: + cloud: "{{ cloud }}" + state: present + name: "{{ port_name }}" + network: "{{ network_name }}" + no_security_groups: "{{ no_security_groups }}" + fixed_ips: + - ip_address: 10.5.5.69 + register: port + +- debug: var=port + +- name: Delete port (no security group or default security group) + os_port: + cloud: "{{ cloud }}" + state: absent + name: "{{ port_name }}" + +- name: Create security group + os_security_group: + cloud: "{{ cloud }}" + state: present + name: "{{ secgroup_name }}" + description: Test group + +- name: Create port (with security group) + os_port: + cloud: "{{ cloud }}" + state: present + name: "{{ port_name }}" + network: "{{ network_name }}" + fixed_ips: + - ip_address: 10.5.5.69 + security_groups: + - "{{ secgroup_name }}" + register: port + +- debug: var=port + +- name: Delete port (with security group) + os_port: + cloud: "{{ cloud }}" + state: absent + name: "{{ port_name }}" + +- name: Create port (with allowed_address_pairs and extra_dhcp_opts) + os_port: + cloud: "{{ cloud }}" + state: present + name: "{{ port_name }}" + network: "{{ network_name }}" + no_security_groups: "{{ no_security_groups }}" + allowed_address_pairs: + - ip_address: 10.6.7.0/24 + extra_dhcp_opts: + - opt_name: "bootfile-name" + opt_value: "testfile.1" + register: port + +- debug: var=port + +- name: Delete port (with allowed_address_pairs and extra_dhcp_opts) + os_port: + cloud: "{{ cloud }}" 
+ state: absent + name: "{{ port_name }}" + +- name: Delete security group + os_security_group: + cloud: "{{ cloud }}" + state: absent + name: "{{ secgroup_name }}" + +- name: Delete subnet + os_subnet: + cloud: "{{ cloud }}" + state: absent + name: "{{ subnet_name }}" + +- name: Delete network + os_network: + cloud: "{{ cloud }}" + state: absent + name: "{{ network_name }}" diff --git a/openstack/tests/ansible/roles/router/defaults/main.yml b/openstack/tests/ansible/roles/router/defaults/main.yml new file mode 100644 index 0000000000..f7d53933a5 --- /dev/null +++ b/openstack/tests/ansible/roles/router/defaults/main.yml @@ -0,0 +1,3 @@ +external_network_name: ansible_external_net +network_external: true +router_name: ansible_router diff --git a/openstack/tests/ansible/roles/router/tasks/main.yml b/openstack/tests/ansible/roles/router/tasks/main.yml new file mode 100644 index 0000000000..083d4f0668 --- /dev/null +++ b/openstack/tests/ansible/roles/router/tasks/main.yml @@ -0,0 +1,95 @@ +--- +# Regular user operation +- name: Create internal network + os_network: + cloud: "{{ cloud }}" + state: present + name: "{{ network_name }}" + external: false + +- name: Create subnet1 + os_subnet: + cloud: "{{ cloud }}" + state: present + network_name: "{{ network_name }}" + name: shade_subnet1 + cidr: 10.7.7.0/24 + +- name: Create router + os_router: + cloud: "{{ cloud }}" + state: present + name: "{{ router_name }}" + +- name: Update router (add interface) + os_router: + cloud: "{{ cloud }}" + state: present + name: "{{ router_name }}" + interfaces: + - shade_subnet1 + +# Admin operation +- name: Create external network + os_network: + cloud: "{{ cloud }}" + state: present + name: "{{ external_network_name }}" + external: "{{ network_external }}" + when: + - network_external + +- name: Create subnet2 + os_subnet: + cloud: "{{ cloud }}" + state: present + network_name: "{{ external_network_name }}" + name: shade_subnet2 + cidr: 10.6.6.0/24 + when: + - network_external + +- 
name: Update router (add external gateway) + os_router: + cloud: "{{ cloud }}" + state: present + name: "{{ router_name }}" + network: "{{ external_network_name }}" + interfaces: + - shade_subnet1 + when: + - network_external + +- name: Delete router + os_router: + cloud: "{{ cloud }}" + state: absent + name: "{{ router_name }}" + +- name: Delete subnet1 + os_subnet: + cloud: "{{ cloud }}" + state: absent + name: shade_subnet1 + +- name: Delete subnet2 + os_subnet: + cloud: "{{ cloud }}" + state: absent + name: shade_subnet2 + when: + - network_external + +- name: Delete internal network + os_network: + cloud: "{{ cloud }}" + state: absent + name: "{{ network_name }}" + +- name: Delete external network + os_network: + cloud: "{{ cloud }}" + state: absent + name: "{{ external_network_name }}" + when: + - network_external diff --git a/openstack/tests/ansible/roles/security_group/defaults/main.yml b/openstack/tests/ansible/roles/security_group/defaults/main.yml new file mode 100644 index 0000000000..00310dd101 --- /dev/null +++ b/openstack/tests/ansible/roles/security_group/defaults/main.yml @@ -0,0 +1 @@ +secgroup_name: shade_secgroup diff --git a/openstack/tests/ansible/roles/security_group/tasks/main.yml b/openstack/tests/ansible/roles/security_group/tasks/main.yml new file mode 100644 index 0000000000..ddc7e50cd5 --- /dev/null +++ b/openstack/tests/ansible/roles/security_group/tasks/main.yml @@ -0,0 +1,123 @@ +--- +- name: Create security group + os_security_group: + cloud: "{{ cloud }}" + name: "{{ secgroup_name }}" + state: present + description: Created from Ansible playbook + +- name: Create empty ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: icmp + remote_ip_prefix: 0.0.0.0/0 + +- name: Create -1 ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: icmp + port_range_min: -1 + port_range_max: -1 + 
remote_ip_prefix: 0.0.0.0/0 + +- name: Create empty TCP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: tcp + remote_ip_prefix: 0.0.0.0/0 + +- name: Create empty UDP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: udp + remote_ip_prefix: 0.0.0.0/0 + +- name: Create HTTP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 + +- name: Create egress rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: present + protocol: tcp + port_range_min: 30000 + port_range_max: 30001 + remote_ip_prefix: 0.0.0.0/0 + direction: egress + +- name: Delete empty ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: icmp + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete -1 ICMP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: icmp + port_range_min: -1 + port_range_max: -1 + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete empty TCP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: tcp + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete empty UDP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: udp + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete HTTP rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: absent + protocol: tcp + port_range_min: 80 + port_range_max: 80 + remote_ip_prefix: 0.0.0.0/0 + +- name: Delete egress rule + os_security_group_rule: + cloud: "{{ cloud }}" + security_group: "{{ secgroup_name }}" + state: 
absent + protocol: tcp + port_range_min: 30000 + port_range_max: 30001 + remote_ip_prefix: 0.0.0.0/0 + direction: egress + +- name: Delete security group + os_security_group: + cloud: "{{ cloud }}" + name: "{{ secgroup_name }}" + state: absent diff --git a/openstack/tests/ansible/roles/server/defaults/main.yaml b/openstack/tests/ansible/roles/server/defaults/main.yaml new file mode 100644 index 0000000000..e3bd5f33b9 --- /dev/null +++ b/openstack/tests/ansible/roles/server/defaults/main.yaml @@ -0,0 +1,5 @@ +server_network: private +server_name: ansible_server +flavor: m1.tiny +floating_ip_pool_name: public +boot_volume_size: 5 diff --git a/openstack/tests/ansible/roles/server/tasks/main.yml b/openstack/tests/ansible/roles/server/tasks/main.yml new file mode 100644 index 0000000000..ac03115546 --- /dev/null +++ b/openstack/tests/ansible/roles/server/tasks/main.yml @@ -0,0 +1,92 @@ +--- +- name: Create server with meta as CSV + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + network: "{{ server_network }}" + auto_floating_ip: false + meta: "key1=value1,key2=value2" + wait: true + register: server + +- debug: var=server + +- name: Delete server with meta as CSV + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true + +- name: Create server with meta as dict + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + auto_floating_ip: false + network: "{{ server_network }}" + meta: + key1: value1 + key2: value2 + wait: true + register: server + +- debug: var=server + +- name: Delete server with meta as dict + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true + +- name: Create server (FIP from pool/network) + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + 
network: "{{ server_network }}" + floating_ip_pools: + - "{{ floating_ip_pool_name }}" + wait: true + register: server + +- debug: var=server + +- name: Delete server (FIP from pool/network) + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true + +- name: Create server from volume + os_server: + cloud: "{{ cloud }}" + state: present + name: "{{ server_name }}" + image: "{{ image }}" + flavor: "{{ flavor }}" + network: "{{ server_network }}" + auto_floating_ip: false + boot_from_volume: true + volume_size: "{{ boot_volume_size }}" + terminate_volume: true + wait: true + register: server + +- debug: var=server + +- name: Delete server with volume + os_server: + cloud: "{{ cloud }}" + state: absent + name: "{{ server_name }}" + wait: true diff --git a/openstack/tests/ansible/roles/subnet/defaults/main.yml b/openstack/tests/ansible/roles/subnet/defaults/main.yml new file mode 100644 index 0000000000..5ccc85abc1 --- /dev/null +++ b/openstack/tests/ansible/roles/subnet/defaults/main.yml @@ -0,0 +1,2 @@ +subnet_name: shade_subnet +enable_subnet_dhcp: false diff --git a/openstack/tests/ansible/roles/subnet/tasks/main.yml b/openstack/tests/ansible/roles/subnet/tasks/main.yml new file mode 100644 index 0000000000..a7ca490ad8 --- /dev/null +++ b/openstack/tests/ansible/roles/subnet/tasks/main.yml @@ -0,0 +1,43 @@ +--- +- name: Create network {{ network_name }} + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: present + +- name: Create subnet {{ subnet_name }} on network {{ network_name }} + os_subnet: + cloud: "{{ cloud }}" + network_name: "{{ network_name }}" + name: "{{ subnet_name }}" + state: present + enable_dhcp: "{{ enable_subnet_dhcp }}" + dns_nameservers: + - 8.8.8.7 + - 8.8.8.8 + cidr: 192.168.0.0/24 + gateway_ip: 192.168.0.1 + allocation_pool_start: 192.168.0.2 + allocation_pool_end: 192.168.0.254 + +- name: Update subnet + os_subnet: + cloud: "{{ cloud }}" + network_name: "{{ network_name }}" + name: 
"{{ subnet_name }}" + state: present + dns_nameservers: + - 8.8.8.7 + cidr: 192.168.0.0/24 + +- name: Delete subnet {{ subnet_name }} + os_subnet: + cloud: "{{ cloud }}" + name: "{{ subnet_name }}" + state: absent + +- name: Delete network {{ network_name }} + os_network: + cloud: "{{ cloud }}" + name: "{{ network_name }}" + state: absent diff --git a/openstack/tests/ansible/roles/user/tasks/main.yml b/openstack/tests/ansible/roles/user/tasks/main.yml new file mode 100644 index 0000000000..6585ca582b --- /dev/null +++ b/openstack/tests/ansible/roles/user/tasks/main.yml @@ -0,0 +1,30 @@ +--- +- name: Create user + os_user: + cloud: "{{ cloud }}" + state: present + name: ansible_user + password: secret + email: ansible.user@nowhere.net + domain: default + default_project: demo + register: user + +- debug: var=user + +- name: Update user + os_user: + cloud: "{{ cloud }}" + state: present + name: ansible_user + password: secret + email: updated.ansible.user@nowhere.net + register: updateduser + +- debug: var=updateduser + +- name: Delete user + os_user: + cloud: "{{ cloud }}" + state: absent + name: ansible_user diff --git a/openstack/tests/ansible/roles/user_group/tasks/main.yml b/openstack/tests/ansible/roles/user_group/tasks/main.yml new file mode 100644 index 0000000000..a0074e2dcd --- /dev/null +++ b/openstack/tests/ansible/roles/user_group/tasks/main.yml @@ -0,0 +1,31 @@ +--- +- name: Create user + os_user: + cloud: "{{ cloud }}" + state: present + name: ansible_user + password: secret + email: ansible.user@nowhere.net + domain: default + default_project: demo + register: user + +- name: Assign user to nonadmins group + os_user_group: + cloud: "{{ cloud }}" + state: present + user: ansible_user + group: nonadmins + +- name: Remove user from nonadmins group + os_user_group: + cloud: "{{ cloud }}" + state: absent + user: ansible_user + group: nonadmins + +- name: Delete user + os_user: + cloud: "{{ cloud }}" + state: absent + name: ansible_user diff --git 
a/openstack/tests/ansible/roles/volume/tasks/main.yml b/openstack/tests/ansible/roles/volume/tasks/main.yml new file mode 100644 index 0000000000..1479a0030b --- /dev/null +++ b/openstack/tests/ansible/roles/volume/tasks/main.yml @@ -0,0 +1,17 @@ +--- +- name: Create volume + os_volume: + cloud: "{{ cloud }}" + state: present + size: 1 + display_name: ansible_volume + display_description: Test volume + register: vol + +- debug: var=vol + +- name: Delete volume + os_volume: + cloud: "{{ cloud }}" + state: absent + display_name: ansible_volume diff --git a/openstack/tests/ansible/run.yml b/openstack/tests/ansible/run.yml new file mode 100644 index 0000000000..9340ccd06a --- /dev/null +++ b/openstack/tests/ansible/run.yml @@ -0,0 +1,26 @@ +--- +- hosts: localhost + connection: local + gather_facts: true + + roles: + - { role: auth, tags: auth } + - { role: client_config, tags: client_config } + - { role: group, tags: group } + # TODO(mordred) Reenable this once the fixed os_image winds up in an + # upstream ansible release. + # - { role: image, tags: image } + - { role: keypair, tags: keypair } + - { role: keystone_domain, tags: keystone_domain } + - { role: keystone_role, tags: keystone_role } + - { role: network, tags: network } + - { role: nova_flavor, tags: nova_flavor } + - { role: object, tags: object } + - { role: port, tags: port } + - { role: router, tags: router } + - { role: security_group, tags: security_group } + - { role: server, tags: server } + - { role: subnet, tags: subnet } + - { role: user, tags: user } + - { role: user_group, tags: user_group } + - { role: volume, tags: volume } diff --git a/openstack/tests/base.py b/openstack/tests/base.py new file mode 100644 index 0000000000..3226f6be53 --- /dev/null +++ b/openstack/tests/base.py @@ -0,0 +1,149 @@ +# Copyright 2010-2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import io +import logging +import os +import pprint +import sys +import typing as ty + +import fixtures +from oslotest import base +import testtools.content + +from openstack.tests import fixtures as os_fixtures +from openstack import utils + +_TRUE_VALUES = ('true', '1', 'yes') + + +class TestCase(base.BaseTestCase): + """Test case base class for all tests.""" + + # A way to adjust slow test classes + TIMEOUT_SCALING_FACTOR = 1.0 + + def setUp(self): + """Run before each test method to initialize test environment.""" + # No openstacksdk unit tests should EVER run longer than a second. + # Set this to 5 by default just to give us some fudge. + # Do this before super setUp so that we intercept the default value + # in oslotest. TODO(mordred) Make the default timeout configurable + # in oslotest. 
+ test_timeout = int(os.environ.get('OS_TEST_TIMEOUT', '5')) + try: + test_timeout = int(test_timeout * self.TIMEOUT_SCALING_FACTOR) + self.useFixture( + fixtures.EnvironmentVariable( + 'OS_TEST_TIMEOUT', str(test_timeout) + ) + ) + except ValueError: + # Let oslotest do its thing + pass + + super().setUp() + + self.warnings = self.useFixture(os_fixtures.WarningsFixture()) + + self._log_stream: ty.TextIO + + if os.environ.get('OS_LOG_CAPTURE') in _TRUE_VALUES: + self._log_stream = io.StringIO() + if os.environ.get('OS_ALWAYS_LOG') in _TRUE_VALUES: + self.addCleanup(self.printLogs) + else: + self.addOnException(self.attachLogs) + else: + self._log_stream = sys.stdout + + handler = logging.StreamHandler(self._log_stream) + formatter = logging.Formatter('%(asctime)s %(name)-32s %(message)s') + handler.setFormatter(formatter) + + logger = logging.getLogger('openstack') + logger.setLevel(logging.DEBUG) + logger.addHandler(handler) + + # Enable HTTP level tracing + # TODO(mordred) This is blowing out our memory we think + logger = logging.getLogger('keystoneauth') + logger.setLevel(logging.INFO) + logger.addHandler(handler) + logger.propagate = False + + def _fake_logs(self): + # Override _fake_logs in oslotest until we can get our + # attach-on-exception logic added + pass + + def assertEqual(self, first, second, *args, **kwargs): + '''Munch aware wrapper''' + if isinstance(first, utils.Munch): + first = first.toDict() + if isinstance(second, utils.Munch): + second = second.toDict() + return super().assertEqual(first, second, *args, **kwargs) + + def printLogs(self, *args): + self._log_stream.seek(0) + print(self._log_stream.read()) + + def attachLogs(self, *args): + def reader(): + self._log_stream.seek(0) + while True: + x = self._log_stream.read(4096) + if not x: + break + yield x.encode('utf8') + + content = testtools.content.content_from_reader( + reader, testtools.content_type.UTF8_TEXT, False + ) + self.addDetail('logging', content) + + def 
add_info_on_exception(self, name, text): + def add_content(unused): + self.addDetail( + name, testtools.content.text_content(pprint.pformat(text)) + ) + + self.addOnException(add_content) + + def assertSubdict(self, part, whole): + missing_keys = [] + for key in part: + # In the resource we have virtual access by not existing keys. To + # verify those are there try access it. + if not whole[key] and part[key]: + missing_keys.append(key) + if missing_keys: + self.fail(f"Keys {missing_keys} are in {part} but not in {whole}") + wrong_values = [ + (key, part[key], whole[key]) + for key in part + if part[key] != whole[key] + ] + if wrong_values: + self.fail( + "Mismatched values: {}".format( + ", ".join( + "for {} got {} and {}".format(*tpl) + for tpl in wrong_values + ) + ) + ) diff --git a/openstack/tests/examples/test_compute.py b/openstack/tests/examples/test_compute.py deleted file mode 100644 index c13337ef78..0000000000 --- a/openstack/tests/examples/test_compute.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import unittest - -from examples.compute import create -from examples.compute import delete -from examples.compute import find as compute_find -from examples.compute import list as compute_list -from examples import connect -from examples.network import find as network_find -from examples.network import list as network_list - - -class TestCompute(unittest.TestCase): - """Test the compute examples - - The purpose of these tests is to ensure the examples run without erring - out. - """ - - @classmethod - def setUpClass(cls): - cls.conn = connect.create_connection_from_config() - - def test_compute(self): - compute_list.list_servers(self.conn) - compute_list.list_images(self.conn) - compute_list.list_flavors(self.conn) - compute_list.list_keypairs(self.conn) - network_list.list_networks(self.conn) - - compute_find.find_image(self.conn) - compute_find.find_flavor(self.conn) - compute_find.find_keypair(self.conn) - network_find.find_network(self.conn) - - create.create_server(self.conn) - - delete.delete_keypair(self.conn) - delete.delete_server(self.conn) diff --git a/openstack/tests/examples/test_identity.py b/openstack/tests/examples/test_identity.py deleted file mode 100644 index f55dbe53d3..0000000000 --- a/openstack/tests/examples/test_identity.py +++ /dev/null @@ -1,38 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import unittest - -from examples import connect -from examples.identity import list as identity_list - - -class TestIdentity(unittest.TestCase): - """Test the identity examples - - The purpose of these tests is to ensure the examples run without erring - out. - """ - - @classmethod - def setUpClass(cls): - cls.conn = connect.create_connection_from_config() - - def test_identity(self): - identity_list.list_users(self.conn) - identity_list.list_credentials(self.conn) - identity_list.list_projects(self.conn) - identity_list.list_domains(self.conn) - identity_list.list_groups(self.conn) - identity_list.list_services(self.conn) - identity_list.list_endpoints(self.conn) - identity_list.list_regions(self.conn) diff --git a/openstack/tests/examples/test_image.py b/openstack/tests/examples/test_image.py deleted file mode 100644 index db027e9b46..0000000000 --- a/openstack/tests/examples/test_image.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from examples import connect -from examples.image import create as image_create -from examples.image import delete as image_delete -from examples.image import list as image_list - - -class TestImage(unittest.TestCase): - """Test the image examples - - The purpose of these tests is to ensure the examples run without erring - out. 
- """ - - @classmethod - def setUpClass(cls): - cls.conn = connect.create_connection_from_config() - - def test_image(self): - image_list.list_images(self.conn) - - image_create.upload_image(self.conn) - - image_delete.delete_image(self.conn) diff --git a/openstack/tests/examples/test_network.py b/openstack/tests/examples/test_network.py deleted file mode 100644 index 0e09f39b7f..0000000000 --- a/openstack/tests/examples/test_network.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from examples import connect -from examples.network import create as network_create -from examples.network import delete as network_delete -from examples.network import find as network_find -from examples.network import list as network_list - - -class TestNetwork(unittest.TestCase): - """Test the network examples - - The purpose of these tests is to ensure the examples run without erring - out. 
- """ - - @classmethod - def setUpClass(cls): - cls.conn = connect.create_connection_from_config() - - def test_network(self): - network_list.list_networks(self.conn) - network_list.list_subnets(self.conn) - network_list.list_ports(self.conn) - network_list.list_security_groups(self.conn) - network_list.list_routers(self.conn) - - network_find.find_network(self.conn) - - network_create.create_network(self.conn) - network_delete.delete_network(self.conn) diff --git a/openstack/tests/fakes.py b/openstack/tests/fakes.py new file mode 100644 index 0000000000..c4990ea031 --- /dev/null +++ b/openstack/tests/fakes.py @@ -0,0 +1,563 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +fakes +----- + +Fakes used for testing +""" + +import datetime +import hashlib +import json +import uuid + +from openstack.cloud import meta +from openstack.orchestration.util import template_format + +PROJECT_ID = '1c36b64c840a42cd9e9b931a369337f0' +FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddd' +CHOCOLATE_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8ddde' +STRAWBERRY_FLAVOR_ID = '0c1d9008-f546-4608-9e8f-f8bdaec8dddf' +COMPUTE_ENDPOINT = 'https://compute.example.com/v2.1' +ORCHESTRATION_ENDPOINT = f'https://orchestration.example.com/v1/{PROJECT_ID}' +NO_MD5 = '93b885adfe0da089cdf634904fd59f71' +NO_SHA256 = '6e340b9cffb37a989ca544e6bb780a2c78901d3fb33738768511a30617afa01d' +FAKE_PUBLIC_KEY = ( + "ssh-rsa AAAAB3NzaC1yc2EAAAADAQABAAABAQCkF3MX59OrlBs3dH5CU7lNmvpbrgZxSpyGj" + "lnE8Flkirnc/Up22lpjznoxqeoTAwTW034k7Dz6aYIrZGmQwe2TkE084yqvlj45Dkyoj95fW/" + "sZacm0cZNuL69EObEGHdprfGJQajrpz22NQoCD8TFB8Wv+8om9NH9Le6s+WPe98WC77KLw8qg" + "fQsbIey+JawPWl4O67ZdL5xrypuRjfIPWjgy/VH85IXg/Z/GONZ2nxHgSShMkwqSFECAC5L3P" + "HB+0+/12M/iikdatFSVGjpuHvkLOs3oe7m6HlOfluSJ85BzLWBbvva93qkGmLg4ZAc8rPh2O+" + "YIsBUHNLLMM/oQp Generated-by-Nova\n" +) + + +def make_fake_flavor(flavor_id, name, ram=100, disk=1600, vcpus=24): + return { + 'OS-FLV-DISABLED:disabled': False, + 'OS-FLV-EXT-DATA:ephemeral': 0, + 'disk': disk, + 'id': flavor_id, + 'links': [ + { + 'href': f'{COMPUTE_ENDPOINT}/flavors/{flavor_id}', + 'rel': 'self', + }, + { + 'href': f'{COMPUTE_ENDPOINT}/flavors/{flavor_id}', + 'rel': 'bookmark', + }, + ], + 'name': name, + 'os-flavor-access:is_public': True, + 'ram': ram, + 'rxtx_factor': 1.0, + 'swap': 0, + 'vcpus': vcpus, + } + + +FAKE_FLAVOR = make_fake_flavor(FLAVOR_ID, 'vanilla') +FAKE_CHOCOLATE_FLAVOR = make_fake_flavor( + CHOCOLATE_FLAVOR_ID, 'chocolate', ram=200 +) +FAKE_STRAWBERRY_FLAVOR = make_fake_flavor( + STRAWBERRY_FLAVOR_ID, 'strawberry', ram=300 +) +FAKE_FLAVOR_LIST = [FAKE_FLAVOR, FAKE_CHOCOLATE_FLAVOR, FAKE_STRAWBERRY_FLAVOR] +FAKE_TEMPLATE = 
'''heat_template_version: 2014-10-16 + +parameters: + length: + type: number + default: 10 + +resources: + my_rand: + type: OS::Heat::RandomString + properties: + length: {get_param: length} +outputs: + rand: + value: + get_attr: [my_rand, value] +''' +FAKE_TEMPLATE_CONTENT = template_format.parse(FAKE_TEMPLATE) + + +def make_fake_server( + server_id, + name, + status='ACTIVE', + admin_pass=None, + addresses=None, + image=None, + flavor=None, +): + if addresses is None: + if status == 'ACTIVE': + addresses = { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d", + "version": 6, + "addr": "fddb:b018:307:0:f816:3eff:fedf:b08d", + "OS-EXT-IPS:type": "fixed", + }, + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d", + "version": 4, + "addr": "10.1.0.9", + "OS-EXT-IPS:type": "fixed", + }, + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:df:b0:8d", + "version": 4, + "addr": "172.24.5.5", + "OS-EXT-IPS:type": "floating", + }, + ] + } + else: + addresses = {} + if image is None: + image = {"id": "217f3ab1-03e0-4450-bf27-63d52b421e9e", "links": []} + if flavor is None: + flavor = {"id": "64", "links": []} + + server = { + "OS-EXT-STS:task_state": None, + "addresses": addresses, + "links": [], + "image": image, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2017-03-23T23:57:38.000000", + "flavor": flavor, + "id": server_id, + "security_groups": [{"name": "default"}], + "user_id": "9c119f4beaaa438792ce89387362b3ad", + "OS-DCF:diskConfig": "MANUAL", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "OS-EXT-AZ:availability_zone": "nova", + "metadata": {}, + "status": status, + "updated": "2017-03-23T23:57:39Z", + "hostId": "89d165f04384e3ffa4b6536669eb49104d30d6ca832bba2684605dbc", + "OS-SRV-USG:terminated_at": None, + "key_name": None, + "name": name, + "created": "2017-03-23T23:57:12Z", + "tenant_id": PROJECT_ID, + "os-extended-volumes:volumes_attached": [], + "config_drive": "True", + } + if admin_pass: + 
server['adminPass'] = admin_pass + return json.loads(json.dumps(server)) + + +def make_fake_keypair(name): + # Note: this is literally taken from: + # https://docs.openstack.org/api-ref/compute/ + return { + "fingerprint": "7e:eb:ab:24:ba:d1:e1:88:ae:9a:fb:66:53:df:d3:bd", + "name": name, + "type": "ssh", + "public_key": FAKE_PUBLIC_KEY, + "created_at": datetime.datetime.now().isoformat(), + } + + +def make_fake_stack(id, name, description=None, status='CREATE_COMPLETE'): + return { + 'creation_time': '2017-03-23T23:57:12Z', + 'deletion_time': '2017-03-23T23:57:12Z', + 'description': description, + 'id': id, + 'links': [], + 'parent': None, + 'stack_name': name, + 'stack_owner': None, + 'stack_status': status, + 'stack_user_project_id': PROJECT_ID, + 'tags': None, + 'updated_time': '2017-03-23T23:57:12Z', + } + + +def make_fake_stack_event( + id, name, status='CREATE_COMPLETED', resource_name='id' +): + event_id = uuid.uuid4().hex + self_url = "{endpoint}/stacks/{name}/{id}/resources/{name}/events/{event}" + resource_url = "{endpoint}/stacks/{name}/{id}/resources/{name}" + return { + "resource_name": id if resource_name == 'id' else name, + "event_time": "2017-03-26T19:38:18", + "links": [ + { + "href": self_url.format( + endpoint=ORCHESTRATION_ENDPOINT, + name=name, + id=id, + event=event_id, + ), + "rel": "self", + }, + { + "href": resource_url.format( + endpoint=ORCHESTRATION_ENDPOINT, name=name, id=id + ), + "rel": "resource", + }, + { + "href": f"{ORCHESTRATION_ENDPOINT}/stacks/{name}/{id}", + "rel": "stack", + }, + ], + "logical_resource_id": name, + "resource_status": status, + "resource_status_reason": "", + "physical_resource_id": id, + "id": event_id, + } + + +def make_fake_image( + image_id=None, + md5=NO_MD5, + sha256=NO_SHA256, + status='active', + image_name='fake_image', + data=None, + checksum='ee36e35a297980dee1b514de9803ec6d', +): + if data: + md5_hash = hashlib.md5(usedforsecurity=False) + sha256_hash = hashlib.sha256() + sha512_hash = 
hashlib.sha512() + with open(data, 'rb') as file_obj: + for chunk in iter(lambda: file_obj.read(8192), b''): + md5_hash.update(chunk) + sha256_hash.update(chunk) + sha512_hash.update(chunk) + md5 = md5_hash.hexdigest() + sha256 = sha256_hash.hexdigest() + sha512 = sha512_hash.hexdigest() + else: + sha512 = None + return { + 'image_state': 'available', + 'container_format': 'bare', + 'min_ram': 0, + 'ramdisk_id': 'fake_ramdisk_id', + 'updated_at': '2016-02-10T05:05:02Z', + 'file': '/v2/images/' + image_id + '/file', + 'size': 3402170368, + 'image_type': 'snapshot', + 'disk_format': 'qcow2', + 'id': image_id, + 'schema': '/v2/schemas/image', + 'status': status, + 'tags': [], + 'visibility': 'private', + 'locations': [ + {'url': 'http://127.0.0.1/images/' + image_id, 'metadata': {}} + ], + 'min_disk': 40, + 'virtual_size': None, + 'name': image_name, + 'checksum': md5 or checksum, + 'created_at': '2016-02-10T05:03:11Z', + 'owner_specified.openstack.md5': md5 or NO_MD5, + 'owner_specified.openstack.sha256': sha256 or NO_SHA256, + 'owner_specified.openstack.object': f'images/{image_name}', + 'protected': False, + # Add secure hash fields (os_hash_algo and os_hash_value) + # Default to sha512 if data was provided, otherwise None + 'os_hash_algo': 'sha512' if sha512 else None, + 'os_hash_value': sha512 if sha512 else None, + } + + +def make_fake_machine(machine_name, machine_id=None): + if not machine_id: + machine_id = uuid.uuid4().hex + return meta.obj_to_munch(FakeMachine(id=machine_id, name=machine_name)) + + +def make_fake_port(address, node_id=None, port_id=None): + if not node_id: + node_id = uuid.uuid4().hex + if not port_id: + port_id = uuid.uuid4().hex + return meta.obj_to_munch( + FakeMachinePort(id=port_id, address=address, node_id=node_id) + ) + + +class FakeFloatingIP: + def __init__(self, id, pool, ip, fixed_ip, instance_id): + self.id = id + self.pool = pool + self.ip = ip + self.fixed_ip = fixed_ip + self.instance_id = instance_id + + +def 
make_fake_server_group(id, name, policies): + return json.loads( + json.dumps( + { + 'id': id, + 'name': name, + 'policies': policies, + 'members': [], + 'metadata': {}, + } + ) + ) + + +def make_fake_hypervisor(id, name): + return json.loads( + json.dumps( + { + 'id': id, + 'hypervisor_hostname': name, + 'state': 'up', + 'status': 'enabled', + "cpu_info": { + "arch": "x86_64", + "model": "Nehalem", + "vendor": "Intel", + "features": ["pge", "clflush"], + "topology": {"cores": 1, "threads": 1, "sockets": 4}, + }, + "current_workload": 0, + "disk_available_least": 0, + "host_ip": "1.1.1.1", + "free_disk_gb": 1028, + "free_ram_mb": 7680, + "hypervisor_type": "fake", + "hypervisor_version": 1000, + "local_gb": 1028, + "local_gb_used": 0, + "memory_mb": 8192, + "memory_mb_used": 512, + "running_vms": 0, + "service": {"host": "host1", "id": 7, "disabled_reason": None}, + "vcpus": 1, + "vcpus_used": 0, + } + ) + ) + + +class FakeVolume: + def __init__(self, id, status, name, attachments=[], size=75): + self.id = id + self.status = status + self.name = name + self.attachments = attachments + self.size = size + self.snapshot_id = 'id:snapshot' + self.description = 'description' + self.volume_type = 'type:volume' + self.availability_zone = 'az1' + self.created_at = '1900-01-01 12:34:56' + self.updated_at = None + self.source_volid = '12345' + self.metadata = {} + + +class FakeVolumeSnapshot: + def __init__(self, id, status, name, description, size=75): + self.id = id + self.status = status + self.name = name + self.description = description + self.size = size + self.created_at = '1900-01-01 12:34:56' + self.updated_at = None + self.volume_id = '12345' + self.metadata = {} + self.is_forced = False + + +class FakeMachine: + def __init__( + self, + id, + name=None, + driver=None, + driver_info=None, + chassis_uuid=None, + instance_info=None, + instance_name=None, + instance_uuid=None, + properties=None, + reservation=None, + last_error=None, + provision_state='available', + ): 
+ self.uuid = id + self.name = name + self.driver = driver + self.driver_info = driver_info + self.chassis_uuid = chassis_uuid + self.instance_info = instance_info + self.instance_name = instance_name + self.instance_uuid = instance_uuid + self.properties = properties + self.reservation = reservation + self.last_error = last_error + self.provision_state = provision_state + + +class FakeMachinePort: + def __init__(self, id, address, node_id): + self.uuid = id + self.address = address + self.node_uuid = node_id + + +def make_fake_neutron_security_group( + id, name, description, rules, stateful=True, project_id=None +): + if not rules: + rules = [] + if not project_id: + project_id = PROJECT_ID + return json.loads( + json.dumps( + { + 'id': id, + 'name': name, + 'description': description, + 'stateful': stateful, + 'project_id': project_id, + 'tenant_id': project_id, + 'security_group_rules': rules, + } + ) + ) + + +def make_fake_nova_security_group_rule( + id, from_port, to_port, ip_protocol, cidr +): + return json.loads( + json.dumps( + { + 'id': id, + 'from_port': int(from_port), + 'to_port': int(to_port), + 'ip_protcol': 'tcp', + 'ip_range': {'cidr': cidr}, + } + ) + ) + + +def make_fake_nova_security_group(id, name, description, rules): + if not rules: + rules = [] + return json.loads( + json.dumps( + { + 'id': id, + 'name': name, + 'description': description, + 'tenant_id': PROJECT_ID, + 'rules': rules, + } + ) + ) + + +class FakeNovaSecgroupRule: + def __init__( + self, + id, + from_port=None, + to_port=None, + ip_protocol=None, + cidr=None, + parent_group_id=None, + ): + self.id = id + self.from_port = from_port + self.to_port = to_port + self.ip_protocol = ip_protocol + if cidr: + self.ip_range = {'cidr': cidr} + self.parent_group_id = parent_group_id + + +class FakeHypervisor: + def __init__(self, id, hostname): + self.id = id + self.hypervisor_hostname = hostname + + +class FakeZone: + def __init__(self, id, name, type_, email, description, ttl, masters): + 
self.id = id + self.name = name + self.type_ = type_ + self.email = email + self.description = description + self.ttl = ttl + self.masters = masters + + +class FakeRecordset: + def __init__(self, zone, id, name, type_, description, ttl, records): + self.zone = zone + self.id = id + self.name = name + self.type_ = type_ + self.description = description + self.ttl = ttl + self.records = records + + +def make_fake_aggregate( + id, name, availability_zone='nova', metadata=None, hosts=None +): + if not metadata: + metadata = {} + if not hosts: + hosts = [] + return json.loads( + json.dumps( + { + "availability_zone": availability_zone, + "created_at": datetime.datetime.now().isoformat(), + "deleted": False, + "deleted_at": None, + "hosts": hosts, + "id": int(id), + "metadata": { + "availability_zone": availability_zone, + }, + "name": name, + "updated_at": None, + } + ) + ) diff --git a/openstack/tests/fixtures.py b/openstack/tests/fixtures.py new file mode 100644 index 0000000000..b5d76baced --- /dev/null +++ b/openstack/tests/fixtures.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +fixtures +-------- + +Fixtures used for testing +""" + +import warnings + +import fixtures + +from openstack import warnings as os_warnings + + +# TODO(stephenfin): Replace this with WarningsFilter from fixtures when it's +# released https://github.com/testing-cabal/fixtures/pull/50 +class WarningsFixture(fixtures.Fixture): + """Filters out warnings during test runs.""" + + def setUp(self): + super().setUp() + + self._original_warning_filters = warnings.filters[:] + + # enable user warnings as many libraries use this (it's the default) + warnings.simplefilter("error", UserWarning) + + # enable deprecation warnings in general... + warnings.simplefilter("once", DeprecationWarning) + + # ...but ignore our own deprecation warnings + warnings.filterwarnings( + "ignore", + category=os_warnings.OpenStackDeprecationWarning, + ) + warnings.filterwarnings( + "ignore", + category=os_warnings._RemovedInSDKWarning, + ) + + # also ignore our own general warnings + warnings.filterwarnings( + "ignore", + category=os_warnings.OpenStackWarning, + ) + + self.addCleanup(self._reset_warning_filters) + + def _reset_warning_filters(self): + warnings.filters[:] = self._original_warning_filters # type: ignore[index] diff --git a/openstack/tests/functional/README.rst b/openstack/tests/functional/README.rst new file mode 100644 index 0000000000..a9bbf05c04 --- /dev/null +++ b/openstack/tests/functional/README.rst @@ -0,0 +1,7 @@ +Unit Tests for openstacksdk +=========================== + +For information on how to run and extend these tests, refer to the `contributor +guide`__. + +.. 
__: https://docs.openstack.org/openstacksdk/latest/contributor/testing.html diff --git a/openstack/tests/unit/telemetry/v2/__init__.py b/openstack/tests/functional/baremetal/__init__.py similarity index 100% rename from openstack/tests/unit/telemetry/v2/__init__.py rename to openstack/tests/functional/baremetal/__init__.py diff --git a/openstack/tests/functional/baremetal/v1/__init__.py b/openstack/tests/functional/baremetal/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/baremetal/v1/base.py b/openstack/tests/functional/baremetal/v1/base.py new file mode 100644 index 0000000000..9094c40552 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/base.py @@ -0,0 +1,148 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.tests.functional import base + + +class BaseBaremetalTest(base.BaseFunctionalTest): + min_microversion: str | None = None + node_id: str + + def setUp(self): + super().setUp() + self.require_service( + 'baremetal', min_microversion=self.min_microversion + ) + + def create_allocation(self, **kwargs): + allocation = self.operator_cloud.baremetal.create_allocation(**kwargs) + self.addCleanup( + lambda: self.operator_cloud.baremetal.delete_allocation( + allocation.id, ignore_missing=True + ) + ) + return allocation + + def create_chassis(self, **kwargs): + chassis = self.system_admin_cloud.baremetal.create_chassis(**kwargs) + self.addCleanup( + lambda: self.system_admin_cloud.baremetal.delete_chassis( + chassis.id, ignore_missing=True + ) + ) + return chassis + + def create_node(self, driver='fake-hardware', **kwargs): + node = self.operator_cloud.baremetal.create_node( + driver=driver, **kwargs + ) + self.node_id = node.id + self.addCleanup( + lambda: self.operator_cloud.baremetal.delete_node( + self.node_id, ignore_missing=True + ) + ) + self.assertIsNotNone(self.node_id) + return node + + def create_port(self, node_id=None, **kwargs): + node_id = node_id or self.node_id + port = self.operator_cloud.baremetal.create_port( + node_uuid=node_id, **kwargs + ) + self.addCleanup( + lambda: self.operator_cloud.baremetal.delete_port( + port.id, ignore_missing=True + ) + ) + return port + + def create_port_group(self, node_id=None, **kwargs): + node_id = node_id or self.node_id + port_group = self.operator_cloud.baremetal.create_port_group( + node_uuid=node_id, **kwargs + ) + self.addCleanup( + lambda: self.operator_cloud.baremetal.delete_port_group( + port_group.id, ignore_missing=True + ) + ) + return port_group + + def create_volume_connector(self, node_id=None, **kwargs): + node_id = node_id or self.node_id + volume_connector = ( + self.operator_cloud.baremetal.create_volume_connector( + node_uuid=node_id, **kwargs + ) + ) + + self.addCleanup( + 
lambda: self.operator_cloud.baremetal.delete_volume_connector( + volume_connector.id, ignore_missing=True + ) + ) + return volume_connector + + def create_volume_target(self, node_id=None, **kwargs): + node_id = node_id or self.node_id + volume_target = self.operator_cloud.baremetal.create_volume_target( + node_uuid=node_id, **kwargs + ) + + self.addCleanup( + lambda: self.operator_cloud.baremetal.delete_volume_target( + volume_target.id, ignore_missing=True + ) + ) + return volume_target + + def create_deploy_template(self, **kwargs): + """Create a new deploy_template from attributes.""" + + deploy_template = ( + self.system_admin_cloud.baremetal.create_deploy_template(**kwargs) + ) + + self.addCleanup( + lambda: self.system_admin_cloud.baremetal.delete_deploy_template( + deploy_template.id, ignore_missing=True + ) + ) + return deploy_template + + def create_runbook(self, **kwargs): + """Create a new runbook from attributes.""" + + runbook = self.operator_cloud.baremetal.create_runbook(**kwargs) + + self.addCleanup( + lambda: self.operator_cloud.baremetal.delete_runbook( + runbook.id, ignore_missing=True + ) + ) + return runbook + + def create_inspection_rule(self, **kwargs): + """Create a new inspection_rule from attributes.""" + + inspection_rule = ( + self.system_admin_cloud.baremetal.create_inspection_rule(**kwargs) + ) + + self.addCleanup( + lambda: self.system_admin_cloud.baremetal.delete_inspection_rule( + inspection_rule.id, ignore_missing=True + ) + ) + return inspection_rule diff --git a/openstack/tests/functional/baremetal/v1/test_allocation.py b/openstack/tests/functional/baremetal/v1/test_allocation.py new file mode 100644 index 0000000000..99c7b570e5 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_allocation.py @@ -0,0 +1,233 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class Base(base.BaseBaremetalTest): + def setUp(self): + super().setUp() + # NOTE(dtantsur): generate a unique resource class to prevent parallel + # tests from clashing. + self.resource_class = f'baremetal-{random.randrange(1024)}' + self.node = self._create_available_node() + + def _create_available_node(self): + node = self.create_node(resource_class=self.resource_class) + self.operator_cloud.baremetal.set_node_provision_state( + node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_provision_state( + node, 'provide', wait=True + ) + # Make sure the node has non-empty power state by forcing power off. 
+ self.operator_cloud.baremetal.set_node_power_state(node, 'power off') + self.addCleanup( + lambda: self.operator_cloud.baremetal.update_node( + node.id, instance_id=None + ) + ) + return node + + +class TestBareMetalAllocation(Base): + min_microversion = '1.52' + + def test_allocation_create_get_delete(self): + allocation = self.create_allocation(resource_class=self.resource_class) + self.assertEqual('allocating', allocation.state) + self.assertIsNone(allocation.node_id) + self.assertIsNone(allocation.last_error) + + loaded = self.operator_cloud.baremetal.wait_for_allocation(allocation) + self.assertEqual(loaded.id, allocation.id) + self.assertEqual('active', allocation.state) + self.assertEqual(self.node.id, allocation.node_id) + self.assertIsNone(allocation.last_error) + + with_fields = self.operator_cloud.baremetal.get_allocation( + allocation.id, fields=['uuid', 'node_uuid'] + ) + self.assertEqual(allocation.id, with_fields.id) + self.assertIsNone(with_fields.state) + + node = self.operator_cloud.baremetal.get_node(self.node.id) + self.assertEqual(allocation.id, node.allocation_id) + + self.operator_cloud.baremetal.delete_allocation( + allocation, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_allocation, + allocation.id, + ) + + def test_allocation_list(self): + allocation1 = self.create_allocation( + resource_class=self.resource_class + ) + allocation2 = self.create_allocation( + resource_class=self.resource_class + '-fail' + ) + + self.operator_cloud.baremetal.wait_for_allocation(allocation1) + self.operator_cloud.baremetal.wait_for_allocation( + allocation2, ignore_error=True + ) + + allocations = self.operator_cloud.baremetal.allocations() + self.assertEqual( + {p.id for p in allocations}, {allocation1.id, allocation2.id} + ) + + allocations = self.operator_cloud.baremetal.allocations(state='active') + self.assertEqual([p.id for p in allocations], [allocation1.id]) + + allocations = 
self.operator_cloud.baremetal.allocations( + node=self.node.id + ) + self.assertEqual([p.id for p in allocations], [allocation1.id]) + + allocations = self.operator_cloud.baremetal.allocations( + resource_class=self.resource_class + '-fail' + ) + self.assertEqual([p.id for p in allocations], [allocation2.id]) + + def test_allocation_negative_failure(self): + allocation = self.create_allocation( + resource_class=self.resource_class + '-fail' + ) + self.assertRaises( + exceptions.SDKException, + self.operator_cloud.baremetal.wait_for_allocation, + allocation, + ) + + allocation = self.operator_cloud.baremetal.get_allocation( + allocation.id + ) + self.assertEqual('error', allocation.state) + self.assertIn(self.resource_class + '-fail', allocation.last_error) + + def test_allocation_negative_non_existing(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_allocation, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.delete_allocation, + uuid, + ignore_missing=False, + ) + self.assertIsNone( + self.operator_cloud.baremetal.delete_allocation(uuid) + ) + + def test_allocation_fields(self): + self.create_allocation(resource_class=self.resource_class) + result = self.operator_cloud.baremetal.allocations(fields=['uuid']) + for item in result: + self.assertIsNotNone(item.id) + self.assertIsNone(item.resource_class) + + +class TestBareMetalAllocationUpdate(Base): + min_microversion = '1.57' + + def test_allocation_update(self): + name = 'ossdk-name1' + + allocation = self.create_allocation(resource_class=self.resource_class) + allocation = self.operator_cloud.baremetal.wait_for_allocation( + allocation + ) + self.assertEqual('active', allocation.state) + self.assertIsNone(allocation.last_error) + self.assertIsNone(allocation.name) + self.assertEqual({}, allocation.extra) + + allocation = self.operator_cloud.baremetal.update_allocation( + 
allocation, name=name, extra={'answer': 42} + ) + self.assertEqual(name, allocation.name) + self.assertEqual({'answer': 42}, allocation.extra) + + allocation = self.operator_cloud.baremetal.get_allocation(name) + self.assertEqual(name, allocation.name) + self.assertEqual({'answer': 42}, allocation.extra) + + self.operator_cloud.baremetal.delete_allocation( + allocation, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_allocation, + allocation.id, + ) + + def test_allocation_patch(self): + name = 'ossdk-name2' + + allocation = self.create_allocation(resource_class=self.resource_class) + allocation = self.operator_cloud.baremetal.wait_for_allocation( + allocation + ) + self.assertEqual('active', allocation.state) + self.assertIsNone(allocation.last_error) + self.assertIsNone(allocation.name) + self.assertEqual({}, allocation.extra) + + allocation = self.operator_cloud.baremetal.patch_allocation( + allocation, + [ + {'op': 'replace', 'path': '/name', 'value': name}, + {'op': 'add', 'path': '/extra/answer', 'value': 42}, + ], + ) + self.assertEqual(name, allocation.name) + self.assertEqual({'answer': 42}, allocation.extra) + + allocation = self.operator_cloud.baremetal.get_allocation(name) + self.assertEqual(name, allocation.name) + self.assertEqual({'answer': 42}, allocation.extra) + + allocation = self.operator_cloud.baremetal.patch_allocation( + allocation, + [ + {'op': 'remove', 'path': '/name'}, + {'op': 'remove', 'path': '/extra/answer'}, + ], + ) + self.assertIsNone(allocation.name) + self.assertEqual({}, allocation.extra) + + allocation = self.operator_cloud.baremetal.get_allocation( + allocation.id + ) + self.assertIsNone(allocation.name) + self.assertEqual({}, allocation.extra) + + self.operator_cloud.baremetal.delete_allocation( + allocation, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_allocation, + allocation.id, + ) diff 
--git a/openstack/tests/functional/baremetal/v1/test_chassis.py b/openstack/tests/functional/baremetal/v1/test_chassis.py new file mode 100644 index 0000000000..8217763e53 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_chassis.py @@ -0,0 +1,90 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalChassis(base.BaseBaremetalTest): + def test_chassis_create_get_delete(self): + chassis = self.create_chassis() + + loaded = self.system_admin_cloud.baremetal.get_chassis(chassis.id) + self.assertEqual(loaded.id, chassis.id) + + self.system_admin_cloud.baremetal.delete_chassis( + chassis, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.get_chassis, + chassis.id, + ) + + def test_chassis_update(self): + chassis = self.create_chassis() + chassis.extra = {'answer': 42} + + chassis = self.system_admin_cloud.baremetal.update_chassis(chassis) + self.assertEqual({'answer': 42}, chassis.extra) + + chassis = self.system_admin_cloud.baremetal.get_chassis(chassis.id) + self.assertEqual({'answer': 42}, chassis.extra) + + def test_chassis_patch(self): + chassis = self.create_chassis() + + chassis = self.system_admin_cloud.baremetal.patch_chassis( + chassis, dict(path='/extra/answer', op='add', value=42) + ) + self.assertEqual({'answer': 42}, chassis.extra) + + chassis = 
self.system_admin_cloud.baremetal.get_chassis(chassis.id) + self.assertEqual({'answer': 42}, chassis.extra) + + def test_chassis_negative_non_existing(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.get_chassis, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.find_chassis, + uuid, + ignore_missing=False, + ) + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.delete_chassis, + uuid, + ignore_missing=False, + ) + self.assertIsNone(self.system_admin_cloud.baremetal.find_chassis(uuid)) + self.assertIsNone( + self.system_admin_cloud.baremetal.delete_chassis(uuid) + ) + + +class TestBareMetalChassisFields(base.BaseBaremetalTest): + min_microversion = '1.8' + + def test_chassis_fields(self): + self.create_chassis(description='something') + result = self.system_admin_cloud.baremetal.chassis( + fields=['uuid', 'extra'] + ) + for ch in result: + self.assertIsNotNone(ch.id) + self.assertIsNone(ch.description) diff --git a/openstack/tests/functional/baremetal/v1/test_conductor.py b/openstack/tests/functional/baremetal/v1/test_conductor.py new file mode 100644 index 0000000000..68f2d805ef --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_conductor.py @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalConductor(base.BaseBaremetalTest): + min_microversion = '1.49' + + def test_list_get_conductor(self): + node = self.create_node(name='node-name') + conductors = self.system_admin_cloud.baremetal.conductors() + hostname_list = [conductor.hostname for conductor in conductors] + self.assertIn(node.conductor, hostname_list) + conductor1 = self.system_admin_cloud.baremetal.get_conductor( + node.conductor + ) + self.assertIsNotNone(conductor1.conductor_group) + self.assertIsNotNone(conductor1.links) + self.assertTrue(conductor1.alive) diff --git a/openstack/tests/functional/baremetal/v1/test_deploy_templates.py b/openstack/tests/functional/baremetal/v1/test_deploy_templates.py new file mode 100644 index 0000000000..f3a3ec08c5 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_deploy_templates.py @@ -0,0 +1,191 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalDeployTemplate(base.BaseBaremetalTest): + min_microversion = '1.55' + + def test_baremetal_deploy_create_get_delete(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "priority": 150, + } + ] + deploy_template = self.create_deploy_template( + name='CUSTOM_DEPLOY_TEMPLATE', steps=steps + ) + loaded = self.system_admin_cloud.baremetal.get_deploy_template( + deploy_template.id + ) + self.assertEqual(loaded.id, deploy_template.id) + self.system_admin_cloud.baremetal.delete_deploy_template( + deploy_template, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.get_deploy_template, + deploy_template.id, + ) + + def test_baremetal_deploy_template_list(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "priority": 150, + } + ] + + deploy_template1 = self.create_deploy_template( + name='CUSTOM_DEPLOY_TEMPLATE1', steps=steps + ) + deploy_template2 = self.create_deploy_template( + name='CUSTOM_DEPLOY_TEMPLATE2', steps=steps + ) + deploy_templates = self.system_admin_cloud.baremetal.deploy_templates() + ids = [template.id for template in deploy_templates] + self.assertIn(deploy_template1.id, ids) + self.assertIn(deploy_template2.id, ids) + + deploy_templates_with_details = ( + self.system_admin_cloud.baremetal.deploy_templates(details=True) + ) + for dp in deploy_templates_with_details: + self.assertIsNotNone(dp.id) + self.assertIsNotNone(dp.name) + + deploy_tempalte_with_fields = ( + self.system_admin_cloud.baremetal.deploy_templates(fields=['uuid']) + ) + for dp in deploy_tempalte_with_fields: + self.assertIsNotNone(dp.id) + self.assertIsNone(dp.name) + + def 
test_baremetal_deploy_list_update_delete(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "priority": 150, + } + ] + deploy_template = self.create_deploy_template( + name='CUSTOM_DEPLOY_TEMPLATE4', steps=steps + ) + self.assertFalse(deploy_template.extra) + deploy_template.extra = {'answer': 42} + + deploy_template = ( + self.system_admin_cloud.baremetal.update_deploy_template( + deploy_template + ) + ) + self.assertEqual({'answer': 42}, deploy_template.extra) + + deploy_template = ( + self.system_admin_cloud.baremetal.get_deploy_template( + deploy_template.id + ) + ) + + self.system_admin_cloud.baremetal.delete_deploy_template( + deploy_template.id, ignore_missing=False + ) + + def test_baremetal_deploy_update(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "priority": 150, + } + ] + deploy_template = self.create_deploy_template( + name='CUSTOM_DEPLOY_TEMPLATE4', steps=steps + ) + deploy_template.extra = {'answer': 42} + + deploy_template = ( + self.system_admin_cloud.baremetal.update_deploy_template( + deploy_template + ) + ) + self.assertEqual({'answer': 42}, deploy_template.extra) + + deploy_template = ( + self.system_admin_cloud.baremetal.get_deploy_template( + deploy_template.id + ) + ) + self.assertEqual({'answer': 42}, deploy_template.extra) + + def test_deploy_template_patch(self): + name = "CUSTOM_HYPERTHREADING_ON" + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "priority": 150, + } + ] + deploy_template = self.create_deploy_template(name=name, steps=steps) + deploy_template = ( + self.system_admin_cloud.baremetal.patch_deploy_template( + deploy_template, dict(path='/extra/answer', op='add', value=42) + ) + ) + self.assertEqual({'answer': 
42}, deploy_template.extra) + self.assertEqual(name, deploy_template.name) + + deploy_template = ( + self.system_admin_cloud.baremetal.get_deploy_template( + deploy_template.id + ) + ) + self.assertEqual({'answer': 42}, deploy_template.extra) + + def test_deploy_template_negative_non_existing(self): + uuid = "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.get_deploy_template, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.delete_deploy_template, + uuid, + ignore_missing=False, + ) + self.assertIsNone( + self.system_admin_cloud.baremetal.delete_deploy_template(uuid) + ) diff --git a/openstack/tests/functional/baremetal/v1/test_driver.py b/openstack/tests/functional/baremetal/v1/test_driver.py new file mode 100644 index 0000000000..b48a5693b0 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_driver.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalDriver(base.BaseBaremetalTest): + def test_fake_hardware_get(self): + driver = self.system_admin_cloud.baremetal.get_driver('fake-hardware') + self.assertEqual('fake-hardware', driver.name) + self.assertNotEqual([], driver.hosts) + + def test_fake_hardware_list(self): + drivers = self.system_admin_cloud.baremetal.drivers() + self.assertIn('fake-hardware', [d.name for d in drivers]) + + def test_driver_negative_non_existing(self): + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.get_driver, + 'not-a-driver', + ) + + +class TestBareMetalDriverDetails(base.BaseBaremetalTest): + min_microversion = '1.30' + + def test_fake_hardware_get(self): + driver = self.system_admin_cloud.baremetal.get_driver('fake-hardware') + self.assertEqual('fake-hardware', driver.name) + for iface in ('boot', 'deploy', 'management', 'power'): + self.assertIn( + 'fake', getattr(driver, f'enabled_{iface}_interfaces') + ) + self.assertEqual( + 'fake', getattr(driver, f'default_{iface}_interface') + ) + self.assertNotEqual([], driver.hosts) + + def test_fake_hardware_list_details(self): + drivers = self.system_admin_cloud.baremetal.drivers(details=True) + driver = next(d for d in drivers if d.name == 'fake-hardware') + for iface in ('boot', 'deploy', 'management', 'power'): + self.assertIn( + 'fake', getattr(driver, f'enabled_{iface}_interfaces') + ) + self.assertEqual( + 'fake', getattr(driver, f'default_{iface}_interface') + ) diff --git a/openstack/tests/functional/baremetal/v1/test_inspection_rules.py b/openstack/tests/functional/baremetal/v1/test_inspection_rules.py new file mode 100644 index 0000000000..bbfe6627c4 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_inspection_rules.py @@ -0,0 +1,207 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with 
the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalInspectionRule(base.BaseBaremetalTest): + min_microversion = '1.96' + + def setUp(self): + super().setUp() + + def test_baremetal_inspection_rule_create_get_delete(self): + actions = [{"op": "set-attribute", "args": ["/driver", "idrac"]}] + conditions = [ + {"op": "eq", "args": ["node:memory_mb", 4096], "multiple": "all"} + ] + inspection_rule = self.create_inspection_rule( + actions=actions, + conditions=conditions, + description="Test inspection rule", + phase="main", + priority=100, + sensitive=False, + ) + loaded = self.system_admin_cloud.baremetal.get_inspection_rule( + inspection_rule.id + ) + self.assertEqual(loaded.id, inspection_rule.id) + self.system_admin_cloud.baremetal.delete_inspection_rule( + inspection_rule, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.get_inspection_rule, + inspection_rule.id, + ) + + def test_baremetal_inspection_rule_list(self): + actions = [{"op": "set-attribute", "args": ["/driver", "idrac"]}] + conditions = [ + { + "op": "is-true", + "args": ["{node.auto_discovered}"], + "multiple": "any", + } + ] + + inspection_rule1 = self.create_inspection_rule( + actions=actions, + conditions=conditions, + description="Test inspection rule 1", + ) + inspection_rule2 = self.create_inspection_rule( + actions=actions, + conditions=conditions, + description="Test inspection rule 2", + ) + inspection_rules = 
self.system_admin_cloud.baremetal.inspection_rules() + ids = [rule.id for rule in inspection_rules] + self.assertIn(inspection_rule1.id, ids) + self.assertIn(inspection_rule2.id, ids) + + inspection_rules_with_details = ( + self.system_admin_cloud.baremetal.inspection_rules(details=True) + ) + for rule in inspection_rules_with_details: + self.assertIsNotNone(rule.id) + self.assertIsNotNone(rule.description) + + inspection_rule_with_fields = ( + self.system_admin_cloud.baremetal.inspection_rules(fields=['uuid']) + ) + for rule in inspection_rule_with_fields: + self.assertIsNotNone(rule.id) + self.assertIsNone(rule.description) + + def test_baremetal_inspection_rule_list_update_delete(self): + actions = [{"op": "set-attribute", "args": ["/driver", "idrac"]}] + conditions = [ + { + "op": "eq", + "args": ["node:cpu_arch", "x86_64"], + "multiple": "all", + } + ] + inspection_rule = self.create_inspection_rule( + actions=actions, + conditions=conditions, + description="Test inspection rule", + ) + inspection_rule.description = 'Updated inspection rule' + + inspection_rule = ( + self.system_admin_cloud.baremetal.update_inspection_rule( + inspection_rule + ) + ) + self.assertEqual( + 'Updated inspection rule', inspection_rule.description + ) + + inspection_rule = ( + self.system_admin_cloud.baremetal.get_inspection_rule( + inspection_rule.id + ) + ) + + self.system_admin_cloud.baremetal.delete_inspection_rule( + inspection_rule.id, ignore_missing=False + ) + + def test_baremetal_inspection_rule_update(self): + actions = [{"op": "set-attribute", "args": ["/driver", "idrac"]}] + conditions = [ + {"op": "gt", "args": ["node:memory_mb", 4096], "multiple": "all"} + ] + inspection_rule = self.create_inspection_rule( + actions=actions, conditions=conditions, phase="main", priority=100 + ) + inspection_rule.priority = 150 + + inspection_rule = ( + self.system_admin_cloud.baremetal.update_inspection_rule( + inspection_rule + ) + ) + self.assertEqual(150, inspection_rule.priority) + 
+ inspection_rule = ( + self.system_admin_cloud.baremetal.get_inspection_rule( + inspection_rule.id + ) + ) + self.assertEqual(150, inspection_rule.priority) + + def test_inspection_rule_patch(self): + description = "BIOS configuration rule" + actions = [ + { + "op": "set-attribute", + "args": ["/properties/capabilities", "boot_mode:uefi"], + } + ] + conditions = [ + { + "op": "is-true", + "args": ["{node.auto_discovered}"], + "multiple": "any", + } + ] + inspection_rule = self.create_inspection_rule( + actions=actions, + conditions=conditions, + description=description, + sensitive=False, + ) + + updated_actions = [ + { + "op": "set-attribute", + "args": ["/driver", "fake"], + } + ] + + inspection_rule = ( + self.system_admin_cloud.baremetal.patch_inspection_rule( + inspection_rule, + dict(path='/actions', op='add', value=updated_actions), + ) + ) + self.assertEqual(updated_actions, inspection_rule.actions) + self.assertEqual(description, inspection_rule.description) + + inspection_rule = ( + self.system_admin_cloud.baremetal.get_inspection_rule( + inspection_rule.id + ) + ) + self.assertEqual(updated_actions, inspection_rule.actions) + + def test_inspection_rule_negative_non_existing(self): + uuid = "bbb45f41-d4bc-4307-8d1d-32f95ce1e920" + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.get_inspection_rule, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.system_admin_cloud.baremetal.delete_inspection_rule, + uuid, + ignore_missing=False, + ) + self.assertIsNone( + self.system_admin_cloud.baremetal.delete_inspection_rule(uuid) + ) diff --git a/openstack/tests/functional/baremetal/v1/test_node.py b/openstack/tests/functional/baremetal/v1/test_node.py new file mode 100644 index 0000000000..5b5b70785e --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_node.py @@ -0,0 +1,689 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance 
with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import random +import uuid + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalNode(base.BaseBaremetalTest): + def test_node_create_get_delete(self): + node = self.create_node(name='node-name') + self.assertEqual(node.name, 'node-name') + self.assertEqual(node.driver, 'fake-hardware') + self.assertEqual(node.provision_state, 'enroll') + self.assertFalse(node.is_maintenance) + + # NOTE(dtantsur): get_node and find_node only differ in handing missing + # nodes, otherwise they are identical. + for ident in (self.node_id, 'node-name'): + found = self.operator_cloud.baremetal.get_node(ident) + self.assertEqual(node.id, found.id) + self.assertEqual(node.name, found.name) + + found = self.operator_cloud.baremetal.find_node(ident) + self.assertEqual(node.id, found.id) + self.assertEqual(node.name, found.name) + + with_fields = self.operator_cloud.baremetal.get_node( + 'node-name', fields=['uuid', 'driver', 'instance_id'] + ) + self.assertEqual(node.id, with_fields.id) + self.assertEqual(node.driver, with_fields.driver) + self.assertIsNone(with_fields.name) + self.assertIsNone(with_fields.provision_state) + + nodes = self.operator_cloud.baremetal.nodes() + self.assertIn(node.id, [n.id for n in nodes]) + + self.operator_cloud.baremetal.delete_node(node, ignore_missing=False) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_node, + self.node_id, + ) + + def test_node_create_in_available(self): + node = self.create_node(name='node-name', 
provision_state='available') + self.assertEqual(node.name, 'node-name') + self.assertEqual(node.driver, 'fake-hardware') + self.assertEqual(node.provision_state, 'available') + + self.operator_cloud.baremetal.delete_node(node, ignore_missing=False) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_node, + self.node_id, + ) + + def test_node_update(self): + node = self.create_node(name='node-name', extra={'foo': 'bar'}) + node.name = 'new-name' + node.extra = {'answer': 42} + instance_uuid = str(uuid.uuid4()) + + node = self.operator_cloud.baremetal.update_node( + node, instance_id=instance_uuid + ) + self.assertEqual('new-name', node.name) + self.assertEqual({'answer': 42}, node.extra) + self.assertEqual(instance_uuid, node.instance_id) + + node = self.operator_cloud.baremetal.get_node('new-name') + self.assertEqual('new-name', node.name) + self.assertEqual({'answer': 42}, node.extra) + self.assertEqual(instance_uuid, node.instance_id) + + node = self.operator_cloud.baremetal.update_node( + node, instance_id=None + ) + self.assertIsNone(node.instance_id) + + node = self.operator_cloud.baremetal.get_node('new-name') + self.assertIsNone(node.instance_id) + + def test_node_update_by_name(self): + self.create_node(name='node-name', extra={'foo': 'bar'}) + instance_uuid = str(uuid.uuid4()) + + node = self.operator_cloud.baremetal.update_node( + 'node-name', instance_id=instance_uuid, extra={'answer': 42} + ) + self.assertEqual({'answer': 42}, node.extra) + self.assertEqual(instance_uuid, node.instance_id) + + node = self.operator_cloud.baremetal.get_node('node-name') + self.assertEqual({'answer': 42}, node.extra) + self.assertEqual(instance_uuid, node.instance_id) + + node = self.operator_cloud.baremetal.update_node( + 'node-name', instance_id=None + ) + self.assertIsNone(node.instance_id) + + node = self.operator_cloud.baremetal.get_node('node-name') + self.assertIsNone(node.instance_id) + + def test_node_patch(self): + node = 
self.create_node(name='node-name', extra={'foo': 'bar'}) + node.name = 'new-name' + instance_uuid = str(uuid.uuid4()) + + node = self.operator_cloud.baremetal.patch_node( + node, + [ + dict(path='/instance_id', op='replace', value=instance_uuid), + dict(path='/extra/answer', op='add', value=42), + ], + ) + self.assertEqual('new-name', node.name) + self.assertEqual({'foo': 'bar', 'answer': 42}, node.extra) + self.assertEqual(instance_uuid, node.instance_id) + + node = self.operator_cloud.baremetal.get_node('new-name') + self.assertEqual('new-name', node.name) + self.assertEqual({'foo': 'bar', 'answer': 42}, node.extra) + self.assertEqual(instance_uuid, node.instance_id) + + node = self.operator_cloud.baremetal.patch_node( + node, + [ + dict(path='/instance_id', op='remove'), + dict(path='/extra/answer', op='remove'), + ], + ) + self.assertIsNone(node.instance_id) + self.assertNotIn('answer', node.extra) + + node = self.operator_cloud.baremetal.get_node('new-name') + self.assertIsNone(node.instance_id) + self.assertNotIn('answer', node.extra) + + def test_node_list_update_delete(self): + self.create_node(name='node-name', extra={'foo': 'bar'}) + node = next( + n + for n in self.operator_cloud.baremetal.nodes( + details=True, + provision_state='enroll', + is_maintenance=False, + associated=False, + ) + if n.name == 'node-name' + ) + self.assertEqual(node.extra, {'foo': 'bar'}) + + # This test checks that resources returned from listing are usable + self.operator_cloud.baremetal.update_node(node, extra={'foo': 42}) + self.operator_cloud.baremetal.delete_node(node, ignore_missing=False) + + def test_node_create_in_enroll_provide(self): + node = self.create_node() + self.node_id = node.id + + self.assertEqual(node.driver, 'fake-hardware') + self.assertEqual(node.provision_state, 'enroll') + self.assertIsNone(node.power_state) + self.assertFalse(node.is_maintenance) + + self.operator_cloud.baremetal.set_node_provision_state( + node, 'manage', wait=True + ) + 
self.assertEqual(node.provision_state, 'manageable') + + self.operator_cloud.baremetal.set_node_provision_state( + node, 'provide', wait=True + ) + self.assertEqual(node.provision_state, 'available') + + def test_node_create_in_enroll_provide_by_name(self): + name = f'node-{random.randint(0, 1000)}' + node = self.create_node(name=name) + self.node_id = node.id + + self.assertEqual(node.driver, 'fake-hardware') + self.assertEqual(node.provision_state, 'enroll') + self.assertIsNone(node.power_state) + self.assertFalse(node.is_maintenance) + + node = self.operator_cloud.baremetal.set_node_provision_state( + name, 'manage', wait=True + ) + self.assertEqual(node.provision_state, 'manageable') + + node = self.operator_cloud.baremetal.set_node_provision_state( + name, 'provide', wait=True + ) + self.assertEqual(node.provision_state, 'available') + + def test_node_power_state(self): + node = self.create_node() + self.assertIsNone(node.power_state) + + self.operator_cloud.baremetal.set_node_power_state( + node, 'power on', wait=True + ) + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertEqual('power on', node.power_state) + + self.operator_cloud.baremetal.set_node_power_state( + node, 'power off', wait=True + ) + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertEqual('power off', node.power_state) + + def test_node_validate(self): + node = self.create_node() + # Fake hardware passes validation for all interfaces + result = self.operator_cloud.baremetal.validate_node(node) + for iface in ('boot', 'deploy', 'management', 'power'): + self.assertTrue(result[iface].result) + self.assertFalse(result[iface].reason) + + def test_node_negative_non_existing(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_node, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.find_node, + uuid, + ignore_missing=False, + ) 
+ self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.delete_node, + uuid, + ignore_missing=False, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.update_node, + uuid, + name='new-name', + ) + self.assertIsNone(self.operator_cloud.baremetal.find_node(uuid)) + self.assertIsNone(self.operator_cloud.baremetal.delete_node(uuid)) + + def test_maintenance(self): + reason = "Prepating for taking over the world" + + node = self.create_node() + self.assertFalse(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Initial setting without the reason + node = self.operator_cloud.baremetal.set_node_maintenance(node) + self.assertTrue(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Updating the reason later + node = self.operator_cloud.baremetal.set_node_maintenance(node, reason) + self.assertTrue(node.is_maintenance) + self.assertEqual(reason, node.maintenance_reason) + + # Removing the reason later + node = self.operator_cloud.baremetal.set_node_maintenance(node) + self.assertTrue(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Unsetting maintenance + node = self.operator_cloud.baremetal.unset_node_maintenance(node) + self.assertFalse(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Initial setting with the reason + node = self.operator_cloud.baremetal.set_node_maintenance(node, reason) + self.assertTrue(node.is_maintenance) + self.assertEqual(reason, node.maintenance_reason) + + def test_maintenance_via_update(self): + reason = "Prepating for taking over the world" + + node = self.create_node() + + # Initial setting without the reason + node = self.operator_cloud.baremetal.update_node( + node, is_maintenance=True + ) + self.assertTrue(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Make sure the change has effect on the remote side. 
+ node = self.operator_cloud.baremetal.get_node(node.id) + self.assertTrue(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Updating the reason later + node = self.operator_cloud.baremetal.update_node( + node, maintenance_reason=reason + ) + self.assertTrue(node.is_maintenance) + self.assertEqual(reason, node.maintenance_reason) + + # Make sure the change has effect on the remote side. + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertTrue(node.is_maintenance) + self.assertEqual(reason, node.maintenance_reason) + + # Unsetting maintenance + node = self.operator_cloud.baremetal.update_node( + node, is_maintenance=False + ) + self.assertFalse(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Make sure the change has effect on the remote side. + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertFalse(node.is_maintenance) + self.assertIsNone(node.maintenance_reason) + + # Initial setting with the reason + node = self.operator_cloud.baremetal.update_node( + node, is_maintenance=True, maintenance_reason=reason + ) + self.assertTrue(node.is_maintenance) + self.assertEqual(reason, node.maintenance_reason) + + # Make sure the change has effect on the remote side. + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertTrue(node.is_maintenance) + self.assertEqual(reason, node.maintenance_reason) + + +class TestNodeRetired(base.BaseBaremetalTest): + min_microversion = '1.61' + + def test_retired(self): + reason = "I'm too old for this s...tuff!" 
+ + node = self.create_node() + + # Set retired without reason + node = self.operator_cloud.baremetal.update_node(node, is_retired=True) + self.assertTrue(node.is_retired) + self.assertIsNone(node.retired_reason) + + # Verify set retired on server side + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertTrue(node.is_retired) + self.assertIsNone(node.retired_reason) + + # Add the reason + node = self.operator_cloud.baremetal.update_node( + node, retired_reason=reason + ) + self.assertTrue(node.is_retired) + self.assertEqual(reason, node.retired_reason) + + # Verify the reason on server side + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertTrue(node.is_retired) + self.assertEqual(reason, node.retired_reason) + + # Unset retired + node = self.operator_cloud.baremetal.update_node( + node, is_retired=False + ) + self.assertFalse(node.is_retired) + self.assertIsNone(node.retired_reason) + + # Verify on server side + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertFalse(node.is_retired) + self.assertIsNone(node.retired_reason) + + # Set retired with reason + node = self.operator_cloud.baremetal.update_node( + node, is_retired=True, retired_reason=reason + ) + self.assertTrue(node.is_retired) + self.assertEqual(reason, node.retired_reason) + + # Verify on server side + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertTrue(node.is_retired) + self.assertEqual(reason, node.retired_reason) + + def test_retired_in_available(self): + node = self.create_node(provision_state='available') + + # Set retired when node state available should fail! 
+ self.assertRaises( + exceptions.ConflictException, + self.operator_cloud.baremetal.update_node, + node, + is_retired=True, + ) + + +class TestBareMetalNodeFields(base.BaseBaremetalTest): + min_microversion = '1.8' + + def test_node_fields(self): + self.create_node() + result = self.operator_cloud.baremetal.nodes( + fields=['uuid', 'name', 'instance_id'] + ) + for item in result: + self.assertIsNotNone(item.id) + self.assertIsNone(item.driver) + + +class TestBareMetalVif(base.BaseBaremetalTest): + min_microversion = '1.28' + + def setUp(self): + super().setUp() + self.node = self.create_node(network_interface='noop') + self.vif_id = "200712fc-fdfb-47da-89a6-2d19f76c7618" + + def test_node_vif_attach_detach(self): + self.operator_cloud.baremetal.attach_vif_to_node( + self.node, self.vif_id + ) + # NOTE(dtantsur): The noop networking driver is completely noop - the + # VIF list does not return anything of value. + self.operator_cloud.baremetal.list_node_vifs(self.node) + res = self.operator_cloud.baremetal.detach_vif_from_node( + self.node, self.vif_id, ignore_missing=False + ) + self.assertTrue(res) + + def test_node_vif_negative(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.attach_vif_to_node, + uuid, + self.vif_id, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.list_node_vifs, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.detach_vif_from_node, + uuid, + self.vif_id, + ignore_missing=False, + ) + + +class TestBareMetalVirtualMedia(base.BaseBaremetalTest): + min_microversion = '1.89' + + def setUp(self): + super().setUp() + self.node = self.create_node(network_interface='noop') + self.device_type = 'cdrom' + self.image_url = 'http://image' + + def test_node_vmedia_attach_detach(self): + self.operator_cloud.baremetal.attach_vmedia_to_node( + self.node, self.device_type, 
self.image_url + ) + res = self.operator_cloud.baremetal.detach_vmedia_from_node(self.node) + self.assertIsNone(res) + + def test_node_vmedia_negative(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.ResourceNotFound, + self.operator_cloud.baremetal.attach_vmedia_to_node, + uuid, + self.device_type, + self.image_url, + ) + self.assertRaises( + exceptions.ResourceNotFound, + self.operator_cloud.baremetal.detach_vmedia_from_node, + uuid, + ) + + +class TestTraits(base.BaseBaremetalTest): + min_microversion = '1.37' + + def setUp(self): + super().setUp() + self.node = self.create_node() + + def test_add_remove_node_trait(self): + node = self.operator_cloud.baremetal.get_node(self.node) + self.assertEqual([], node.traits) + + self.operator_cloud.baremetal.add_node_trait(self.node, 'CUSTOM_FAKE') + self.assertEqual(['CUSTOM_FAKE'], self.node.traits) + node = self.operator_cloud.baremetal.get_node(self.node) + self.assertEqual(['CUSTOM_FAKE'], node.traits) + + self.operator_cloud.baremetal.add_node_trait(self.node, 'CUSTOM_REAL') + self.assertEqual( + sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']), sorted(self.node.traits) + ) + node = self.operator_cloud.baremetal.get_node(self.node) + self.assertEqual( + sorted(['CUSTOM_FAKE', 'CUSTOM_REAL']), sorted(node.traits) + ) + + self.operator_cloud.baremetal.remove_node_trait( + node, 'CUSTOM_FAKE', ignore_missing=False + ) + self.assertEqual(['CUSTOM_REAL'], self.node.traits) + node = self.operator_cloud.baremetal.get_node(self.node) + self.assertEqual(['CUSTOM_REAL'], node.traits) + + def test_set_node_traits(self): + node = self.operator_cloud.baremetal.get_node(self.node) + self.assertEqual([], node.traits) + + traits1 = ['CUSTOM_FAKE', 'CUSTOM_REAL'] + traits2 = ['CUSTOM_FOOBAR'] + + self.operator_cloud.baremetal.set_node_traits(self.node, traits1) + self.assertEqual(sorted(traits1), sorted(self.node.traits)) + node = self.operator_cloud.baremetal.get_node(self.node) + 
self.assertEqual(sorted(traits1), sorted(node.traits)) + + self.operator_cloud.baremetal.set_node_traits(self.node, traits2) + self.assertEqual(['CUSTOM_FOOBAR'], self.node.traits) + node = self.operator_cloud.baremetal.get_node(self.node) + self.assertEqual(['CUSTOM_FOOBAR'], node.traits) + + +class TestBareMetalNodeListFirmware(base.BaseBaremetalTest): + min_microversion = '1.86' + + def test_list_firmware(self): + node = self.create_node(firmware_interface="no-firmware") + self.assertEqual("no-firmware", node.firmware_interface) + result = self.operator_cloud.baremetal.list_node_firmware(node) + self.assertEqual({'firmware': []}, result) + + +class TestBareMetalNodeInstanceName(base.BaseBaremetalTest): + min_microversion = '1.104' + + def test_node_instance_name(self): + """Test instance_name field functionality.""" + node = self.create_node(name='test-node-instance-name') + self.assertIsNone(node.instance_name) + + # Set instance_name + instance_name = "test-instance-123" + node = self.operator_cloud.baremetal.patch_node( + node.id, + [{'op': 'add', 'path': '/instance_name', 'value': instance_name}], + ) + self.assertEqual(instance_name, node.instance_name) + + # Verify on server side + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertEqual(instance_name, node.instance_name) + + # Clear instance_name + node = self.operator_cloud.baremetal.patch_node( + node.id, [{'op': 'remove', 'path': '/instance_name'}] + ) + self.assertIsNone(node.instance_name) + + # Verify on server side + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertIsNone(node.instance_name) + + def test_node_instance_name_query(self): + """Test querying nodes by instance_name.""" + node1 = self.create_node(name='node1') + node2 = self.create_node(name='node2') + + # Set different instance names using explicit patches + self.operator_cloud.baremetal.patch_node( + node1.id, + [{'op': 'add', 'path': '/instance_name', 'value': 'instance-1'}], + ) + 
self.operator_cloud.baremetal.patch_node( + node2.id, + [{'op': 'add', 'path': '/instance_name', 'value': 'instance-2'}], + ) + + # Query by instance_name + result = list( + self.operator_cloud.baremetal.nodes( + instance_name="instance-1", details=True + ) + ) + self.assertEqual(1, len(result)) + self.assertEqual(node1.id, result[0].id) + self.assertEqual("instance-1", result[0].instance_name) + + # Query by different instance_name + result = list( + self.operator_cloud.baremetal.nodes( + instance_name="instance-2", details=True + ) + ) + self.assertEqual(1, len(result)) + self.assertEqual(node2.id, result[0].id) + self.assertEqual("instance-2", result[0].instance_name) + + # Query by non-existent instance_name + result = list( + self.operator_cloud.baremetal.nodes( + instance_name="non-existent", details=True + ) + ) + self.assertEqual(0, len(result)) + + def test_node_instance_name_with_instance_info(self): + """Test that instance_name works with instance_info.display_name.""" + node = self.create_node(name='test-node-display-name') + + # Set instance_info.display_name first + self.operator_cloud.baremetal.patch_node( + node.id, + [ + { + 'op': 'add', + 'path': '/instance_info', + 'value': {'display_name': 'display-name-123'}, + } + ], + ) + + # Verify instance_name was automatically set + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertEqual('display-name-123', node.instance_name) + self.assertEqual( + {'display_name': 'display-name-123'}, node.instance_info + ) + + # Set instance_name explicitly + self.operator_cloud.baremetal.patch_node( + node.id, + [ + { + 'op': 'replace', + 'path': '/instance_name', + 'value': 'explicit-name', + } + ], + ) + + # Verify explicit instance_name takes precedence + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertEqual('explicit-name', node.instance_name) + + self.operator_cloud.baremetal.patch_node( + node.id, + [ + { + 'op': 'replace', + 'path': '/instance_info', + 'value': 
{'display_name': 'new-display-name'}, + } + ], + ) + + # Verify instance_name was not overridden + node = self.operator_cloud.baremetal.get_node(node.id) + self.assertEqual('explicit-name', node.instance_name) + self.assertEqual( + {'display_name': 'new-display-name'}, node.instance_info + ) diff --git a/openstack/tests/functional/baremetal/v1/test_port.py b/openstack/tests/functional/baremetal/v1/test_port.py new file mode 100644 index 0000000000..63f8f86dfe --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_port.py @@ -0,0 +1,151 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalPort(base.BaseBaremetalTest): + def setUp(self): + super().setUp() + self.node = self.create_node() + + def test_port_create_get_delete(self): + port = self.create_port(address='11:22:33:44:55:66') + self.assertEqual(self.node_id, port.node_id) + # Can be None if the microversion is too small, so we make sure it is + # not False. 
+ self.assertNotEqual(port.is_pxe_enabled, False) + self.assertIsNone(port.port_group_id) + + loaded = self.operator_cloud.baremetal.get_port(port.id) + self.assertEqual(loaded.id, port.id) + self.assertIsNotNone(loaded.address) + + with_fields = self.operator_cloud.baremetal.get_port( + port.id, fields=['uuid', 'extra', 'node_id'] + ) + self.assertEqual(port.id, with_fields.id) + self.assertIsNone(with_fields.address) + + self.operator_cloud.baremetal.delete_port(port, ignore_missing=False) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_port, + port.id, + ) + + def test_port_list(self): + node2 = self.create_node(name='test-node') + + port1 = self.create_port(address='11:22:33:44:55:66', node_id=node2.id) + port2 = self.create_port( + address='11:22:33:44:55:77', node_id=self.node.id + ) + + ports = self.operator_cloud.baremetal.ports( + address='11:22:33:44:55:77' + ) + self.assertEqual([p.id for p in ports], [port2.id]) + + ports = self.operator_cloud.baremetal.ports(node=node2.id) + self.assertEqual([p.id for p in ports], [port1.id]) + + ports = self.operator_cloud.baremetal.ports(node='test-node') + self.assertEqual([p.id for p in ports], [port1.id]) + + def test_port_list_update_delete(self): + self.create_port( + address='11:22:33:44:55:66', + node_id=self.node.id, + extra={'foo': 'bar'}, + ) + port = next( + self.operator_cloud.baremetal.ports( + details=True, address='11:22:33:44:55:66' + ) + ) + self.assertEqual(port.extra, {'foo': 'bar'}) + + # This test checks that resources returned from listing are usable + self.operator_cloud.baremetal.update_port(port, extra={'foo': 42}) + self.operator_cloud.baremetal.delete_port(port, ignore_missing=False) + + def test_port_update(self): + port = self.create_port(address='11:22:33:44:55:66') + port.address = '66:55:44:33:22:11' + port.extra = {'answer': 42} + + port = self.operator_cloud.baremetal.update_port(port) + self.assertEqual('66:55:44:33:22:11', port.address) + 
self.assertEqual({'answer': 42}, port.extra) + + port = self.operator_cloud.baremetal.get_port(port.id) + self.assertEqual('66:55:44:33:22:11', port.address) + self.assertEqual({'answer': 42}, port.extra) + + def test_port_patch(self): + port = self.create_port(address='11:22:33:44:55:66') + port.address = '66:55:44:33:22:11' + + port = self.operator_cloud.baremetal.patch_port( + port, dict(path='/extra/answer', op='add', value=42) + ) + self.assertEqual('66:55:44:33:22:11', port.address) + self.assertEqual({'answer': 42}, port.extra) + + port = self.operator_cloud.baremetal.get_port(port.id) + self.assertEqual('66:55:44:33:22:11', port.address) + self.assertEqual({'answer': 42}, port.extra) + + def test_port_negative_non_existing(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_port, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.find_port, + uuid, + ignore_missing=False, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.delete_port, + uuid, + ignore_missing=False, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.update_port, + uuid, + pxe_enabled=True, + ) + self.assertIsNone(self.operator_cloud.baremetal.find_port(uuid)) + self.assertIsNone(self.operator_cloud.baremetal.delete_port(uuid)) + + +class TestBareMetalPortFields(base.BaseBaremetalTest): + min_microversion = '1.8' + + def test_port_fields(self): + self.create_node() + self.create_port(address='11:22:33:44:55:66') + result = self.operator_cloud.baremetal.ports( + fields=['uuid', 'node_id'] + ) + for item in result: + self.assertIsNotNone(item.id) + self.assertIsNone(item.address) diff --git a/openstack/tests/functional/baremetal/v1/test_port_group.py b/openstack/tests/functional/baremetal/v1/test_port_group.py new file mode 100644 index 0000000000..335338de1d --- /dev/null +++ 
b/openstack/tests/functional/baremetal/v1/test_port_group.py @@ -0,0 +1,146 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalPortGroup(base.BaseBaremetalTest): + min_microversion = '1.23' + + def setUp(self): + super().setUp() + self.node = self.create_node() + + def test_port_group_create_get_delete(self): + port_group = self.create_port_group() + + loaded = self.operator_cloud.baremetal.get_port_group(port_group.id) + self.assertEqual(loaded.id, port_group.id) + self.assertIsNotNone(loaded.node_id) + + with_fields = self.operator_cloud.baremetal.get_port_group( + port_group.id, fields=['uuid', 'extra'] + ) + self.assertEqual(port_group.id, with_fields.id) + self.assertIsNone(with_fields.node_id) + + self.operator_cloud.baremetal.delete_port_group( + port_group, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_port_group, + port_group.id, + ) + + def test_port_list(self): + node2 = self.create_node(name='test-node') + + pg1 = self.create_port_group( + address='11:22:33:44:55:66', node_id=node2.id + ) + pg2 = self.create_port_group( + address='11:22:33:44:55:77', node_id=self.node.id + ) + + pgs = self.operator_cloud.baremetal.port_groups( + address='11:22:33:44:55:77' + ) + self.assertEqual([p.id for p in pgs], [pg2.id]) + + pgs = self.operator_cloud.baremetal.port_groups(node=node2.id) 
+ self.assertEqual([p.id for p in pgs], [pg1.id]) + + pgs = self.operator_cloud.baremetal.port_groups(node='test-node') + self.assertEqual([p.id for p in pgs], [pg1.id]) + + def test_port_list_update_delete(self): + self.create_port_group( + address='11:22:33:44:55:66', extra={'foo': 'bar'} + ) + port_group = next( + self.operator_cloud.baremetal.port_groups( + details=True, address='11:22:33:44:55:66' + ) + ) + self.assertEqual(port_group.extra, {'foo': 'bar'}) + + # This test checks that resources returned from listing are usable + self.operator_cloud.baremetal.update_port_group( + port_group, extra={'foo': 42} + ) + self.operator_cloud.baremetal.delete_port_group( + port_group, ignore_missing=False + ) + + def test_port_group_update(self): + port_group = self.create_port_group() + port_group.extra = {'answer': 42} + + port_group = self.operator_cloud.baremetal.update_port_group( + port_group + ) + self.assertEqual({'answer': 42}, port_group.extra) + + port_group = self.operator_cloud.baremetal.get_port_group( + port_group.id + ) + self.assertEqual({'answer': 42}, port_group.extra) + + def test_port_group_patch(self): + port_group = self.create_port_group() + + port_group = self.operator_cloud.baremetal.patch_port_group( + port_group, dict(path='/extra/answer', op='add', value=42) + ) + self.assertEqual({'answer': 42}, port_group.extra) + + port_group = self.operator_cloud.baremetal.get_port_group( + port_group.id + ) + self.assertEqual({'answer': 42}, port_group.extra) + + def test_port_group_negative_non_existing(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_port_group, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.find_port_group, + uuid, + ignore_missing=False, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.delete_port_group, + uuid, + ignore_missing=False, + ) + 
self.assertIsNone(self.operator_cloud.baremetal.find_port_group(uuid)) + self.assertIsNone( + self.operator_cloud.baremetal.delete_port_group(uuid) + ) + + def test_port_group_fields(self): + self.create_node() + self.create_port_group(address='11:22:33:44:55:66') + result = self.operator_cloud.baremetal.port_groups( + fields=['uuid', 'name'] + ) + for item in result: + self.assertIsNotNone(item.id) + self.assertIsNone(item.address) diff --git a/openstack/tests/functional/baremetal/v1/test_runbooks.py b/openstack/tests/functional/baremetal/v1/test_runbooks.py new file mode 100644 index 0000000000..36dc119e87 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_runbooks.py @@ -0,0 +1,210 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalRunbook(base.BaseBaremetalTest): + min_microversion = '1.92' + + def test_baremetal_runbook_create_get_delete(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "order": 150, + } + ] + runbook = self.create_runbook(name='CUSTOM_RUNBOOK', steps=steps) + loaded = self.operator_cloud.baremetal.get_runbook(runbook.id) + self.assertEqual(loaded.id, runbook.id) + self.operator_cloud.baremetal.delete_runbook( + runbook, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_runbook, + runbook.id, + ) + + def test_baremetal_runbook_list(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "order": 150, + } + ] + + runbook1 = self.create_runbook(name='CUSTOM_RUNBOOK1', steps=steps) + runbook2 = self.create_runbook(name='CUSTOM_RUNBOOK2', steps=steps) + runbooks = self.operator_cloud.baremetal.runbooks() + ids = [runbook.id for runbook in runbooks] + self.assertIn(runbook1.id, ids) + self.assertIn(runbook2.id, ids) + + runbooks_with_details = self.operator_cloud.baremetal.runbooks( + details=True + ) + for runbook in runbooks_with_details: + self.assertIsNotNone(runbook.id) + self.assertIsNotNone(runbook.name) + + runbook_with_fields = self.operator_cloud.baremetal.runbooks( + fields=['uuid'] + ) + for runbook in runbook_with_fields: + self.assertIsNotNone(runbook.id) + self.assertIsNone(runbook.name) + + def test_baremetal_runbook_list_update_delete(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "order": 150, + } + ] + runbook = self.create_runbook(name='CUSTOM_RUNBOOK4', 
steps=steps) + self.assertFalse(runbook.extra) + runbook.extra = {'answer': 42} + + runbook = self.operator_cloud.baremetal.update_runbook(runbook) + self.assertEqual({'answer': 42}, runbook.extra) + + runbook = self.operator_cloud.baremetal.get_runbook(runbook.id) + + self.operator_cloud.baremetal.delete_runbook( + runbook.id, ignore_missing=False + ) + + def test_baremetal_runbook_update(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "order": 150, + } + ] + runbook = self.create_runbook(name='CUSTOM_RUNBOOK4', steps=steps) + runbook.extra = {'answer': 42} + + runbook = self.operator_cloud.baremetal.update_runbook(runbook) + self.assertEqual({'answer': 42}, runbook.extra) + + runbook = self.operator_cloud.baremetal.get_runbook(runbook.id) + self.assertEqual({'answer': 42}, runbook.extra) + + def test_runbook_patch(self): + name = "CUSTOM_HYPERTHREADING_ON" + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "order": 150, + } + ] + runbook = self.create_runbook(name=name, steps=steps) + runbook = self.operator_cloud.baremetal.patch_runbook( + runbook, dict(path='/extra/answer', op='add', value=42) + ) + self.assertEqual({'answer': 42}, runbook.extra) + self.assertEqual(name, runbook.name) + + runbook = self.operator_cloud.baremetal.get_runbook(runbook.id) + self.assertEqual({'answer': 42}, runbook.extra) + + def test_runbook_negative_non_existing(self): + uuid = "b4145fbb-d4bc-0d1d-4382-e1e922f9035c" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_runbook, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.delete_runbook, + uuid, + ignore_missing=False, + ) + self.assertIsNone(self.operator_cloud.baremetal.delete_runbook(uuid)) + + def 
test_runbook_rbac_project_scoped(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "order": 150, + } + ] + + runbook = self.operator_cloud.baremetal.create_runbook( + name='CUSTOM_PROJ_AWESOME', steps=steps + ) + self.addCleanup( + lambda: self.operator_cloud.baremetal.delete_runbook( + runbook.id, ignore_missing=True + ) + ) + self.assertFalse(runbook.public) + self.assertEqual(self.operator_cloud.current_project_id, runbook.owner) + + # is accessible to the owner + loaded = self.operator_cloud.baremetal.get_runbook(runbook.id) + self.assertEqual(loaded.id, runbook.id) + + def test_runbook_rbac_system_scoped(self): + steps = [ + { + "interface": "bios", + "step": "apply_configuration", + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "order": 150, + } + ] + + runbook = self.system_admin_cloud.baremetal.create_runbook( + name='CUSTOM_SYS_AWESOME', steps=steps + ) + self.addCleanup( + lambda: self.system_admin_cloud.baremetal.delete_runbook( + runbook.id, ignore_missing=True + ) + ) + + self.assertFalse(runbook.public) + self.assertIsNone(runbook.owner) + + # is accessible to system-scoped users + loaded = self.system_admin_cloud.baremetal.get_runbook(runbook.id) + self.assertEqual(loaded.id, runbook.id) diff --git a/openstack/tests/functional/baremetal/v1/test_volume_connector.py b/openstack/tests/functional/baremetal/v1/test_volume_connector.py new file mode 100644 index 0000000000..7190f524c8 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_volume_connector.py @@ -0,0 +1,215 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalVolumeconnector(base.BaseBaremetalTest): + min_microversion = '1.32' + + def setUp(self): + super().setUp() + self.node = self.create_node(provision_state='enroll') + + def test_volume_connector_create_get_delete(self): + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + volume_connector = self.create_volume_connector( + connector_id='iqn.2017-07.org.openstack:01:d9a51732c3f', type='iqn' + ) + + loaded = self.operator_cloud.baremetal.get_volume_connector( + volume_connector.id + ) + self.assertEqual(loaded.id, volume_connector.id) + self.assertIsNotNone(loaded.node_id) + + with_fields = self.operator_cloud.baremetal.get_volume_connector( + volume_connector.id, fields=['uuid', 'extra'] + ) + self.assertEqual(volume_connector.id, with_fields.id) + self.assertIsNone(with_fields.node_id) + + self.operator_cloud.baremetal.delete_volume_connector( + volume_connector, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_volume_connector, + volume_connector.id, + ) + + def test_volume_connector_list(self): + node2 = self.create_node(name='test-node') + self.operator_cloud.baremetal.set_node_provision_state( + node2, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state(node2, 'power off') + 
self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + vc1 = self.create_volume_connector( + connector_id='iqn.2018-07.org.openstack:01:d9a514g2c32', + node_id=node2.id, + type='iqn', + ) + vc2 = self.create_volume_connector( + connector_id='iqn.2017-07.org.openstack:01:d9a51732c4g', + node_id=self.node.id, + type='iqn', + ) + + vcs = self.operator_cloud.baremetal.volume_connectors( + node=self.node.id + ) + self.assertEqual([v.id for v in vcs], [vc2.id]) + + vcs = self.operator_cloud.baremetal.volume_connectors(node=node2.id) + self.assertEqual([v.id for v in vcs], [vc1.id]) + + vcs = self.operator_cloud.baremetal.volume_connectors(node='test-node') + self.assertEqual([v.id for v in vcs], [vc1.id]) + + def test_volume_connector_list_update_delete(self): + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + self.create_volume_connector( + connector_id='iqn.2020-07.org.openstack:02:d9451472ce2', + node_id=self.node.id, + type='iqn', + extra={'foo': 'bar'}, + ) + volume_connector = next( + self.operator_cloud.baremetal.volume_connectors( + details=True, node=self.node.id + ) + ) + self.assertEqual(volume_connector.extra, {'foo': 'bar'}) + + # This test checks that resources returned from listing are usable + self.operator_cloud.baremetal.update_volume_connector( + volume_connector, extra={'foo': 42} + ) + self.operator_cloud.baremetal.delete_volume_connector( + volume_connector, ignore_missing=False + ) + + def test_volume_connector_update(self): + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + volume_connector = self.create_volume_connector( + 
connector_id='iqn.2019-07.org.openstack:03:de45b472c40', + node_id=self.node.id, + type='iqn', + ) + volume_connector.extra = {'answer': 42} + + volume_connector = ( + self.operator_cloud.baremetal.update_volume_connector( + volume_connector + ) + ) + self.assertEqual({'answer': 42}, volume_connector.extra) + + volume_connector = self.operator_cloud.baremetal.get_volume_connector( + volume_connector.id + ) + self.assertEqual({'answer': 42}, volume_connector.extra) + + def test_volume_connector_patch(self): + vol_conn_id = 'iqn.2020-07.org.openstack:04:de45b472c40' + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + volume_connector = self.create_volume_connector( + connector_id=vol_conn_id, node_id=self.node.id, type='iqn' + ) + + volume_connector = ( + self.operator_cloud.baremetal.patch_volume_connector( + volume_connector, + dict(path='/extra/answer', op='add', value=42), + ) + ) + self.assertEqual({'answer': 42}, volume_connector.extra) + self.assertEqual(vol_conn_id, volume_connector.connector_id) + + volume_connector = self.operator_cloud.baremetal.get_volume_connector( + volume_connector.id + ) + self.assertEqual({'answer': 42}, volume_connector.extra) + + def test_volume_connector_negative_non_existing(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_volume_connector, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.find_volume_connector, + uuid, + ignore_missing=False, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.delete_volume_connector, + uuid, + ignore_missing=False, + ) + self.assertIsNone( + self.operator_cloud.baremetal.find_volume_connector(uuid) + ) + self.assertIsNone( + self.operator_cloud.baremetal.delete_volume_connector(uuid) + ) + + 
def test_volume_connector_fields(self): + self.create_node() + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + self.create_volume_connector( + connector_id='iqn.2018-08.org.openstack:04:de45f37c48', + node_id=self.node.id, + type='iqn', + ) + result = self.operator_cloud.baremetal.volume_connectors( + fields=['uuid', 'node_id'] + ) + for item in result: + self.assertIsNotNone(item.id) + self.assertIsNone(item.connector_id) diff --git a/openstack/tests/functional/baremetal/v1/test_volume_target.py b/openstack/tests/functional/baremetal/v1/test_volume_target.py new file mode 100644 index 0000000000..a3567d7560 --- /dev/null +++ b/openstack/tests/functional/baremetal/v1/test_volume_target.py @@ -0,0 +1,232 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack import exceptions +from openstack.tests.functional.baremetal.v1 import base + + +class TestBareMetalVolumetarget(base.BaseBaremetalTest): + min_microversion = '1.32' + + def setUp(self): + super().setUp() + self.node = self.create_node(provision_state='enroll') + + def test_volume_target_create_get_delete(self): + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + volume_target = self.create_volume_target( + boot_index=0, + volume_id='04452bed-5367-4202-8bf5-de4335ac56d2', + volume_type='iscsi', + ) + + loaded = self.operator_cloud.baremetal.get_volume_target( + volume_target.id + ) + self.assertEqual(loaded.id, volume_target.id) + self.assertIsNotNone(loaded.node_id) + + with_fields = self.operator_cloud.baremetal.get_volume_target( + volume_target.id, fields=['uuid', 'extra'] + ) + self.assertEqual(volume_target.id, with_fields.id) + self.assertIsNone(with_fields.node_id) + + self.operator_cloud.baremetal.delete_volume_target( + volume_target, ignore_missing=False + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_volume_target, + volume_target.id, + ) + + def test_volume_target_list(self): + node2 = self.create_node(name='test-node') + self.operator_cloud.baremetal.set_node_provision_state( + node2, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state(node2, 'power off') + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + vt1 = self.create_volume_target( + boot_index=0, + volume_id='bd4d008c-7d31-463d-abf9-6c23d9d55f7f', + node_id=node2.id, + volume_type='iscsi', + ) + vt2 = self.create_volume_target( + boot_index=0, + volume_id='04452bed-5367-4202-8bf5-de4335ac57c2', + node_id=self.node.id, + volume_type='iscsi', + ) + + vts = 
self.operator_cloud.baremetal.volume_targets(node=self.node.id) + self.assertEqual([v.id for v in vts], [vt2.id]) + + vts = self.operator_cloud.baremetal.volume_targets(node=node2.id) + self.assertEqual([v.id for v in vts], [vt1.id]) + + vts = self.operator_cloud.baremetal.volume_targets(node='test-node') + self.assertEqual([v.id for v in vts], [vt1.id]) + + vts_with_details = self.operator_cloud.baremetal.volume_targets( + details=True + ) + for i in vts_with_details: + self.assertIsNotNone(i.id) + self.assertIsNotNone(i.volume_type) + + vts_with_fields = self.operator_cloud.baremetal.volume_targets( + fields=['uuid', 'node_uuid'] + ) + for i in vts_with_fields: + self.assertIsNotNone(i.id) + self.assertIsNone(i.volume_type) + self.assertIsNotNone(i.node_id) + + def test_volume_target_list_update_delete(self): + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + self.create_volume_target( + boot_index=0, + volume_id='04452bed-5367-4202-8bf5-de4335ac57h3', + node_id=self.node.id, + volume_type='iscsi', + extra={'foo': 'bar'}, + ) + volume_target = next( + self.operator_cloud.baremetal.volume_targets( + details=True, node=self.node.id + ) + ) + self.assertEqual(volume_target.extra, {'foo': 'bar'}) + + # This test checks that resources returned from listing are usable + self.operator_cloud.baremetal.update_volume_target( + volume_target, extra={'foo': 42} + ) + self.operator_cloud.baremetal.delete_volume_target( + volume_target, ignore_missing=False + ) + + def test_volume_target_update(self): + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + volume_target = self.create_volume_target( + boot_index=0, + volume_id='04452bed-5367-4202-8bf5-de4335ac53h7', + node_id=self.node.id, + volume_type='isci', + ) + 
volume_target.extra = {'answer': 42} + + volume_target = self.operator_cloud.baremetal.update_volume_target( + volume_target + ) + self.assertEqual({'answer': 42}, volume_target.extra) + + volume_target = self.operator_cloud.baremetal.get_volume_target( + volume_target.id + ) + self.assertEqual({'answer': 42}, volume_target.extra) + + def test_volume_target_patch(self): + vol_targ_id = '04452bed-5367-4202-9cg6-de4335ac53h7' + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + volume_target = self.create_volume_target( + boot_index=0, + volume_id=vol_targ_id, + node_id=self.node.id, + volume_type='isci', + ) + + volume_target = self.operator_cloud.baremetal.patch_volume_target( + volume_target, dict(path='/extra/answer', op='add', value=42) + ) + self.assertEqual({'answer': 42}, volume_target.extra) + self.assertEqual(vol_targ_id, volume_target.volume_id) + + volume_target = self.operator_cloud.baremetal.get_volume_target( + volume_target.id + ) + self.assertEqual({'answer': 42}, volume_target.extra) + + def test_volume_target_negative_non_existing(self): + uuid = "5c9dcd04-2073-49bc-9618-99ae634d8971" + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.get_volume_target, + uuid, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.find_volume_target, + uuid, + ignore_missing=False, + ) + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.baremetal.delete_volume_target, + uuid, + ignore_missing=False, + ) + self.assertIsNone( + self.operator_cloud.baremetal.find_volume_target(uuid) + ) + self.assertIsNone( + self.operator_cloud.baremetal.delete_volume_target(uuid) + ) + + def test_volume_target_fields(self): + self.create_node() + self.operator_cloud.baremetal.set_node_provision_state( + self.node, 'manage', wait=True + ) + 
self.operator_cloud.baremetal.set_node_power_state( + self.node, 'power off' + ) + self.create_volume_target( + boot_index=0, + volume_id='04452bed-5367-4202-8bf5-99ae634d8971', + node_id=self.node.id, + volume_type='iscsi', + ) + result = self.operator_cloud.baremetal.volume_targets( + fields=['uuid', 'node_id'] + ) + for item in result: + self.assertIsNotNone(item.id) diff --git a/openstack/tests/functional/base.py b/openstack/tests/functional/base.py index 3d37081b08..c2c0fdab81 100644 --- a/openstack/tests/functional/base.py +++ b/openstack/tests/functional/base.py @@ -10,74 +10,309 @@ # License for the specific language governing permissions and limitations # under the License. +import operator import os -import os_client_config import time -import unittest +import uuid -from keystoneauth1 import exceptions as _exceptions +from keystoneauth1 import discover + +import openstack.config from openstack import connection +from openstack.tests import base #: Defines the OpenStack Client Config (OCC) cloud key in your OCC config #: file, typically in $HOME/.config/openstack/clouds.yaml. That configuration #: will determine where the functional tests will be run and what resource #: defaults will be used to run the functional tests. 
-TEST_CLOUD = os.getenv('OS_CLOUD', 'test_cloud') +TEST_CONFIG = openstack.config.OpenStackConfig() +TEST_CLOUD_NAME = os.getenv('OS_CLOUD', 'devstack-admin') +TEST_CLOUD_REGION = openstack.config.get_cloud_region(cloud=TEST_CLOUD_NAME) + + +def _get_resource_value(resource_key): + return TEST_CONFIG.get_extra_config('functional').get(resource_key) + + +def _disable_keep_alive(conn): + sess = conn.config.get_session() + sess.keep_alive = False + + +class BaseFunctionalTest(base.TestCase): + user_cloud: connection.Connection + user_cloud_alt: connection.Connection + operator_cloud: connection.Connection + + _wait_for_timeout_key = '' + + def setUp(self): + super().setUp() + + self._system_admin_name = os.environ.get( + 'OPENSTACKSDK_SYSTEM_ADMIN_CLOUD', + 'devstack-system-admin', + ) + if not self._system_admin_name: + raise self.failureException( + "OPENSTACKSDK_SYSTEM_ADMIN_CLOUD must be set to a non-empty " + "value" + ) + + self.config = openstack.config.OpenStackConfig() + + self._user_cloud_name = os.environ.get( + 'OPENSTACKSDK_DEMO_CLOUD', 'devstack' + ) + if not self._user_cloud_name: + raise self.failureException( + "OPENSTACKSDK_DEMO_CLOUD must be set to a non-empty value" + ) + + self._user_alt_cloud_name = os.environ.get( + 'OPENSTACKSDK_DEMO_CLOUD_ALT', 'devstack-alt' + ) + if not self._user_alt_cloud_name: + raise self.failureException( + "OPENSTACKSDK_DEMO_CLOUD_ALT must be set to a non-empty value" + ) + + self._set_user_cloud() + + self._operator_cloud_name = os.environ.get( + 'OPENSTACKSDK_OPERATOR_CLOUD', 'devstack-admin' + ) + if not self._operator_cloud_name: + raise self.failureException( + "OPENSTACKSDK_OPERATOR_CLOUD must be set to a non-empty value" + ) + + self._set_operator_cloud() + + self.flavor = self._pick_flavor() + self.image = self._pick_image() + + # Defines default timeout for wait_for methods used + # in the functional tests + self._wait_for_timeout = int( + os.getenv( + self._wait_for_timeout_key, + 
os.getenv('OPENSTACKSDK_FUNC_TEST_TIMEOUT', 300), + ) + ) + + def _set_user_cloud(self, **kwargs): + user_config = self.config.get_one( + cloud=self._user_cloud_name, **kwargs + ) + self.user_cloud = connection.Connection(config=user_config) + _disable_keep_alive(self.user_cloud) + + user_config_alt = self.config.get_one( + cloud=self._user_alt_cloud_name, **kwargs + ) + self.user_cloud_alt = connection.Connection(config=user_config_alt) + _disable_keep_alive(self.user_cloud_alt) + + def _set_operator_cloud(self, **kwargs): + operator_config = self.config.get_one( + cloud=self._operator_cloud_name, **kwargs + ) + self.operator_cloud = connection.Connection(config=operator_config) + _disable_keep_alive(self.operator_cloud) + + system_admin_config = self.config.get_one( + cloud=self._system_admin_name, **kwargs + ) + self.system_admin_cloud = connection.Connection( + config=system_admin_config + ) + _disable_keep_alive(self.system_admin_cloud) + + def _pick_flavor(self): + """Pick a sensible flavor to run tests with. + + This returns None if the compute service is not present (e.g. + ironic-only deployments). 
+ """ + if not self.user_cloud.has_service('compute'): + return None + + flavors = self.user_cloud.list_flavors(get_extra=False) + + flavor_name = os.environ.get('OPENSTACKSDK_FLAVOR') + + if not flavor_name: + flavor_name = _get_resource_value('flavor_name') + + if flavor_name: + for flavor in flavors: + if flavor.name == flavor_name: + return flavor + + raise self.failureException( + "Cloud does not have flavor '%s'", + flavor_name, + ) + + # Enable running functional tests against RAX, which requires + # performance flavors be used for boot from volume + + for flavor in sorted(flavors, key=operator.attrgetter('ram')): + if 'performance' in flavor.name: + return flavor + + # Otherwise, pick the smallest flavor with a ephemeral disk configured + + for flavor in sorted(flavors, key=operator.attrgetter('ram')): + if flavor.disk: + return flavor + + raise self.failureException('No sensible flavor found') + + def _pick_image(self): + """Pick a sensible image to run tests with. + + This returns None if the image service is not present. 
+ """ + if not self.user_cloud.has_service('image'): + return None + + images = self.user_cloud.list_images() + + image_name = os.environ.get('OPENSTACKSDK_IMAGE') + + if not image_name: + image_name = _get_resource_value('image_name') + + if image_name: + for image in images: + if image.name == image_name: + return image + + raise self.failureException( + "Cloud does not have image '%s'", + image_name, + ) + + for image in images: + if image.name.startswith('cirros') and image.name.endswith('-uec'): + return image + + for image in images: + if ( + image.name.startswith('cirros') + and image.disk_format == 'qcow2' + ): + return image + + for image in images: + if image.name.lower().startswith('ubuntu'): + return image + for image in images: + if image.name.lower().startswith('centos'): + return image + + raise self.failureException('No sensible image found') + + def addEmptyCleanup(self, func, *args, **kwargs): + def cleanup(): + result = func(*args, **kwargs) + self.assertIsNone(result) + + self.addCleanup(cleanup) + + def require_service(self, service_type, min_microversion=None, **kwargs): + """Method to check whether a service exists + + Usage:: + class TestMeter(base.BaseFunctionalTest): + def setUp(self): + super(TestMeter, self).setUp() + self.require_service('metering') -class Opts(object): - def __init__(self, cloud_name='test_cloud', debug=False): - self.cloud = cloud_name - self.debug = debug + :returns: True if the service exists, otherwise False. 
+ """ + if not self.operator_cloud.has_service(service_type): + self.skipTest(f'Service {service_type} not found in cloud') + if not min_microversion: + return -def _get_resource_value(resource_key, default): - try: - return cloud.config['functional'][resource_key] - except KeyError: - return default + data = self.operator_cloud.session.get_endpoint_data( + service_type=service_type, **kwargs + ) -opts = Opts(cloud_name=TEST_CLOUD) -occ = os_client_config.OpenStackConfig() -cloud = occ.get_one_cloud(opts.cloud, argparse=opts) + if not data or not ( + data.min_microversion + and data.max_microversion + and discover.version_between( + data.min_microversion, + data.max_microversion, + min_microversion, + ) + ): + self.skipTest( + f'Service {service_type} does not provide microversion ' + f'{min_microversion}' + ) -IMAGE_NAME = _get_resource_value('image_name', 'cirros-0.3.4-x86_64-uec') -FLAVOR_NAME = _get_resource_value('flavor_name', 'm1.small') + def getUniqueString(self, prefix=None): + """Generate unique resource name""" + # Globally unique names can only rely on some form of uuid + # unix_t is also used to easier determine orphans when running real + # functional tests on a real cloud + return ( + prefix if prefix else '' + ) + f"{int(time.time())}-{uuid.uuid4().hex}" + def create_temporary_project(self): + """Create a new temporary project. -def service_exists(**kwargs): - """Decorator function to check whether a service exists + This is useful for tests that modify things like quotas, which would + cause issues for other tests. 
+ """ + project_name = self.getUniqueString('project-') + project = self.operator_cloud.get_project(project_name) + if not project: + params = { + 'name': project_name, + 'description': f'Temporary project created for {self.id()}', + # assume identity API v3 for now + 'domain_id': self.operator_cloud.get_domain('default')['id'], + } + project = self.operator_cloud.create_project(**params) - Usage: - @unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") - class TestMeter(base.BaseFunctionalTest): - ... + # Grant the current user access to the project + user_id = self.operator_cloud.current_user_id + role_assignment = self.operator_cloud.list_role_assignments( + {'user': user_id, 'project': project['id']} + ) + if not role_assignment: + self.operator_cloud.grant_role( + 'member', user=user_id, project=project['id'], wait=True + ) - :param kwargs: The kwargs needed to filter an endpoint. - :returns: True if the service exists, otherwise False. 
- """ - try: - conn = connection.from_config(cloud_name=TEST_CLOUD) - conn.session.get_endpoint(**kwargs) + self.addCleanup(self._delete_temporary_project, project) - return True - except _exceptions.EndpointNotFound: - return False + return project + def _delete_temporary_project(self, project): + self.operator_cloud.revoke_role( + 'member', + user=self.operator_cloud.current_user_id, + project=project.id, + ) + self.operator_cloud.delete_project(project.id) -class BaseFunctionalTest(unittest.TestCase): - @classmethod - def setUpClass(cls): - cls.conn = connection.from_config(cloud_name=TEST_CLOUD) +class KeystoneBaseFunctionalTest(BaseFunctionalTest): + def setUp(self): + super().setUp() - @classmethod - def assertIs(cls, expected, actual): - if expected != actual: - raise Exception(expected + ' != ' + actual) + # we only support v3, since v2 was deprecated in Queens (2018) - @classmethod - def linger_for_delete(cls): - time.sleep(40) + if not self.user_cloud.has_service('identity', '3'): + self.skipTest('identity service not supported by cloud') diff --git a/openstack/tests/functional/block_storage/__init__.py b/openstack/tests/functional/block_storage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/block_storage/v2/__init__.py b/openstack/tests/functional/block_storage/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/block_storage/v2/base.py b/openstack/tests/functional/block_storage/v2/base.py new file mode 100644 index 0000000000..4bdcd2454f --- /dev/null +++ b/openstack/tests/functional/block_storage/v2/base.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.functional import base + + +class BaseBlockStorageTest(base.BaseFunctionalTest): + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE' + + def setUp(self): + super().setUp() + self._set_user_cloud(block_storage_api_version='2') + self._set_operator_cloud(block_storage_api_version='2') + + if not self.user_cloud.has_service('block-storage', '2'): + self.skipTest('block-storage service not supported by cloud') diff --git a/openstack/tests/functional/block_storage/v2/test_backup.py b/openstack/tests/functional/block_storage/v2/test_backup.py new file mode 100644 index 0000000000..9b2fe79441 --- /dev/null +++ b/openstack/tests/functional/block_storage/v2/test_backup.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v2 import backup as _backup +from openstack.block_storage.v2 import volume as _volume +from openstack.tests.functional.block_storage.v2 import base + + +class TestBackup(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + if not self.user_cloud.has_service('object-store'): + self.skipTest('Object service is requred, but not available') + + self.VOLUME_NAME = self.getUniqueString() + self.VOLUME_ID = None + self.BACKUP_NAME = self.getUniqueString() + self.BACKUP_ID = None + + volume = self.user_cloud.block_storage.create_volume( + name=self.VOLUME_NAME, size=1 + ) + self.user_cloud.block_storage.wait_for_status( + volume, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + assert isinstance(volume, _volume.Volume) + self.VOLUME_ID = volume.id + + backup = self.user_cloud.block_storage.create_backup( + name=self.BACKUP_NAME, volume_id=volume.id + ) + self.user_cloud.block_storage.wait_for_status( + backup, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + assert isinstance(backup, _backup.Backup) + self.assertEqual(self.BACKUP_NAME, backup.name) + self.BACKUP_ID = backup.id + + def tearDown(self): + sot = self.user_cloud.block_storage.delete_backup( + self.BACKUP_ID, ignore_missing=False + ) + sot = self.user_cloud.block_storage.delete_volume( + self.VOLUME_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID) + self.assertEqual(self.BACKUP_NAME, sot.name) diff --git a/openstack/tests/functional/block_storage/v2/test_snapshot.py b/openstack/tests/functional/block_storage/v2/test_snapshot.py new file mode 100644 index 0000000000..42c4bcc421 --- /dev/null +++ b/openstack/tests/functional/block_storage/v2/test_snapshot.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this 
file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.block_storage.v2 import snapshot as _snapshot +from openstack.block_storage.v2 import volume as _volume +from openstack.tests.functional.block_storage.v2 import base + + +class TestSnapshot(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + self.SNAPSHOT_NAME = self.getUniqueString() + self.SNAPSHOT_ID = None + self.VOLUME_NAME = self.getUniqueString() + self.VOLUME_ID = None + + volume = self.user_cloud.block_storage.create_volume( + name=self.VOLUME_NAME, size=1 + ) + self.user_cloud.block_storage.wait_for_status( + volume, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + assert isinstance(volume, _volume.Volume) + self.assertEqual(self.VOLUME_NAME, volume.name) + self.VOLUME_ID = volume.id + snapshot = self.user_cloud.block_storage.create_snapshot( + name=self.SNAPSHOT_NAME, volume_id=self.VOLUME_ID + ) + self.user_cloud.block_storage.wait_for_status( + snapshot, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + assert isinstance(snapshot, _snapshot.Snapshot) + self.assertEqual(self.SNAPSHOT_NAME, snapshot.name) + self.SNAPSHOT_ID = snapshot.id + + def tearDown(self): + snapshot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) + sot = self.user_cloud.block_storage.delete_snapshot( + snapshot, ignore_missing=False + ) + self.user_cloud.block_storage.wait_for_delete( + snapshot, interval=2, wait=self._wait_for_timeout + ) + self.assertIsNone(sot) + sot = 
self.user_cloud.block_storage.delete_volume( + self.VOLUME_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) + self.assertEqual(self.SNAPSHOT_NAME, sot.name) diff --git a/openstack/tests/functional/block_storage/v2/test_stats.py b/openstack/tests/functional/block_storage/v2/test_stats.py new file mode 100644 index 0000000000..a76db0416e --- /dev/null +++ b/openstack/tests/functional/block_storage/v2/test_stats.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.block_storage.v2 import stats as _stats +from openstack.tests.functional.block_storage.v2 import base + + +class TestStats(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + sot = self.operator_cloud.block_storage.backend_pools() + for pool in sot: + self.assertIsInstance(pool, _stats.Pools) + + def test_list(self): + capList = [ + 'volume_backend_name', + 'storage_protocol', + 'free_capacity_gb', + 'driver_version', + 'goodness_function', + 'QoS_support', + 'vendor_name', + 'pool_name', + 'thin_provisioning_support', + 'thick_provisioning_support', + 'timestamp', + 'max_over_subscription_ratio', + 'total_volumes', + 'total_capacity_gb', + 'filter_function', + 'multiattach', + 'provisioned_capacity_gb', + 'allocated_capacity_gb', + 'reserved_percentage', + 'location_info', + ] + capList.sort() + pools = self.operator_cloud.block_storage.backend_pools() + for pool in pools: + caps = pool.capabilities + keys = list(caps.keys()) + assert isinstance(caps, dict) + # Check that we have at minimum listed capabilities + for cap in sorted(capList): + self.assertIn(cap, keys) diff --git a/openstack/tests/functional/block_storage/v2/test_type.py b/openstack/tests/functional/block_storage/v2/test_type.py new file mode 100644 index 0000000000..f1e54ebf38 --- /dev/null +++ b/openstack/tests/functional/block_storage/v2/test_type.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.block_storage.v2 import type as _type +from openstack.tests.functional.block_storage.v2 import base + + +class TestType(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + self.TYPE_NAME = self.getUniqueString() + self.TYPE_ID = None + + sot = self.operator_cloud.block_storage.create_type( + name=self.TYPE_NAME + ) + assert isinstance(sot, _type.Type) + self.assertEqual(self.TYPE_NAME, sot.name) + self.TYPE_ID = sot.id + + def tearDown(self): + sot = self.operator_cloud.block_storage.delete_type( + self.TYPE_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID) + self.assertEqual(self.TYPE_NAME, sot.name) diff --git a/openstack/tests/functional/block_storage/v2/test_volume.py b/openstack/tests/functional/block_storage/v2/test_volume.py new file mode 100644 index 0000000000..9cb3e13d67 --- /dev/null +++ b/openstack/tests/functional/block_storage/v2/test_volume.py @@ -0,0 +1,50 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v2 import volume as _volume +from openstack.tests.functional.block_storage.v2 import base + + +class TestVolume(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + if not self.user_cloud.has_service('block-storage'): + self.skipTest('block-storage service not supported by cloud') + + self.VOLUME_NAME = self.getUniqueString() + self.VOLUME_ID = None + + volume = self.user_cloud.block_storage.create_volume( + name=self.VOLUME_NAME, size=1 + ) + self.user_cloud.block_storage.wait_for_status( + volume, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + assert isinstance(volume, _volume.Volume) + self.assertEqual(self.VOLUME_NAME, volume.name) + self.VOLUME_ID = volume.id + + def tearDown(self): + sot = self.user_cloud.block_storage.delete_volume( + self.VOLUME_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.block_storage.get_volume(self.VOLUME_ID) + self.assertEqual(self.VOLUME_NAME, sot.name) diff --git a/openstack/tests/functional/block_storage/v3/__init__.py b/openstack/tests/functional/block_storage/v3/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/block_storage/v3/base.py b/openstack/tests/functional/block_storage/v3/base.py new file mode 100644 index 0000000000..d28c278ee2 --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/base.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import _proxy as _block_storage_v3 +from openstack.tests.functional import base +from openstack import utils + + +class BaseBlockStorageTest(base.BaseFunctionalTest): + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_BLOCK_STORAGE' + + admin_block_storage_client: _block_storage_v3.Proxy + block_storage_client: _block_storage_v3.Proxy + + def setUp(self): + super().setUp() + self._set_user_cloud(block_storage_api_version='3') + if not self.user_cloud.has_service('block-storage', '3'): + self.skipTest('block-storage service not supported by cloud') + + self.admin_block_storage_client = utils.ensure_service_version( + self.operator_cloud.block_storage, '3' + ) + self.block_storage_client = utils.ensure_service_version( + self.user_cloud.block_storage, '3' + ) diff --git a/openstack/tests/functional/block_storage/v3/test_attachment.py b/openstack/tests/functional/block_storage/v3/test_attachment.py new file mode 100644 index 0000000000..79d8312fd6 --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_attachment.py @@ -0,0 +1,90 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import volume as _volume +from openstack.tests.functional.block_storage.v3 import base + + +class TestAttachment(base.BaseBlockStorageTest): + """Test class for volume attachment operations. 
+ + We have implemented a test that performs attachment create + and attachment delete operations. Attachment create requires + the instance ID and the volume ID for which we have created a + volume resource and an instance resource. + We haven't implemented attachment update test since it requires + the host connector information which is not readily available to + us and hard to retrieve. Without passing this information, the + attachment update operation will fail. + Similarly, we haven't implement attachment complete test since it + depends on attachment update and can only be performed when the volume + status is 'attaching' which is done by attachment update operation. + """ + + def setUp(self): + super().setUp() + + # Create Volume + self.volume_name = self.getUniqueString() + + volume = self.user_cloud.block_storage.create_volume( + name=self.volume_name, size=1 + ) + self.user_cloud.block_storage.wait_for_status( + volume, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + self.assertIsInstance(volume, _volume.Volume) + self.VOLUME_ID = volume.id + + # Create Server + self.server_name = self.getUniqueString() + self.server = self.operator_cloud.compute.create_server( + name=self.server_name, + flavor_id=self.flavor.id, + image_id=self.image.id, + networks='none', + ) + self.operator_cloud.compute.wait_for_server( + self.server, wait=self._wait_for_timeout + ) + + def tearDown(self): + # Since delete_on_termination flag is set to True, we + # don't need to cleanup the volume manually + result = self.operator_cloud.compute.delete_server(self.server.id) + self.operator_cloud.compute.wait_for_delete( + self.server, wait=self._wait_for_timeout + ) + self.assertIsNone(result) + super().tearDown() + + def test_attachment(self): + attachment = self.admin_block_storage_client.create_attachment( + self.VOLUME_ID, + connector={}, + instance_id=self.server.id, + ) + self.assertIn('id', attachment) + self.assertIn('status', 
attachment) + self.assertIn('instance', attachment) + self.assertIn('volume_id', attachment) + self.assertIn('attached_at', attachment) + self.assertIn('detached_at', attachment) + self.assertIn('attach_mode', attachment) + self.assertIn('connection_info', attachment) + attachment = self.block_storage_client.delete_attachment( + attachment.id, ignore_missing=False + ) diff --git a/openstack/tests/functional/block_storage/v3/test_availability_zone.py b/openstack/tests/functional/block_storage/v3/test_availability_zone.py new file mode 100644 index 0000000000..3d1d4f2466 --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_availability_zone.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional import base +from openstack import utils + + +class TestAvailabilityZone(base.BaseFunctionalTest): + def test_list(self): + block_storage = utils.ensure_service_version( + self.operator_cloud.block_storage, '3' + ) + availability_zones = list(block_storage.availability_zones()) + self.assertGreater(len(availability_zones), 0) + + for az in availability_zones: + self.assertIsInstance(az.name, str) + self.assertIsInstance(az.state, dict) diff --git a/openstack/tests/functional/block_storage/v3/test_backup.py b/openstack/tests/functional/block_storage/v3/test_backup.py new file mode 100644 index 0000000000..6dcde68cc4 --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_backup.py @@ -0,0 +1,106 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v3 import backup as _backup +from openstack.block_storage.v3 import volume as _volume +from openstack.tests.functional.block_storage.v3 import base + + +class TestBackup(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + if not self.user_cloud.has_service('object-store'): + self.skipTest('Object service is requred, but not available') + + self.VOLUME_NAME = self.getUniqueString() + self.VOLUME_ID = None + self.BACKUP_NAME = self.getUniqueString() + self.BACKUP_ID = None + + volume = self.user_cloud.block_storage.create_volume( + name=self.VOLUME_NAME, size=1 + ) + self.user_cloud.block_storage.wait_for_status( + volume, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + assert isinstance(volume, _volume.Volume) + self.VOLUME_ID = volume.id + + backup = self.user_cloud.block_storage.create_backup( + name=self.BACKUP_NAME, volume_id=volume.id, is_incremental=False + ) + self.user_cloud.block_storage.wait_for_status( + backup, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + assert isinstance(backup, _backup.Backup) + self.assertEqual(self.BACKUP_NAME, backup.name) + self.BACKUP_ID = backup.id + + def tearDown(self): + sot = self.user_cloud.block_storage.delete_backup( + self.BACKUP_ID, ignore_missing=False + ) + sot = self.user_cloud.block_storage.delete_volume( + self.VOLUME_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.block_storage.get_backup(self.BACKUP_ID) + self.assertEqual(self.BACKUP_NAME, sot.name) + self.assertEqual(False, sot.is_incremental) + + def test_create_metadata(self): + metadata_backup = self.user_cloud.block_storage.create_backup( + name=self.getUniqueString(), + volume_id=self.VOLUME_ID, + metadata=dict(foo="bar"), + ) + self.user_cloud.block_storage.wait_for_status( + metadata_backup, + status='available', + failures=['error'], + 
interval=5, + wait=self._wait_for_timeout, + ) + self.user_cloud.block_storage.delete_backup( + metadata_backup.id, ignore_missing=False + ) + + def test_create_incremental(self): + incremental_backup = self.user_cloud.block_storage.create_backup( + name=self.getUniqueString(), + volume_id=self.VOLUME_ID, + is_incremental=True, + ) + self.user_cloud.block_storage.wait_for_status( + incremental_backup, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertEqual(True, incremental_backup.is_incremental) + self.user_cloud.block_storage.delete_backup( + incremental_backup.id, ignore_missing=False + ) + self.user_cloud.block_storage.wait_for_delete(incremental_backup) diff --git a/openstack/tests/functional/block_storage/v3/test_block_storage_summary.py b/openstack/tests/functional/block_storage/v3/test_block_storage_summary.py new file mode 100644 index 0000000000..084e9e63fc --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_block_storage_summary.py @@ -0,0 +1,21 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional.block_storage.v3 import base + + +class TestBlockStorageSummary(base.BaseBlockStorageTest): + def test_get(self): + sot = self.admin_block_storage_client.summary(all_projects=True) + self.assertIn('total_size', sot) + self.assertIn('total_count', sot) + self.assertIn('metadata', sot) diff --git a/openstack/tests/functional/block_storage/v3/test_capabilities.py b/openstack/tests/functional/block_storage/v3/test_capabilities.py new file mode 100644 index 0000000000..f0fd8e7211 --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_capabilities.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional.block_storage.v3 import base + + +class TestCapabilities(base.BaseBlockStorageTest): + # getting capabilities can be slow + TIMEOUT_SCALING_FACTOR = 1.5 + + def test_get(self): + services = list(self.operator_cloud.block_storage.services()) + host = next( + service + for service in services + if service.binary == 'cinder-volume' + ).host + + sot = self.operator_cloud.block_storage.get_capabilities(host) + self.assertIn('description', sot) + self.assertIn('display_name', sot) + self.assertIn('driver_version', sot) + self.assertIn('namespace', sot) + self.assertIn('pool_name', sot) + self.assertIn('properties', sot) + self.assertIn('replication_targets', sot) + self.assertIn('storage_protocol', sot) + self.assertIn('vendor_name', sot) + self.assertIn('visibility', sot) + self.assertIn('volume_backend_name', sot) diff --git a/openstack/tests/functional/block_storage/v3/test_default_type.py b/openstack/tests/functional/block_storage/v3/test_default_type.py new file mode 100644 index 0000000000..ab27f7bcd0 --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_default_type.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v3 import default_type as _default_type +from openstack.tests.functional.block_storage.v3 import base + + +class TestDefaultType(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + if not self._operator_cloud_name: + self.skipTest("Operator cloud must be set for this test") + self._set_operator_cloud(block_storage_api_version='3.67') + block_storage = self.operator_cloud.block_storage + assert block_storage.api_version == '3' + self.admin_block_storage_client = block_storage + self.PROJECT_ID = self.create_temporary_project().id + + def test_default_type(self): + # Create a volume type + type_name = self.getUniqueString() + volume_type_id = self.admin_block_storage_client.create_type( + name=type_name, + ).id + + # Set default type for a project + default_type = self.admin_block_storage_client.set_default_type( + self.PROJECT_ID, + volume_type_id, + ) + self.assertIsInstance(default_type, _default_type.DefaultType) + + # Show default type for a project + default_type = self.admin_block_storage_client.show_default_type( + self.PROJECT_ID + ) + self.assertIsInstance(default_type, _default_type.DefaultType) + self.assertEqual(volume_type_id, default_type.volume_type_id) + + # List all default types + default_types = self.admin_block_storage_client.default_types() + for default_type in default_types: + self.assertIsInstance(default_type, _default_type.DefaultType) + # There could be existing default types set in the environment + # Just verify that the default type we have set is correct + if self.PROJECT_ID == default_type.project_id: + self.assertEqual(volume_type_id, default_type.volume_type_id) + + # Unset default type for a project + default_type = self.admin_block_storage_client.unset_default_type( + self.PROJECT_ID + ) + self.assertIsNone(default_type) + + # Delete the volume type + vol_type = self.admin_block_storage_client.delete_type( + volume_type_id, + ignore_missing=False, + ) + self.assertIsNone(vol_type) 
diff --git a/openstack/tests/functional/block_storage/v3/test_extension.py b/openstack/tests/functional/block_storage/v3/test_extension.py new file mode 100644 index 0000000000..ecf582b1a2 --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_extension.py @@ -0,0 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.functional.block_storage.v3 import base + + +class Extensions(base.BaseBlockStorageTest): + def test_get(self): + extensions = list(self.operator_cloud.block_storage.extensions()) + + for extension in extensions: + self.assertIsInstance(extension.alias, str) + self.assertIsInstance(extension.description, str) + self.assertIsInstance(extension.updated_at, str) diff --git a/openstack/tests/functional/block_storage/v3/test_group.py b/openstack/tests/functional/block_storage/v3/test_group.py new file mode 100644 index 0000000000..8bf27bf3fe --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_group.py @@ -0,0 +1,232 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import group as _group +from openstack.block_storage.v3 import group_snapshot as _group_snapshot +from openstack.block_storage.v3 import group_type as _group_type +from openstack.block_storage.v3 import volume as _volume +from openstack.tests.functional.block_storage.v3 import base + + +class TestGroup(base.BaseBlockStorageTest): + # TODO(stephenfin): We should use setUpClass here for MOAR SPEED!!! + def setUp(self): + super().setUp() + + # there will always be at least one volume type, i.e. the default one + volume_types = list(self.admin_block_storage_client.types()) + self.volume_type = volume_types[0] + + group_type_name = self.getUniqueString() + self.group_type = self.admin_block_storage_client.create_group_type( + name=group_type_name, + ) + self.assertIsInstance(self.group_type, _group_type.GroupType) + self.assertEqual(group_type_name, self.group_type.name) + + group_name = self.getUniqueString() + self.group = self.admin_block_storage_client.create_group( + name=group_name, + group_type=self.group_type.id, + volume_types=[self.volume_type.id], + ) + self.assertIsInstance(self.group, _group.Group) + self.assertEqual(group_name, self.group.name) + + def tearDown(self): + # we do this in tearDown rather than via 'addCleanup' since we need to + # wait for the deletion of the group before moving onto the deletion of + # the group type + self.admin_block_storage_client.delete_group( + self.group, delete_volumes=True + ) + self.admin_block_storage_client.wait_for_delete(self.group) + + self.admin_block_storage_client.delete_group_type(self.group_type) + self.admin_block_storage_client.wait_for_delete(self.group_type) + + super().tearDown() + + def test_group_type(self): + # get + group_type = self.admin_block_storage_client.get_group_type( + self.group_type.id + ) + self.assertEqual(self.group_type.name, group_type.name) + + 
# find + group_type = self.admin_block_storage_client.find_group_type( + self.group_type.name, + ) + self.assertEqual(self.group_type.id, group_type.id) + + # list + group_types = list(self.admin_block_storage_client.group_types()) + # other tests may have created group types and there can be defaults so + # we don't assert that this is the *only* group type present + self.assertIn(self.group_type.id, {g.id for g in group_types}) + + # update + group_type_name = self.getUniqueString() + group_type_description = self.getUniqueString() + group_type = self.admin_block_storage_client.update_group_type( + self.group_type, + name=group_type_name, + description=group_type_description, + ) + self.assertIsInstance(group_type, _group_type.GroupType) + group_type = self.admin_block_storage_client.get_group_type( + self.group_type.id + ) + self.assertEqual(group_type_name, group_type.name) + self.assertEqual(group_type_description, group_type.description) + + def test_group_type_group_specs(self): + # create + group_type = ( + self.admin_block_storage_client.create_group_type_group_specs( + self.group_type, + {'foo': 'bar', 'acme': 'buzz'}, + ) + ) + self.assertIsInstance(group_type, _group_type.GroupType) + group_type = self.admin_block_storage_client.get_group_type( + self.group_type.id + ) + self.assertEqual( + {'foo': 'bar', 'acme': 'buzz'}, group_type.group_specs + ) + + # get + spec = self.admin_block_storage_client.get_group_type_group_specs_property( # noqa: E501 + self.group_type, + 'foo', + ) + self.assertEqual('bar', spec) + + # update + spec = self.admin_block_storage_client.update_group_type_group_specs_property( # noqa: E501 + self.group_type, + 'foo', + 'baz', + ) + self.assertEqual('baz', spec) + group_type = self.admin_block_storage_client.get_group_type( + self.group_type.id + ) + self.assertEqual( + {'foo': 'baz', 'acme': 'buzz'}, group_type.group_specs + ) + + # delete + self.admin_block_storage_client.delete_group_type_group_specs_property( + 
self.group_type, + 'foo', + ) + group_type = self.admin_block_storage_client.get_group_type( + self.group_type.id + ) + self.assertEqual({'acme': 'buzz'}, group_type.group_specs) + + def test_group(self): + # get + group = self.admin_block_storage_client.get_group(self.group.id) + self.assertEqual(self.group.name, group.name) + + # find + group = self.admin_block_storage_client.find_group(self.group.name) + self.assertEqual(self.group.id, group.id) + + # list + groups = self.admin_block_storage_client.groups() + # other tests may have created groups and there can be defaults so we + # don't assert that this is the *only* group present + self.assertIn(self.group.id, {g.id for g in groups}) + + # update + group_name = self.getUniqueString() + group_description = self.getUniqueString() + group = self.admin_block_storage_client.update_group( + self.group, + name=group_name, + description=group_description, + ) + self.assertIsInstance(group, _group.Group) + group = self.admin_block_storage_client.get_group(self.group.id) + self.assertEqual(group_name, group.name) + self.assertEqual(group_description, group.description) + + def test_group_snapshot(self): + # group snapshots require a volume + # no need for a teardown as the deletion of the group (with the + # 'delete_volumes' flag) will handle this but we do need to wait for + # the thing to be created + volume_name = self.getUniqueString() + self.volume = self.admin_block_storage_client.create_volume( + name=volume_name, + volume_type=self.volume_type.id, + group_id=self.group.id, + size=1, + ) + self.admin_block_storage_client.wait_for_status( + self.volume, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + self.assertIsInstance(self.volume, _volume.Volume) + + group_snapshot_name = self.getUniqueString() + self.group_snapshot = ( + self.admin_block_storage_client.create_group_snapshot( + name=group_snapshot_name, + group_id=self.group.id, + ) + ) + 
self.admin_block_storage_client.wait_for_status( + self.group_snapshot, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + self.assertIsInstance( + self.group_snapshot, + _group_snapshot.GroupSnapshot, + ) + + # get + group_snapshot = self.admin_block_storage_client.get_group_snapshot( + self.group_snapshot.id, + ) + self.assertEqual(self.group_snapshot.name, group_snapshot.name) + + # find + group_snapshot = self.admin_block_storage_client.find_group_snapshot( + self.group_snapshot.name, + ) + self.assertEqual(self.group_snapshot.id, group_snapshot.id) + + # list + group_snapshots = self.admin_block_storage_client.group_snapshots() + # other tests may have created group snapshot and there can be defaults + # so we don't assert that this is the *only* group snapshot present + self.assertIn(self.group_snapshot.id, {g.id for g in group_snapshots}) + + # update (not supported) + + # delete + self.admin_block_storage_client.delete_group_snapshot( + self.group_snapshot + ) + self.admin_block_storage_client.wait_for_delete(self.group_snapshot) diff --git a/openstack/tests/functional/block_storage/v3/test_limits.py b/openstack/tests/functional/block_storage/v3/test_limits.py new file mode 100644 index 0000000000..70e8ba072e --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_limits.py @@ -0,0 +1,30 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.tests.functional.block_storage.v3 import base + + +class TestLimits(base.BaseBlockStorageTest): + def test_get(self): + sot = self.operator_cloud.block_storage.get_limits() + self.assertIsNotNone(sot.absolute.max_total_backup_gigabytes) + self.assertIsNotNone(sot.absolute.max_total_backups) + self.assertIsNotNone(sot.absolute.max_total_snapshots) + self.assertIsNotNone(sot.absolute.max_total_volume_gigabytes) + self.assertIsNotNone(sot.absolute.max_total_volumes) + self.assertIsNotNone(sot.absolute.total_backup_gigabytes_used) + self.assertIsNotNone(sot.absolute.total_backups_used) + self.assertIsNotNone(sot.absolute.total_gigabytes_used) + self.assertIsNotNone(sot.absolute.total_snapshots_used) + self.assertIsNotNone(sot.absolute.total_volumes_used) + self.assertIsNotNone(sot.rate) diff --git a/openstack/tests/functional/block_storage/v3/test_quota_set.py b/openstack/tests/functional/block_storage/v3/test_quota_set.py new file mode 100644 index 0000000000..fc0331263e --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_quota_set.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v3 import quota_set as _quota_set +from openstack.tests.functional import base + + +class TestQuotaSet(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud must be set for this test") + + self.project = self.create_temporary_project() + + def test_quota_set(self): + # update quota + + quota_set = self.operator_cloud.block_storage.update_quota_set( + self.project.id, volumes=123 + ) + self.assertIsInstance(quota_set, _quota_set.QuotaSet) + self.assertEqual(quota_set.volumes, 123) + + # retrieve details of the (updated) quota + + quota_set = self.operator_cloud.block_storage.get_quota_set( + self.project.id + ) + self.assertIsInstance(quota_set, _quota_set.QuotaSet) + self.assertEqual(quota_set.volumes, 123) + + # retrieve quota defaults + + defaults = self.operator_cloud.block_storage.get_quota_set_defaults( + self.project.id + ) + self.assertIsInstance(defaults, _quota_set.QuotaSet) + self.assertNotEqual(defaults.volumes, 123) + + # revert quota + + self.operator_cloud.block_storage.revert_quota_set(self.project.id) diff --git a/openstack/tests/functional/block_storage/v3/test_resource_filters.py b/openstack/tests/functional/block_storage/v3/test_resource_filters.py new file mode 100644 index 0000000000..104ba6036d --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_resource_filters.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.tests.functional.block_storage.v3 import base + + +class ResourceFilters(base.BaseBlockStorageTest): + def test_get(self): + resource_filters = list( + self.admin_block_storage_client.resource_filters() + ) + + for rf in resource_filters: + self.assertIsInstance(rf.filters, list) + self.assertIsInstance(rf.resource, str) diff --git a/openstack/tests/functional/block_storage/v3/test_service.py b/openstack/tests/functional/block_storage/v3/test_service.py new file mode 100644 index 0000000000..346d86629b --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_service.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional import base +from openstack import utils + + +class TestService(base.BaseFunctionalTest): + # listing services is slooowwww + TIMEOUT_SCALING_FACTOR = 2.0 + + def test_list(self): + sot = list(self.operator_cloud.block_storage.services()) + self.assertIsNotNone(sot) + + def test_disable_enable(self): + block_storage = utils.ensure_service_version( + self.operator_cloud.block_storage, '3' + ) + for srv in block_storage.services(): + # only nova-block_storage can be updated + if srv.name == 'nova-block_storage': + block_storage.disable_service(srv) + block_storage.enable_service(srv) + break + + def test_find(self): + for srv in self.operator_cloud.block_storage.services(): + self.operator_cloud.block_storage.find_service( + srv.name, + host=srv.host, + ignore_missing=False, + ) diff --git a/openstack/tests/functional/block_storage/v3/test_snapshot.py b/openstack/tests/functional/block_storage/v3/test_snapshot.py new file mode 100644 index 0000000000..a20816d77a --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_snapshot.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.block_storage.v3 import snapshot as _snapshot +from openstack.block_storage.v3 import volume as _volume +from openstack.tests.functional.block_storage.v3 import base + + +class TestSnapshot(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + self.SNAPSHOT_NAME = self.getUniqueString() + self.SNAPSHOT_ID = None + self.VOLUME_NAME = self.getUniqueString() + self.VOLUME_ID = None + + volume = self.user_cloud.block_storage.create_volume( + name=self.VOLUME_NAME, size=1 + ) + self.user_cloud.block_storage.wait_for_status( + volume, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + assert isinstance(volume, _volume.Volume) + self.assertEqual(self.VOLUME_NAME, volume.name) + self.VOLUME_ID = volume.id + snapshot = self.user_cloud.block_storage.create_snapshot( + name=self.SNAPSHOT_NAME, volume_id=self.VOLUME_ID + ) + self.user_cloud.block_storage.wait_for_status( + snapshot, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + assert isinstance(snapshot, _snapshot.Snapshot) + self.assertEqual(self.SNAPSHOT_NAME, snapshot.name) + self.SNAPSHOT_ID = snapshot.id + + def tearDown(self): + snapshot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) + sot = self.user_cloud.block_storage.delete_snapshot( + snapshot, ignore_missing=False + ) + self.user_cloud.block_storage.wait_for_delete( + snapshot, interval=2, wait=self._wait_for_timeout + ) + self.assertIsNone(sot) + sot = self.user_cloud.block_storage.delete_volume( + self.VOLUME_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.block_storage.get_snapshot(self.SNAPSHOT_ID) + self.assertEqual(self.SNAPSHOT_NAME, sot.name) diff --git a/openstack/tests/functional/block_storage/v3/test_transfer.py b/openstack/tests/functional/block_storage/v3/test_transfer.py new file mode 100644 index 0000000000..786d15b7a7 --- /dev/null 
+++ b/openstack/tests/functional/block_storage/v3/test_transfer.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.functional.block_storage.v3 import base +from openstack import utils + + +class TestTransfer(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + self.VOLUME_NAME = self.getUniqueString() + + self.volume = self.user_cloud.block_storage.create_volume( + name=self.VOLUME_NAME, + size=1, + ) + self.user_cloud.block_storage.wait_for_status( + self.volume, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + self.VOLUME_ID = self.volume.id + + def tearDown(self): + sot = self.user_cloud.block_storage.delete_volume( + self.VOLUME_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_transfer(self): + if not utils.supports_microversion( + self.operator_cloud.block_storage, "3.55" + ): + self.skipTest("Cannot test new transfer API if MV < 3.55") + sot = self.operator_cloud.block_storage.create_transfer( + volume_id=self.VOLUME_ID, + name=self.VOLUME_NAME, + ) + self.assertIn('auth_key', sot) + self.assertIn('created_at', sot) + self.assertIn('id', sot) + self.assertIn('name', sot) + self.assertIn('volume_id', sot) + + sot = self.user_cloud.block_storage.delete_transfer( + sot.id, ignore_missing=False + ) diff --git a/openstack/tests/functional/block_storage/v3/test_type.py b/openstack/tests/functional/block_storage/v3/test_type.py new 
file mode 100644 index 0000000000..274d88b90b --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_type.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.block_storage.v3 import type as _type +from openstack.tests.functional.block_storage.v3 import base + + +class TestType(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + self.TYPE_NAME = self.getUniqueString() + self.TYPE_ID = None + if not self._operator_cloud_name: + self.skipTest("Operator cloud must be set for this test") + self._set_operator_cloud(block_storage_api_version='3') + sot = self.operator_cloud.block_storage.create_type( + name=self.TYPE_NAME + ) + assert isinstance(sot, _type.Type) + self.assertEqual(self.TYPE_NAME, sot.name) + self.TYPE_ID = sot.id + + def tearDown(self): + sot = self.operator_cloud.block_storage.delete_type( + self.TYPE_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.operator_cloud.block_storage.get_type(self.TYPE_ID) + self.assertEqual(self.TYPE_NAME, sot.name) diff --git a/openstack/tests/functional/block_storage/v3/test_volume.py b/openstack/tests/functional/block_storage/v3/test_volume.py new file mode 100644 index 0000000000..4f76c01cea --- /dev/null +++ b/openstack/tests/functional/block_storage/v3/test_volume.py @@ -0,0 +1,70 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance 
with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import volume as _volume +from openstack.tests.functional.block_storage.v3 import base + + +class TestVolume(base.BaseBlockStorageTest): + def setUp(self): + super().setUp() + + if not self.user_cloud.has_service('block-storage'): + self.skipTest('block-storage service not supported by cloud') + + volume_name = self.getUniqueString() + + self.volume = self.block_storage_client.create_volume( + name=volume_name, + size=1, + ) + self.block_storage_client.wait_for_status( + self.volume, + status='available', + failures=['error'], + interval=2, + wait=self._wait_for_timeout, + ) + self.assertIsInstance(self.volume, _volume.Volume) + self.assertEqual(volume_name, self.volume.name) + + def tearDown(self): + self.block_storage_client.delete_volume(self.volume) + super().tearDown() + + def test_volume(self): + # get + volume = self.block_storage_client.get_volume(self.volume.id) + self.assertEqual(self.volume.name, volume.name) + + # find + volume = self.block_storage_client.find_volume(self.volume.name) + self.assertEqual(self.volume.id, volume.id) + + # list + volumes = self.block_storage_client.volumes() + # other tests may have created volumes so we don't assert that this is + # the *only* volume present + self.assertIn(self.volume.id, {v.id for v in volumes}) + + # update + volume_name = self.getUniqueString() + volume_description = self.getUniqueString() + volume = self.block_storage_client.update_volume( + self.volume, + name=volume_name, + description=volume_description, + ) + 
self.assertIsInstance(volume, _volume.Volume) + volume = self.block_storage_client.get_volume(self.volume.id) + self.assertEqual(volume_name, volume.name) + self.assertEqual(volume_description, volume.description) diff --git a/openstack/tests/functional/block_store/v2/test_snapshot.py b/openstack/tests/functional/block_store/v2/test_snapshot.py deleted file mode 100644 index fd47476486..0000000000 --- a/openstack/tests/functional/block_store/v2/test_snapshot.py +++ /dev/null @@ -1,68 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from openstack.block_store.v2 import snapshot as _snapshot -from openstack.block_store.v2 import volume as _volume -from openstack.tests.functional import base - - -class TestSnapshot(base.BaseFunctionalTest): - - SNAPSHOT_NAME = uuid.uuid4().hex - SNAPSHOT_ID = None - VOLUME_NAME = uuid.uuid4().hex - VOLUME_ID = None - - @classmethod - def setUpClass(cls): - super(TestSnapshot, cls).setUpClass() - volume = cls.conn.block_store.create_volume( - name=cls.VOLUME_NAME, - size=1) - cls.conn.block_store.wait_for_status(volume, - status='available', - failures=['error'], - interval=2, - wait=120) - assert isinstance(volume, _volume.Volume) - cls.assertIs(cls.VOLUME_NAME, volume.name) - cls.VOLUME_ID = volume.id - snapshot = cls.conn.block_store.create_snapshot( - name=cls.SNAPSHOT_NAME, - volume_id=cls.VOLUME_ID) - cls.conn.block_store.wait_for_status(snapshot, - status='available', - failures=['error'], - interval=2, - wait=120) - assert isinstance(snapshot, _snapshot.Snapshot) - cls.assertIs(cls.SNAPSHOT_NAME, snapshot.name) - cls.SNAPSHOT_ID = snapshot.id - - @classmethod - def tearDownClass(cls): - snapshot = cls.conn.block_store.get_snapshot(cls.SNAPSHOT_ID) - sot = cls.conn.block_store.delete_snapshot(snapshot, - ignore_missing=False) - cls.conn.block_store.wait_for_delete(snapshot, - interval=2, - wait=120) - cls.assertIs(None, sot) - sot = cls.conn.block_store.delete_volume(cls.VOLUME_ID, - ignore_missing=False) - cls.assertIs(None, sot) - - def test_get(self): - sot = self.conn.block_store.get_snapshot(self.SNAPSHOT_ID) - self.assertEqual(self.SNAPSHOT_NAME, sot.name) diff --git a/openstack/tests/functional/block_store/v2/test_type.py b/openstack/tests/functional/block_store/v2/test_type.py deleted file mode 100644 index 428389adb7..0000000000 --- a/openstack/tests/functional/block_store/v2/test_type.py +++ /dev/null @@ -1,40 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in 
compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import uuid - -from openstack.block_store.v2 import type as _type -from openstack.tests.functional import base - - -class TestType(base.BaseFunctionalTest): - - TYPE_NAME = uuid.uuid4().hex - TYPE_ID = None - - @classmethod - def setUpClass(cls): - super(TestType, cls).setUpClass() - sot = cls.conn.block_store.create_type(name=cls.TYPE_NAME) - assert isinstance(sot, _type.Type) - cls.assertIs(cls.TYPE_NAME, sot.name) - cls.TYPE_ID = sot.id - - @classmethod - def tearDownClass(cls): - sot = cls.conn.block_store.delete_type(cls.TYPE_ID, - ignore_missing=False) - cls.assertIs(None, sot) - - def test_get(self): - sot = self.conn.block_store.get_type(self.TYPE_ID) - self.assertEqual(self.TYPE_NAME, sot.name) diff --git a/openstack/tests/functional/block_store/v2/test_volume.py b/openstack/tests/functional/block_store/v2/test_volume.py deleted file mode 100644 index c8d70ba52e..0000000000 --- a/openstack/tests/functional/block_store/v2/test_volume.py +++ /dev/null @@ -1,47 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import uuid - -from openstack.block_store.v2 import volume as _volume -from openstack.tests.functional import base - - -class TestVolume(base.BaseFunctionalTest): - - VOLUME_NAME = uuid.uuid4().hex - VOLUME_ID = None - - @classmethod - def setUpClass(cls): - super(TestVolume, cls).setUpClass() - volume = cls.conn.block_store.create_volume( - name=cls.VOLUME_NAME, - size=1) - cls.conn.block_store.wait_for_status(volume, - status='available', - failures=['error'], - interval=2, - wait=120) - assert isinstance(volume, _volume.Volume) - cls.assertIs(cls.VOLUME_NAME, volume.name) - cls.VOLUME_ID = volume.id - - @classmethod - def tearDownClass(cls): - sot = cls.conn.block_store.delete_volume(cls.VOLUME_ID, - ignore_missing=False) - cls.assertIs(None, sot) - - def test_get(self): - sot = self.conn.block_store.get_volume(self.VOLUME_ID) - self.assertEqual(self.VOLUME_NAME, sot.name) diff --git a/openstack/tests/functional/cloud/__init__.py b/openstack/tests/functional/cloud/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/cloud/test_aggregate.py b/openstack/tests/functional/cloud/test_aggregate.py new file mode 100644 index 0000000000..9ffe953b46 --- /dev/null +++ b/openstack/tests/functional/cloud/test_aggregate.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_aggregate +---------------------------------- + +Functional tests for aggregate resource. 
+""" + +from openstack.tests.functional import base + + +class TestAggregate(base.BaseFunctionalTest): + def test_aggregates(self): + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + aggregate_name = self.getUniqueString() + availability_zone = self.getUniqueString() + self.addCleanup(self.cleanup, aggregate_name) + aggregate = self.operator_cloud.create_aggregate(aggregate_name) + + aggregate_ids = [ + v['id'] for v in self.operator_cloud.list_aggregates() + ] + self.assertIn(aggregate['id'], aggregate_ids) + + aggregate = self.operator_cloud.update_aggregate( + aggregate_name, availability_zone=availability_zone + ) + self.assertEqual(availability_zone, aggregate['availability_zone']) + + aggregate = self.operator_cloud.set_aggregate_metadata( + aggregate_name, {'key': 'value'} + ) + self.assertIn('key', aggregate['metadata']) + + aggregate = self.operator_cloud.set_aggregate_metadata( + aggregate_name, {'key': None} + ) + self.assertNotIn('key', aggregate['metadata']) + + # Validate that we can delete by name + self.assertTrue(self.operator_cloud.delete_aggregate(aggregate_name)) + + def cleanup(self, aggregate_name): + aggregate = self.operator_cloud.get_aggregate(aggregate_name) + if aggregate: + self.operator_cloud.delete_aggregate(aggregate['id']) diff --git a/openstack/tests/functional/cloud/test_cluster_templates.py b/openstack/tests/functional/cloud/test_cluster_templates.py new file mode 100644 index 0000000000..a98c85a6fc --- /dev/null +++ b/openstack/tests/functional/cloud/test_cluster_templates.py @@ -0,0 +1,120 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_cluster_templates +---------------------------------- + +Functional tests for `openstack.cloud` cluster_template methods. +""" + +import subprocess + +import fixtures +from testtools import content + +from openstack.tests.functional import base + + +class TestClusterTemplate(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.has_service( + 'container-infrastructure-management' + ): + self.skipTest('Container service not supported by cloud') + self.ct = None + self.ssh_directory = self.useFixture(fixtures.TempDir()).path + + def test_cluster_templates(self): + '''Test cluster_templates functionality''' + name = 'fake-cluster_template' + server_type = 'vm' + public = False + image_id = 'fedora-atomic-f23-dib' + tls_disabled = False + registry_enabled = False + coe = 'kubernetes' + keypair_id = 'testkey' + + self.addDetail('cluster_template', content.text_content(name)) + self.addCleanup(self.cleanup, name) + + # generate a keypair to add to nova + subprocess.call( + [ + 'ssh-keygen', + '-t', + 'rsa', + '-N', + '', + '-f', + f'{self.ssh_directory}/id_rsa_sdk', + ] + ) + + # add keypair to nova + with open(f'{self.ssh_directory}/id_rsa_sdk.pub') as f: + key_content = f.read() + self.user_cloud.create_keypair('testkey', key_content) + + # Test we can create a cluster_template and we get it returned + self.ct = self.user_cloud.create_cluster_template( + name=name, image_id=image_id, keypair_id=keypair_id, coe=coe + ) + self.assertEqual(self.ct['name'], name) + self.assertEqual(self.ct['image_id'], image_id) + 
self.assertEqual(self.ct['keypair_id'], keypair_id) + self.assertEqual(self.ct['coe'], coe) + self.assertEqual(self.ct['registry_enabled'], registry_enabled) + self.assertEqual(self.ct['tls_disabled'], tls_disabled) + self.assertEqual(self.ct['public'], public) + self.assertEqual(self.ct['server_type'], server_type) + + # Test that we can list cluster_templates + cluster_templates = self.user_cloud.list_cluster_templates() + self.assertIsNotNone(cluster_templates) + + # Test we get the same cluster_template with the + # get_cluster_template method + cluster_template_get = self.user_cloud.get_cluster_template( + self.ct['uuid'] + ) + self.assertEqual(cluster_template_get['uuid'], self.ct['uuid']) + + # Test the get method also works by name + cluster_template_get = self.user_cloud.get_cluster_template(name) + self.assertEqual(cluster_template_get['name'], self.ct['name']) + + # Test we can update a field on the cluster_template and only that + # field is updated + cluster_template_update = self.user_cloud.update_cluster_template( + self.ct, tls_disabled=True + ) + self.assertEqual(cluster_template_update['uuid'], self.ct['uuid']) + self.assertTrue(cluster_template_update['tls_disabled']) + + # Test we can delete and get True returned + cluster_template_delete = self.user_cloud.delete_cluster_template( + self.ct['uuid'] + ) + self.assertTrue(cluster_template_delete) + + def cleanup(self, name): + if self.ct: + try: + self.user_cloud.delete_cluster_template(self.ct['name']) + except Exception: + pass + + # delete keypair + self.user_cloud.delete_keypair('testkey') diff --git a/openstack/tests/functional/cloud/test_coe_clusters.py b/openstack/tests/functional/cloud/test_coe_clusters.py new file mode 100644 index 0000000000..be0b1aab29 --- /dev/null +++ b/openstack/tests/functional/cloud/test_coe_clusters.py @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_coe_clusters +---------------------------------- + +Functional tests for COE clusters methods. +""" + +from openstack.tests.functional import base + + +class TestCompute(base.BaseFunctionalTest): + # NOTE(flwang): Currently, running Magnum on a cloud which doesn't support + # nested virtualization will lead to timeout. So this test file is mostly + # like a note to document why we can't have function testing for Magnum + # clusters CRUD. + pass diff --git a/openstack/tests/functional/cloud/test_compute.py b/openstack/tests/functional/cloud/test_compute.py new file mode 100644 index 0000000000..037a7e0251 --- /dev/null +++ b/openstack/tests/functional/cloud/test_compute.py @@ -0,0 +1,565 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_compute +---------------------------------- + +Functional tests for compute methods. 
+""" + +import datetime + +from fixtures import TimeoutException + +from openstack import exceptions +from openstack.tests.functional import base +from openstack import utils + + +class TestCompute(base.BaseFunctionalTest): + def setUp(self): + # OS_TEST_TIMEOUT is 90 sec by default + # but on a bad day, test_attach_detach_volume can take more time. + self.TIMEOUT_SCALING_FACTOR = 1.5 + + super().setUp() + self.server_name = self.getUniqueString() + + def _cleanup_servers_and_volumes(self, server_name): + """Delete the named server and any attached volumes. + + Adding separate cleanup calls for servers and volumes can be tricky + since they need to be done in the proper order. And sometimes deleting + a server can start the process of deleting a volume if it is booted + from that volume. This encapsulates that logic. + """ + server = self.user_cloud.get_server(server_name) + if not server: + return + volumes = self.user_cloud.get_volumes(server) + try: + self.user_cloud.delete_server(server.name, wait=True) + for volume in volumes: + if volume.status != 'deleting': + self.user_cloud.delete_volume(volume.id, wait=True) + except (exceptions.ResourceTimeout, TimeoutException): + # Ups, some timeout occured during process of deletion server + # or volumes, so now we will try to call delete each of them + # once again and we will try to live with it + self.user_cloud.delete_server(server.name) + for volume in volumes: + self.operator_cloud.delete_volume( + volume.id, wait=False, force=True + ) + + def test_create_and_delete_server(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + ) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.name, server['flavor']['original_name']) + self.assertIsNotNone(server['adminPass']) + 
self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_create_and_delete_server_auto_ip_delete_ips(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + auto_ip=True, + wait=True, + ) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.name, server['flavor']['original_name']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server( + self.server_name, wait=True, delete_ips=True + ) + ) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_attach_detach_volume(self): + self.skipTest('Volume functional tests temporarily disabled') + server_name = self.getUniqueString() + self.addCleanup(self._cleanup_servers_and_volumes, server_name) + server = self.user_cloud.create_server( + name=server_name, image=self.image, flavor=self.flavor, wait=True + ) + volume = self.user_cloud.create_volume(1) + vol_attachment = self.user_cloud.attach_volume(server, volume) + for key in ('device', 'serverId', 'volumeId'): + self.assertIn(key, vol_attachment) + self.assertTrue(vol_attachment[key]) # assert string is not empty + self.assertIsNone(self.user_cloud.detach_volume(server, volume)) + + def test_attach_volume_create_snapshot(self): + self.skipTest('Volume functional tests temporarily disabled') + server_name = self.getUniqueString() + self.addCleanup(self._cleanup_servers_and_volumes, server_name) + server = self.user_cloud.create_server( + name=server_name, image=self.image, flavor=self.flavor, wait=True + ) + volume = self.user_cloud.create_volume(1) + vol_attachment = 
self.user_cloud.attach_volume(server, volume) + for key in ('device', 'serverId', 'volumeId'): + self.assertIn(key, vol_attachment) + self.assertTrue(vol_attachment[key]) # assert string is not empty + snapshot = self.user_cloud.create_volume_snapshot( + volume_id=volume.id, force=True, wait=True + ) + self.addCleanup(self.user_cloud.delete_volume_snapshot, snapshot['id']) + self.assertIsNotNone(snapshot) + + def test_create_and_delete_server_with_config_drive(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + config_drive=True, + wait=True, + ) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.name, server['flavor']['original_name']) + self.assertTrue(server['has_config_drive']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_create_and_delete_server_with_config_drive_none(self): + # check that we're not sending invalid values for config_drive + # if it's passed in explicitly as None - which nodepool does if it's + # not set in the config + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + config_drive=None, + wait=True, + ) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.name, server['flavor']['original_name']) + self.assertFalse(server['has_config_drive']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + srv = 
self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_list_all_servers(self): + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + ) + # We're going to get servers from other tests, but that's ok, as long + # as we get the server we created with the demo user. + found_server = False + for s in self.operator_cloud.list_servers(all_projects=True): + if s.name == server.name: + found_server = True + self.assertTrue(found_server) + + def test_list_all_servers_bad_permissions(self): + # Normal users are not allowed to pass all_projects=True + self.assertRaises( + exceptions.SDKException, + self.user_cloud.list_servers, + all_projects=True, + ) + + def test_create_server_image_flavor_dict(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image={'id': self.image.id}, + flavor={'id': self.flavor.id}, + wait=True, + ) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.name, server['flavor']['original_name']) + self.assertIsNotNone(server['adminPass']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_get_server_console(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + ) + # _get_server_console_output does not trap HTTP exceptions, so this + # returning a string tests that the call 
is correct. Testing that + # the cloud returns actual data in the output is out of scope. + log = self.user_cloud._get_server_console_output(server_id=server.id) + self.assertIsInstance(log, str) + + def test_get_server_console_name_or_id(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + ) + log = self.user_cloud.get_server_console(server=self.server_name) + self.assertIsInstance(log, str) + + def test_list_availability_zone_names(self): + self.assertEqual( + ['nova'], self.user_cloud.list_availability_zone_names() + ) + + def test_get_server_console_bad_server(self): + self.assertRaises( + exceptions.SDKException, + self.user_cloud.get_server_console, + server=self.server_name, + ) + + def test_create_and_delete_server_with_admin_pass(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + admin_pass='sheiqu9loegahSh', + wait=True, + ) + self.assertEqual(self.server_name, server['name']) + self.assertEqual(self.image.id, server['image']['id']) + self.assertEqual(self.flavor.name, server['flavor']['original_name']) + self.assertEqual(server['adminPass'], 'sheiqu9loegahSh') + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_get_image_id(self): + self.assertEqual( + self.image.id, self.user_cloud.get_image_id(self.image.id) + ) + self.assertEqual( + self.image.id, self.user_cloud.get_image_id(self.image.name) + ) + + def test_get_image_name(self): + self.assertEqual( + self.image.name, self.user_cloud.get_image_name(self.image.id) + ) + self.assertEqual( + self.image.name, self.user_cloud.get_image_name(self.image.name) + ) + + def 
_assert_volume_attach(self, server, volume_id=None, image=''): + self.assertEqual(self.server_name, server['name']) + self.assertEqual(image, server['image']) + self.assertEqual(self.flavor.id, server['flavor']['id']) + volumes = self.user_cloud.get_volumes(server) + self.assertEqual(1, len(volumes)) + volume = volumes[0] + if volume_id: + self.assertEqual(volume_id, volume['id']) + else: + volume_id = volume['id'] + self.assertEqual(1, len(volume['attachments']), 1) + self.assertEqual(server['id'], volume['attachments'][0]['server_id']) + return volume_id + + def test_create_boot_from_volume_image(self): + self.skipTest('Volume functional tests temporarily disabled') + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + boot_from_volume=True, + volume_size=1, + wait=True, + ) + volume_id = self._assert_volume_attach(server) + volume = self.user_cloud.get_volume(volume_id) + self.assertIsNotNone(volume) + self.assertEqual(volume['name'], volume['display_name']) + self.assertTrue(volume['bootable']) + self.assertEqual(server['id'], volume['attachments'][0]['server_id']) + self.assertTrue(self.user_cloud.delete_server(server.id, wait=True)) + self._wait_for_detach(volume.id) + self.assertTrue(self.user_cloud.delete_volume(volume.id, wait=True)) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + self.assertIsNone(self.user_cloud.get_volume(volume.id)) + + def _wait_for_detach(self, volume_id): + # Volumes do not show up as unattached for a bit immediately after + # deleting a server that had had a volume attached. Yay for eventual + # consistency! 
+ for count in utils.iterate_timeout( + 60, + f'Timeout waiting for volume {volume_id} to detach', + ): + volume = self.user_cloud.get_volume(volume_id) + if volume.status in ( + 'available', + 'error', + 'error_restoring', + 'error_extending', + ): + return + + def test_create_terminate_volume_image(self): + self.skipTest('Volume functional tests temporarily disabled') + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + boot_from_volume=True, + terminate_volume=True, + volume_size=1, + wait=True, + ) + volume_id = self._assert_volume_attach(server) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + volume = self.user_cloud.get_volume(volume_id) + # We can either get None (if the volume delete was quick), or a volume + # that is in the process of being deleted. 
+ if volume: + self.assertEqual('deleting', volume.status) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_create_boot_from_volume_preexisting(self): + self.skipTest('Volume functional tests temporarily disabled') + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + volume = self.user_cloud.create_volume( + size=1, name=self.server_name, image=self.image, wait=True + ) + self.addCleanup(self.user_cloud.delete_volume, volume.id) + server = self.user_cloud.create_server( + name=self.server_name, + image=None, + flavor=self.flavor, + boot_volume=volume, + volume_size=1, + wait=True, + ) + volume_id = self._assert_volume_attach(server, volume_id=volume['id']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + volume = self.user_cloud.get_volume(volume_id) + self.assertIsNotNone(volume) + self.assertEqual(volume['name'], volume['display_name']) + self.assertTrue(volume['bootable']) + self.assertEqual([], volume['attachments']) + self._wait_for_detach(volume.id) + self.assertTrue(self.user_cloud.delete_volume(volume_id)) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + self.assertIsNone(self.user_cloud.get_volume(volume_id)) + + def test_create_boot_attach_volume(self): + self.skipTest('Volume functional tests temporarily disabled') + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + volume = self.user_cloud.create_volume( + size=1, name=self.server_name, image=self.image, wait=True + ) + self.addCleanup(self.user_cloud.delete_volume, volume['id']) + server = self.user_cloud.create_server( + name=self.server_name, + flavor=self.flavor, + 
image=self.image, + boot_from_volume=False, + volumes=[volume], + wait=True, + ) + volume_id = self._assert_volume_attach( + server, volume_id=volume['id'], image={'id': self.image['id']} + ) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + volume = self.user_cloud.get_volume(volume_id) + self.assertIsNotNone(volume) + self.assertEqual(volume['name'], volume['display_name']) + self.assertEqual([], volume['attachments']) + self._wait_for_detach(volume.id) + self.assertTrue(self.user_cloud.delete_volume(volume_id)) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + self.assertIsNone(self.user_cloud.get_volume(volume_id)) + + def test_create_boot_from_volume_preexisting_terminate(self): + self.skipTest('Volume functional tests temporarily disabled') + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + volume = self.user_cloud.create_volume( + size=1, name=self.server_name, image=self.image, wait=True + ) + server = self.user_cloud.create_server( + name=self.server_name, + image=None, + flavor=self.flavor, + boot_volume=volume, + terminate_volume=True, + volume_size=1, + wait=True, + ) + volume_id = self._assert_volume_attach(server, volume_id=volume['id']) + self.assertTrue( + self.user_cloud.delete_server(self.server_name, wait=True) + ) + volume = self.user_cloud.get_volume(volume_id) + # We can either get None (if the volume delete was quick), or a volume + # that is in the process of being deleted. 
+ if volume: + self.assertEqual('deleting', volume.status) + srv = self.user_cloud.get_server(self.server_name) + self.assertTrue(srv is None or srv.status.lower() == 'deleted') + + def test_create_image_snapshot_wait_active(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + server = self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + admin_pass='sheiqu9loegahSh', + wait=True, + ) + image = self.user_cloud.create_image_snapshot( + 'test-snapshot', server, wait=True + ) + self.addCleanup(self.user_cloud.delete_image, image['id']) + self.assertEqual('active', image['status']) + + def test_set_and_delete_metadata(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + ) + self.user_cloud.set_server_metadata( + self.server_name, {'key1': 'value1', 'key2': 'value2'} + ) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual( + set(updated_server.metadata.items()), + set({'key1': 'value1', 'key2': 'value2'}.items()), + ) + + self.user_cloud.set_server_metadata( + self.server_name, {'key2': 'value3'} + ) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual( + set(updated_server.metadata.items()), + set({'key1': 'value1', 'key2': 'value3'}.items()), + ) + + self.user_cloud.delete_server_metadata(self.server_name, ['key2']) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual( + set(updated_server.metadata.items()), + set({'key1': 'value1'}.items()), + ) + + self.user_cloud.delete_server_metadata(self.server_name, ['key1']) + updated_server = self.user_cloud.get_server(self.server_name) + self.assertEqual(set(updated_server.metadata.items()), set()) + + self.assertRaises( + exceptions.NotFoundException, + self.user_cloud.delete_server_metadata, + self.server_name, + ['key1'], + 
) + + def test_update_server(self): + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + ) + server_updated = self.user_cloud.update_server( + self.server_name, name='new_name' + ) + self.assertEqual('new_name', server_updated['name']) + + def test_get_compute_usage(self): + '''Test usage functionality''' + # Add a server so that we can know we have usage + if not self.operator_cloud: + # TODO(gtema) rework method not to require getting project + self.skipTest("Operator cloud is required for this test") + self.addCleanup(self._cleanup_servers_and_volumes, self.server_name) + self.user_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + ) + start = datetime.datetime.now() - datetime.timedelta(seconds=5) + usage = self.operator_cloud.get_compute_usage('demo', start) + self.add_info_on_exception('usage', usage) + self.assertIsNotNone(usage) + self.assertIn('total_hours', usage) + self.assertIn('start', usage) + self.assertEqual(start.isoformat(), usage['start']) + self.assertIn('location', usage) diff --git a/openstack/tests/functional/cloud/test_devstack.py b/openstack/tests/functional/cloud/test_devstack.py new file mode 100644 index 0000000000..ddeeafdea1 --- /dev/null +++ b/openstack/tests/functional/cloud/test_devstack.py @@ -0,0 +1,44 @@ +# Copyright (c) 2016 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_devstack +------------- + +Throw errors if we do not actually detect the services we're supposed to. +""" + +import os + +from testscenarios import load_tests_apply_scenarios as load_tests # noqa + +from openstack.tests.functional import base + + +class TestDevstack(base.BaseFunctionalTest): + scenarios = [ + ('designate', dict(env='DESIGNATE', service='dns')), + ('heat', dict(env='HEAT', service='orchestration')), + ( + 'magnum', + dict(env='MAGNUM', service='container-infrastructure-management'), + ), + ('neutron', dict(env='NEUTRON', service='network')), + ('octavia', dict(env='OCTAVIA', service='load-balancer')), + ('swift', dict(env='SWIFT', service='object-store')), + ] + + def test_has_service(self): + if os.environ.get(f'OPENSTACKSDK_HAS_{self.env}', '0') == '1': + self.assertTrue(self.user_cloud.has_service(self.service)) diff --git a/openstack/tests/functional/cloud/test_domain.py b/openstack/tests/functional/cloud/test_domain.py new file mode 100644 index 0000000000..7c9a503bf7 --- /dev/null +++ b/openstack/tests/functional/cloud/test_domain.py @@ -0,0 +1,132 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_domain +---------------------------------- + +Functional tests for keystone domain resource. 
+""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestDomain(base.KeystoneBaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + self.domain_prefix = self.getUniqueString('domain') + self.addCleanup(self._cleanup_domains) + + def _cleanup_domains(self): + exception_list = list() + for domain in self.operator_cloud.list_domains(): + if domain['name'].startswith(self.domain_prefix): + try: + self.operator_cloud.delete_domain(domain['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_search_domains(self): + domain_name = self.domain_prefix + '_search' + + # Shouldn't find any domain with this name yet + results = self.operator_cloud.search_domains( + filters=dict(name=domain_name) + ) + self.assertEqual(0, len(results)) + + # Now create a new domain + domain = self.operator_cloud.create_domain(domain_name) + self.assertEqual(domain_name, domain['name']) + + # Now we should find only the new domain + results = self.operator_cloud.search_domains( + filters=dict(name=domain_name) + ) + self.assertEqual(1, len(results)) + self.assertEqual(domain_name, results[0]['name']) + + # Now we search by name with name_or_id, should find only new domain + results = self.operator_cloud.search_domains(name_or_id=domain_name) + self.assertEqual(1, len(results)) + self.assertEqual(domain_name, results[0]['name']) + + def test_update_domain(self): + domain = self.operator_cloud.create_domain( + self.domain_prefix, 'description' + ) + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + updated = self.operator_cloud.update_domain( + domain['id'], + name='updated 
name', + description='updated description', + enabled=False, + ) + self.assertEqual('updated name', updated['name']) + self.assertEqual('updated description', updated['description']) + self.assertFalse(updated['enabled']) + + # Now we update domain by name with name_or_id + updated = self.operator_cloud.update_domain( + None, + name_or_id='updated name', + name='updated name 2', + description='updated description 2', + enabled=True, + ) + self.assertEqual('updated name 2', updated['name']) + self.assertEqual('updated description 2', updated['description']) + self.assertTrue(updated['enabled']) + + def test_delete_domain(self): + domain = self.operator_cloud.create_domain( + self.domain_prefix, 'description' + ) + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + deleted = self.operator_cloud.delete_domain(domain['id']) + self.assertTrue(deleted) + + # Now we delete domain by name with name_or_id + domain = self.operator_cloud.create_domain( + self.domain_prefix, 'description' + ) + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + deleted = self.operator_cloud.delete_domain(None, domain['name']) + self.assertTrue(deleted) + + # Finally, we assert we get False from delete_domain if domain does + # not exist + domain = self.operator_cloud.create_domain( + self.domain_prefix, 'description' + ) + self.assertEqual(self.domain_prefix, domain['name']) + self.assertEqual('description', domain['description']) + self.assertTrue(domain['enabled']) + deleted = self.operator_cloud.delete_domain(None, 'bogus_domain') + self.assertFalse(deleted) diff --git a/openstack/tests/functional/cloud/test_endpoints.py b/openstack/tests/functional/cloud/test_endpoints.py new file mode 100644 index 0000000000..4d7ec81b76 --- /dev/null +++ b/openstack/tests/functional/cloud/test_endpoints.py @@ 
-0,0 +1,235 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_endpoint +---------------------------------- + +Functional tests for endpoint resource. +""" + +import random +import string + +from openstack import exceptions +from openstack.tests.functional import base +from openstack import utils + + +class TestEndpoints(base.KeystoneBaseFunctionalTest): + endpoint_attributes = [ + 'id', + 'region', + 'publicurl', + 'internalurl', + 'service_id', + 'adminurl', + ] + + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + # Generate a random name for services and regions in this test + self.new_item_name = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5) + ) + + self.addCleanup(self._cleanup_services) + self.addCleanup(self._cleanup_endpoints) + + def _cleanup_endpoints(self): + exception_list = list() + for endpoint in self.operator_cloud.list_endpoints(): + if endpoint.get('region') is not None and endpoint[ + 'region' + ].startswith(self.new_item_name): + try: + self.operator_cloud.delete_endpoint(id=endpoint['id']) + except Exception as exc: + # We were unable to delete a service, let's try with next + exception_list.append(str(exc)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise 
exceptions.SDKException('\n'.join(exception_list)) + + def _cleanup_services(self): + exception_list = list() + for s in self.operator_cloud.list_services(): + if s['name'] is not None and s['name'].startswith( + self.new_item_name + ): + try: + self.operator_cloud.delete_service(name_or_id=s['id']) + except Exception as e: + # We were unable to delete a service, let's try with next + exception_list.append(str(e)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_endpoint(self): + service_name = self.new_item_name + '_create' + + identity = utils.ensure_service_version( + self.operator_cloud.identity, '3' + ) + region = next(iter(identity.regions())).id + + service = self.operator_cloud.create_service( + name=service_name, + type='test_type', + description='this is a test description', + ) + + endpoints = self.operator_cloud.create_endpoint( + service_name_or_id=service['id'], + public_url='http://public.test/', + internal_url='http://internal.test/', + admin_url='http://admin.url/', + region=region, + ) + + self.assertNotEqual([], endpoints) + self.assertIsNotNone(endpoints[0].get('id')) + + # Test None parameters + endpoints = self.operator_cloud.create_endpoint( + service_name_or_id=service['id'], + public_url='http://public.test/', + region=region, + ) + + self.assertNotEqual([], endpoints) + self.assertIsNotNone(endpoints[0].get('id')) + + def test_update_endpoint(self): + # service operations require existing region. 
Do not test updating + # region for now + identity = utils.ensure_service_version( + self.operator_cloud.identity, '3' + ) + region = next(iter(identity.regions())).id + + service = self.operator_cloud.create_service( + name='service1', type='test_type' + ) + endpoint = self.operator_cloud.create_endpoint( + service_name_or_id=service['id'], + url='http://admin.url/', + interface='admin', + region=region, + enabled=False, + )[0] + + new_service = self.operator_cloud.create_service( + name='service2', type='test_type' + ) + new_endpoint = self.operator_cloud.update_endpoint( + endpoint.id, + service_name_or_id=new_service.id, + url='http://public.url/', + interface='public', + region=region, + enabled=True, + ) + + self.assertEqual(new_endpoint.url, 'http://public.url/') + self.assertEqual(new_endpoint.interface, 'public') + self.assertEqual(new_endpoint.region_id, region) + self.assertEqual(new_endpoint.service_id, new_service.id) + self.assertTrue(new_endpoint.is_enabled) + + def test_list_endpoints(self): + service_name = self.new_item_name + '_list' + + identity = utils.ensure_service_version( + self.operator_cloud.identity, '3' + ) + region = next(iter(identity.regions())).id + + service = self.operator_cloud.create_service( + name=service_name, + type='test_type', + description='this is a test description', + ) + + endpoints = self.operator_cloud.create_endpoint( + service_name_or_id=service['id'], + public_url='http://public.test/', + internal_url='http://internal.test/', + region=region, + ) + + observed_endpoints = self.operator_cloud.list_endpoints() + found = False + for e in observed_endpoints: + # Test all attributes are returned + for endpoint in endpoints: + if e['id'] == endpoint['id']: + found = True + self.assertEqual(service['id'], e['service_id']) + if 'interface' in e: + if e['interface'] == 'internal': + self.assertEqual('http://internal.test/', e['url']) + elif e['interface'] == 'public': + self.assertEqual('http://public.test/', e['url']) + 
else: + self.assertEqual('http://public.test/', e['publicurl']) + self.assertEqual( + 'http://internal.test/', e['internalurl'] + ) + self.assertEqual(region, e['region_id']) + + self.assertTrue(found, msg='new endpoint not found in endpoints list!') + + def test_delete_endpoint(self): + service_name = self.new_item_name + '_delete' + + identity = utils.ensure_service_version( + self.operator_cloud.identity, '3' + ) + region = next(iter(identity.regions())).id + + service = self.operator_cloud.create_service( + name=service_name, + type='test_type', + description='this is a test description', + ) + + endpoints = self.operator_cloud.create_endpoint( + service_name_or_id=service['id'], + public_url='http://public.test/', + internal_url='http://internal.test/', + region=region, + ) + + self.assertNotEqual([], endpoints) + for endpoint in endpoints: + self.operator_cloud.delete_endpoint(endpoint['id']) + + observed_endpoints = self.operator_cloud.list_endpoints() + found = False + for e in observed_endpoints: + for endpoint in endpoints: + if e['id'] == endpoint['id']: + found = True + break + self.assertEqual(False, found, message='new endpoint was not deleted!') diff --git a/openstack/tests/functional/cloud/test_flavor.py b/openstack/tests/functional/cloud/test_flavor.py new file mode 100644 index 0000000000..6181c97279 --- /dev/null +++ b/openstack/tests/functional/cloud/test_flavor.py @@ -0,0 +1,190 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_flavor +---------------------------------- + +Functional tests for flavor resource. +""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestFlavor(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + # Generate a random name for flavors in this test + self.new_item_name = self.getUniqueString('flavor') + + self.addCleanup(self._cleanup_flavors) + + def _cleanup_flavors(self): + exception_list = list() + if self.operator_cloud: + for f in self.operator_cloud.list_flavors(get_extra=False): + if f['name'].startswith(self.new_item_name): + try: + self.operator_cloud.delete_flavor(f['id']) + except Exception as e: + # We were unable to delete a flavor, let's try with + # next + exception_list.append(str(e)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_flavor(self): + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + flavor_name = self.new_item_name + '_create' + flavor_kwargs = dict( + name=flavor_name, + ram=1024, + vcpus=2, + disk=10, + ephemeral=5, + swap=100, + rxtx_factor=1.5, + is_public=True, + ) + + flavor = self.operator_cloud.create_flavor(**flavor_kwargs) + + self.assertIsNotNone(flavor['id']) + + # When properly normalized, we should always get an extra_specs + # and expect empty dict on create. 
+ self.assertIn('extra_specs', flavor) + self.assertEqual({}, flavor['extra_specs']) + + # We should also always have ephemeral and public attributes + self.assertIn('ephemeral', flavor) + self.assertEqual(5, flavor['ephemeral']) + self.assertIn('is_public', flavor) + self.assertTrue(flavor['is_public']) + + for key in flavor_kwargs.keys(): + self.assertIn(key, flavor) + for key, value in flavor_kwargs.items(): + self.assertEqual(value, flavor[key]) + + def test_list_flavors(self): + pub_flavor_name = self.new_item_name + '_public' + priv_flavor_name = self.new_item_name + '_private' + public_kwargs = dict( + name=pub_flavor_name, ram=1024, vcpus=2, disk=10, is_public=True + ) + private_kwargs = dict( + name=priv_flavor_name, ram=1024, vcpus=2, disk=10, is_public=False + ) + + if self.operator_cloud: + # Create a public and private flavor. We expect both to be listed + # for an operator. + self.operator_cloud.create_flavor(**public_kwargs) + self.operator_cloud.create_flavor(**private_kwargs) + + flavors = self.operator_cloud.list_flavors(get_extra=False) + + # Flavor list will include the standard devstack flavors. We just + # want to make sure both of the flavors we just created are + # present. 
+ found = [] + for f in flavors: + # extra_specs should be added within list_flavors() + self.assertIn('extra_specs', f) + if f['name'] in (pub_flavor_name, priv_flavor_name): + found.append(f) + self.assertEqual(2, len(found)) + else: + self.user_cloud.list_flavors() + + def test_flavor_access(self): + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + priv_flavor_name = self.new_item_name + '_private' + private_kwargs = dict( + name=priv_flavor_name, ram=1024, vcpus=2, disk=10, is_public=False + ) + new_flavor = self.operator_cloud.create_flavor(**private_kwargs) + + # Validate the 'demo' user cannot see the new flavor + flavors = self.user_cloud.search_flavors(priv_flavor_name) + self.assertEqual(0, len(flavors)) + + # We need the tenant ID for the 'demo' user + project = self.operator_cloud.get_project('demo') + self.assertIsNotNone(project) + + # Now give 'demo' access + self.operator_cloud.add_flavor_access(new_flavor['id'], project['id']) + + # Now see if the 'demo' user has access to it + flavors = self.user_cloud.search_flavors(priv_flavor_name) + self.assertEqual(1, len(flavors)) + self.assertEqual(priv_flavor_name, flavors[0]['name']) + + # Now see if the 'demo' user has access to it without needing + # the demo_cloud access. 
+ acls = self.operator_cloud.list_flavor_access(new_flavor['id']) + self.assertEqual(1, len(acls)) + self.assertEqual(project['id'], acls[0]['tenant_id']) + + # Now revoke the access and make sure we can't find it + self.operator_cloud.remove_flavor_access( + new_flavor['id'], project['id'] + ) + flavors = self.user_cloud.search_flavors(priv_flavor_name) + self.assertEqual(0, len(flavors)) + + def test_set_unset_flavor_specs(self): + """ + Test setting and unsetting flavor extra specs + """ + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + flavor_name = self.new_item_name + '_spec_test' + kwargs = dict(name=flavor_name, ram=1024, vcpus=2, disk=10) + new_flavor = self.operator_cloud.create_flavor(**kwargs) + + # Expect no extra_specs + self.assertEqual({}, new_flavor['extra_specs']) + + # Now set them + extra_specs = {'foo': 'aaa', 'bar': 'bbb'} + self.operator_cloud.set_flavor_specs(new_flavor['id'], extra_specs) + mod_flavor = self.operator_cloud.get_flavor( + new_flavor['id'], get_extra=True + ) + + # Verify extra_specs were set + self.assertIn('extra_specs', mod_flavor) + self.assertEqual(extra_specs, mod_flavor['extra_specs']) + + # Unset the 'foo' value + self.operator_cloud.unset_flavor_specs(mod_flavor['id'], ['foo']) + mod_flavor = self.operator_cloud.get_flavor_by_id( + new_flavor['id'], get_extra=True + ) + + # Verify 'foo' is unset and 'bar' is still set + self.assertEqual({'bar': 'bbb'}, mod_flavor['extra_specs']) diff --git a/openstack/tests/functional/cloud/test_floating_ip.py b/openstack/tests/functional/cloud/test_floating_ip.py new file mode 100644 index 0000000000..e557fd3785 --- /dev/null +++ b/openstack/tests/functional/cloud/test_floating_ip.py @@ -0,0 +1,342 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_floating_ip +---------------------------------- + +Functional tests for floating IP resource. +""" + +import pprint +import sys + +from testtools import content + +from openstack.cloud import meta +from openstack import exceptions +from openstack import proxy +from openstack.tests.functional import base +from openstack import utils + + +class TestFloatingIP(base.BaseFunctionalTest): + timeout = 60 + + def setUp(self): + super().setUp() + + # Generate a random name for these tests + self.new_item_name = self.getUniqueString() + + self.addCleanup(self._cleanup_network) + self.addCleanup(self._cleanup_servers) + + def _cleanup_network(self): + exception_list = list() + tb_list = list() + + # Delete stale networks as well as networks created for this test + if self.user_cloud.has_service('network'): + # Delete routers + for r in self.user_cloud.list_routers(): + try: + if r['name'].startswith(self.new_item_name): + self.user_cloud.update_router( + r, ext_gateway_net_id=None + ) + for s in self.user_cloud.list_subnets(): + if s['name'].startswith(self.new_item_name): + try: + self.user_cloud.remove_router_interface( + r, subnet_id=s['id'] + ) + except Exception: + pass + self.user_cloud.delete_router(r.id) + except Exception as e: + exception_list.append(e) + tb_list.append(sys.exc_info()[2]) + continue + # Delete subnets + for s in self.user_cloud.list_subnets(): + if s['name'].startswith(self.new_item_name): + try: + self.user_cloud.delete_subnet(s.id) + except Exception as e: + exception_list.append(e) + tb_list.append(sys.exc_info()[2]) + 
continue + # Delete networks + for n in self.user_cloud.list_networks(): + if n['name'].startswith(self.new_item_name): + try: + self.user_cloud.delete_network(n.id) + except Exception as e: + exception_list.append(e) + tb_list.append(sys.exc_info()[2]) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + if len(exception_list) > 1: + self.addDetail( + 'exceptions', + content.text_content( + '\n'.join([str(ex) for ex in exception_list]) + ), + ) + exc = exception_list[0] + raise exc + + def _cleanup_servers(self): + exception_list = list() + + # Delete stale servers as well as server created for this test + for i in self.user_cloud.list_servers(bare=True): + if i.name.startswith(self.new_item_name): + try: + self.user_cloud.delete_server(i.id, wait=True) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def _cleanup_ips(self, server): + exception_list = list() + + fixed_ip = meta.get_server_private_ip(server) + + for ip in self.user_cloud.list_floating_ips(): + if ( + ip.get('fixed_ip', None) == fixed_ip + or ip.get('fixed_ip_address', None) == fixed_ip + ): + try: + self.user_cloud.delete_floating_ip(ip.id) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def _setup_networks(self): + if self.user_cloud.has_service('network'): + # Create a network + self.test_net = self.user_cloud.create_network( + name=self.new_item_name + '_net' + ) + # Create a subnet on it + self.test_subnet = self.user_cloud.create_subnet( + subnet_name=self.new_item_name + '_subnet', + network_name_or_id=self.test_net['id'], + cidr='10.24.4.0/24', + enable_dhcp=True, + ) + # Create a 
router + self.test_router = self.user_cloud.create_router( + name=self.new_item_name + '_router' + ) + # Attach the router to an external network + ext_nets = self.user_cloud.search_networks( + filters={'router:external': True} + ) + self.user_cloud.update_router( + name_or_id=self.test_router['id'], + ext_gateway_net_id=ext_nets[0]['id'], + ) + # Attach the router to the internal subnet + self.user_cloud.add_router_interface( + self.test_router, subnet_id=self.test_subnet['id'] + ) + + # Select the network for creating new servers + self.nic = {'net-id': self.test_net['id']} + self.addDetail( + 'networks-neutron', + content.text_content( + pprint.pformat(self.user_cloud.list_networks()) + ), + ) + else: + # Find network names for nova-net + data = proxy._json_response( + self.user_cloud.compute.get('/os-tenant-networks') + ) + nets = meta.get_and_munchify('networks', data) + self.addDetail( + 'networks-nova', content.text_content(pprint.pformat(nets)) + ) + self.nic = {'net-id': nets[0].id} + + def test_private_ip(self): + self._setup_networks() + + new_server = self.user_cloud.get_openstack_vars( + self.user_cloud.create_server( + wait=True, + name=self.new_item_name + '_server', + image=self.image, + flavor=self.flavor, + nics=[self.nic], + ) + ) + + self.addDetail( + 'server', content.text_content(pprint.pformat(new_server)) + ) + self.assertNotEqual(new_server['private_v4'], '') + + def test_add_auto_ip(self): + self._setup_networks() + + new_server = self.user_cloud.create_server( + wait=True, + name=self.new_item_name + '_server', + image=self.image, + flavor=self.flavor, + nics=[self.nic], + ) + + # ToDo: remove the following iteration when create_server waits for + # the IP to be attached + ip = None + for _ in utils.iterate_timeout( + self.timeout, "Timeout waiting for IP address to be attached" + ): + ip = meta.get_server_external_ipv4(self.user_cloud, new_server) + if ip is not None: + break + new_server = self.user_cloud.get_server(new_server.id) + + 
self.addCleanup(self._cleanup_ips, new_server) + + def test_detach_ip_from_server(self): + self._setup_networks() + + new_server = self.user_cloud.create_server( + wait=True, + name=self.new_item_name + '_server', + image=self.image, + flavor=self.flavor, + nics=[self.nic], + ) + + # ToDo: remove the following iteration when create_server waits for + # the IP to be attached + ip = None + for _ in utils.iterate_timeout( + self.timeout, "Timeout waiting for IP address to be attached" + ): + ip = meta.get_server_external_ipv4(self.user_cloud, new_server) + if ip is not None: + break + new_server = self.user_cloud.get_server(new_server.id) + + self.addCleanup(self._cleanup_ips, new_server) + + f_ip = self.user_cloud.get_floating_ip( + id=None, filters={'floating_ip_address': ip} + ) + self.user_cloud.detach_ip_from_server( + server_id=new_server.id, floating_ip_id=f_ip['id'] + ) + + def test_list_floating_ips(self): + if self.operator_cloud: + fip_admin = self.operator_cloud.create_floating_ip() + self.addCleanup( + self.operator_cloud.delete_floating_ip, fip_admin.id + ) + fip_user = self.user_cloud.create_floating_ip() + self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) + + # Get all the floating ips. + if self.operator_cloud: + fip_op_id_list = [ + fip.id for fip in self.operator_cloud.list_floating_ips() + ] + fip_user_id_list = [ + fip.id for fip in self.user_cloud.list_floating_ips() + ] + + if self.user_cloud.has_service('network'): + self.assertIn(fip_user.id, fip_user_id_list) + # Neutron returns all FIP for all projects by default + if self.operator_cloud and fip_admin: + self.assertIn(fip_user.id, fip_op_id_list) + + # Ask Neutron for only a subset of all the FIPs. 
+ if self.operator_cloud: + filtered_fip_id_list = [ + fip.id + for fip in self.operator_cloud.list_floating_ips( + {'tenant_id': self.user_cloud.current_project_id} + ) + ] + self.assertNotIn(fip_admin.id, filtered_fip_id_list) + self.assertIn(fip_user.id, filtered_fip_id_list) + + else: + if fip_admin: + self.assertIn(fip_admin.id, fip_op_id_list) + # By default, Nova returns only the FIPs that belong to the + # project which made the listing request. + if self.operator_cloud: + self.assertNotIn(fip_user.id, fip_op_id_list) + self.assertRaisesRegex( + ValueError, + "Nova-network don't support server-side.*", + self.operator_cloud.list_floating_ips, + filters={'foo': 'bar'}, + ) + + def test_search_floating_ips(self): + fip_user = self.user_cloud.create_floating_ip() + self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) + + self.assertIn( + fip_user['id'], + [fip.id for fip in self.user_cloud.search_floating_ips()], + ) + + def test_get_floating_ip_by_id(self): + fip_user = self.user_cloud.create_floating_ip() + self.addCleanup(self.user_cloud.delete_floating_ip, fip_user.id) + + ret_fip = self.user_cloud.get_floating_ip_by_id(fip_user.id) + self.assertEqual(fip_user, ret_fip) + + def test_available_floating_ip(self): + fips_user = self.user_cloud.list_floating_ips() + self.assertEqual(fips_user, []) + + new_fip = self.user_cloud.available_floating_ip() + self.assertIsNotNone(new_fip) + self.assertIn('id', new_fip) + self.addCleanup(self.user_cloud.delete_floating_ip, new_fip.id) + + new_fips_user = self.user_cloud.list_floating_ips() + self.assertEqual(new_fips_user, [new_fip]) + + reuse_fip = self.user_cloud.available_floating_ip() + self.assertEqual(reuse_fip.id, new_fip.id) diff --git a/openstack/tests/functional/cloud/test_floating_ip_pool.py b/openstack/tests/functional/cloud/test_floating_ip_pool.py new file mode 100644 index 0000000000..3fe94f59c1 --- /dev/null +++ b/openstack/tests/functional/cloud/test_floating_ip_pool.py @@ -0,0 +1,41 @@ +# 
Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_floating_ip_pool +---------------------------------- + +Functional tests for floating IP pool resource (managed by nova) +""" + +from openstack.tests.functional import base + + +# When using nova-network, floating IP pools are created with nova-manage +# command. +# When using Neutron, floating IP pools in Nova are mapped from external +# network names. This only if the floating-ip-pools nova extension is +# available. +# For instance, for current implementation of hpcloud that's not true: +# nova floating-ip-pool-list returns 404. + + +class TestFloatingIPPool(base.BaseFunctionalTest): + def test_list_floating_ip_pools(self): + pools = self.user_cloud.list_floating_ip_pools() + if not pools: + self.assertFalse('no floating-ip pool available') + + for pool in pools: + self.assertIn('name', pool) diff --git a/openstack/tests/functional/cloud/test_groups.py b/openstack/tests/functional/cloud/test_groups.py new file mode 100644 index 0000000000..fffbdfb793 --- /dev/null +++ b/openstack/tests/functional/cloud/test_groups.py @@ -0,0 +1,108 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_groups +---------------------------------- + +Functional tests for keystone group resource. +""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestGroup(base.KeystoneBaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + self.group_prefix = self.getUniqueString('group') + self.addCleanup(self._cleanup_groups) + + def _cleanup_groups(self): + exception_list = list() + for group in self.operator_cloud.list_groups(): + if group['name'].startswith(self.group_prefix): + try: + self.operator_cloud.delete_group(group['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_group(self): + group_name = self.group_prefix + '_create' + group = self.operator_cloud.create_group(group_name, 'test group') + + for key in ('id', 'name', 'description', 'domain_id'): + self.assertIn(key, group) + self.assertEqual(group_name, group['name']) + self.assertEqual('test group', group['description']) + + def test_delete_group(self): + group_name = self.group_prefix + '_delete' + + group = self.operator_cloud.create_group(group_name, 'test group') + self.assertIsNotNone(group) + + self.assertTrue(self.operator_cloud.delete_group(group_name)) + + results = self.operator_cloud.search_groups( + filters=dict(name=group_name) + ) + 
self.assertEqual(0, len(results)) + + def test_delete_group_not_exists(self): + self.assertFalse(self.operator_cloud.delete_group('xInvalidGroupx')) + + def test_search_groups(self): + group_name = self.group_prefix + '_search' + + # Shouldn't find any group with this name yet + results = self.operator_cloud.search_groups( + filters=dict(name=group_name) + ) + self.assertEqual(0, len(results)) + + # Now create a new group + group = self.operator_cloud.create_group(group_name, 'test group') + self.assertEqual(group_name, group['name']) + + # Now we should find only the new group + results = self.operator_cloud.search_groups( + filters=dict(name=group_name) + ) + self.assertEqual(1, len(results)) + self.assertEqual(group_name, results[0]['name']) + + def test_update_group(self): + group_name = self.group_prefix + '_update' + group_desc = 'test group' + + group = self.operator_cloud.create_group(group_name, group_desc) + self.assertEqual(group_name, group['name']) + self.assertEqual(group_desc, group['description']) + + updated_group_name = group_name + '_xyz' + updated_group_desc = group_desc + ' updated' + updated_group = self.operator_cloud.update_group( + group_name, name=updated_group_name, description=updated_group_desc + ) + self.assertEqual(updated_group_name, updated_group['name']) + self.assertEqual(updated_group_desc, updated_group['description']) diff --git a/openstack/tests/functional/cloud/test_image.py b/openstack/tests/functional/cloud/test_image.py new file mode 100644 index 0000000000..a13b31cbff --- /dev/null +++ b/openstack/tests/functional/cloud/test_image.py @@ -0,0 +1,192 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_compute +---------------------------------- + +Functional tests for image methods. +""" + +import filecmp +import os +import tempfile + +from openstack.tests.functional import base + + +class TestImage(base.BaseFunctionalTest): + def test_create_image(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + tags=['custom'], + wait=True, + ) + finally: + self.user_cloud.delete_image(image_name, wait=True) + + def test_download_image(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + self.addCleanup(os.remove, test_image.name) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True, + ) + self.addCleanup(self.user_cloud.delete_image, image_name, wait=True) + output = os.path.join(tempfile.gettempdir(), self.getUniqueString()) + self.user_cloud.download_image(image_name, output) + self.addCleanup(os.remove, output) + self.assertTrue( + filecmp.cmp(test_image.name, output), + "Downloaded contents don't match created image", + ) + + def test_create_image_skip_duplicate(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + 
test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + first_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + validate_checksum=True, + wait=True, + ) + second_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + validate_checksum=True, + wait=True, + ) + self.assertEqual(first_image.id, second_image.id) + finally: + self.user_cloud.delete_image(image_name, wait=True) + + def test_create_image_force_duplicate(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + first_image = None + second_image = None + try: + first_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True, + ) + second_image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + allow_duplicates=True, + wait=True, + ) + self.assertNotEqual(first_image.id, second_image.id) + finally: + if first_image: + self.user_cloud.delete_image(first_image.id, wait=True) + if second_image: + self.user_cloud.delete_image(second_image.id, wait=True) + + def test_create_image_update_properties(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True, + ) + self.user_cloud.update_image_properties( + 
image=image, name=image_name, foo='bar' + ) + image = self.user_cloud.get_image(image_name) + self.assertIn('foo', image.properties) + self.assertEqual(image.properties['foo'], 'bar') + finally: + self.user_cloud.delete_image(image_name, wait=True) + + def test_create_image_without_filename(self): + image_name = self.getUniqueString('image') + image = self.user_cloud.create_image( + name=image_name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + allow_duplicates=True, + wait=False, + ) + self.assertEqual(image_name, image.name) + self.user_cloud.delete_image(image.id, wait=True) + + def test_get_image_by_id(self): + test_image = tempfile.NamedTemporaryFile(delete=False) + test_image.write(b'\0' * 1024 * 1024) + test_image.close() + image_name = self.getUniqueString('image') + try: + image = self.user_cloud.create_image( + name=image_name, + filename=test_image.name, + disk_format='raw', + container_format='bare', + min_disk=10, + min_ram=1024, + wait=True, + ) + image = self.user_cloud.get_image_by_id(image.id) + self.assertEqual(image_name, image.name) + self.assertEqual('raw', image.disk_format) + finally: + self.user_cloud.delete_image(image_name, wait=True) diff --git a/openstack/tests/functional/cloud/test_inventory.py b/openstack/tests/functional/cloud/test_inventory.py new file mode 100644 index 0000000000..532080b7e7 --- /dev/null +++ b/openstack/tests/functional/cloud/test_inventory.py @@ -0,0 +1,89 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_inventory +---------------------------------- + +Functional tests for inventory methods. +""" + +from openstack.cloud import inventory +from openstack.tests.functional import base + + +class TestInventory(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + # This needs to use an admin account, otherwise a public IP + # is not allocated from devstack. + self.inventory = inventory.OpenStackInventory(cloud='devstack-admin') + self.server_name = self.getUniqueString('inventory') + self.addCleanup(self._cleanup_server) + server = self.operator_cloud.create_server( + name=self.server_name, + image=self.image, + flavor=self.flavor, + wait=True, + auto_ip=True, + network='public', + ) + self.server_id = server['id'] + + def _cleanup_server(self): + self.user_cloud.delete_server(self.server_id, wait=True) + + def _test_host_content(self, host): + self.assertEqual(host['image']['id'], self.image.id) + self.assertIsInstance(host['volumes'], list) + self.assertIsInstance(host['metadata'], dict) + self.assertIn('interface_ip', host) + self.assertIn('ram', host['flavor']) + + def _test_expanded_host_content(self, host): + self.assertEqual(host['image']['name'], self.image.name) + self.assertEqual(host['flavor']['name'], self.flavor.name) + + def test_get_host(self): + host = self.inventory.get_host(self.server_id) + self.assertIsNotNone(host) + self.assertEqual(host['name'], self.server_name) + self._test_host_content(host) + self._test_expanded_host_content(host) + host_found = False + for host in self.inventory.list_hosts(): + if host['id'] == self.server_id: + host_found = True + self._test_host_content(host) + self.assertTrue(host_found) + + def test_get_host_no_detail(self): + host = self.inventory.get_host(self.server_id, expand=False) + 
self.assertIsNotNone(host) + self.assertEqual(host['name'], self.server_name) + + self.assertEqual(host['image']['id'], self.image.id) + self.assertNotIn('links', host['image']) + self.assertNotIn('name', host['image']) + self.assertIn('ram', host['flavor']) + + host_found = False + for host in self.inventory.list_hosts(expand=False): + if host['id'] == self.server_id: + host_found = True + self._test_host_content(host) + self.assertTrue(host_found) diff --git a/openstack/tests/functional/cloud/test_keypairs.py b/openstack/tests/functional/cloud/test_keypairs.py new file mode 100644 index 0000000000..9a4635b7f8 --- /dev/null +++ b/openstack/tests/functional/cloud/test_keypairs.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License.
+ +""" +test_keypairs +---------------------------------- + +Functional tests for keypairs methods +""" + +from openstack.tests import fakes +from openstack.tests.functional import base + + +class TestKeypairs(base.BaseFunctionalTest): + def test_create_and_delete(self): + '''Test creating and deleting keypairs functionality''' + name = self.getUniqueString('keypair') + self.addCleanup(self.user_cloud.delete_keypair, name) + keypair = self.user_cloud.create_keypair(name=name) + self.assertEqual(keypair['name'], name) + self.assertIsNotNone(keypair['public_key']) + self.assertIsNotNone(keypair['private_key']) + self.assertIsNotNone(keypair['fingerprint']) + self.assertEqual(keypair['type'], 'ssh') + + keypairs = self.user_cloud.list_keypairs() + self.assertIn(name, [k['name'] for k in keypairs]) + + self.user_cloud.delete_keypair(name) + + keypairs = self.user_cloud.list_keypairs() + self.assertNotIn(name, [k['name'] for k in keypairs]) + + def test_create_and_delete_with_key(self): + '''Test creating and deleting keypairs functionality''' + name = self.getUniqueString('keypair') + self.addCleanup(self.user_cloud.delete_keypair, name) + keypair = self.user_cloud.create_keypair( + name=name, public_key=fakes.FAKE_PUBLIC_KEY + ) + self.assertEqual(keypair['name'], name) + self.assertIsNotNone(keypair['public_key']) + self.assertIsNone(keypair['private_key']) + self.assertIsNotNone(keypair['fingerprint']) + self.assertEqual(keypair['type'], 'ssh') + + keypairs = self.user_cloud.list_keypairs() + self.assertIn(name, [k['name'] for k in keypairs]) + + self.user_cloud.delete_keypair(name) + + keypairs = self.user_cloud.list_keypairs() + self.assertNotIn(name, [k['name'] for k in keypairs]) diff --git a/openstack/tests/functional/cloud/test_limits.py b/openstack/tests/functional/cloud/test_limits.py new file mode 100644 index 0000000000..e4e6a19e72 --- /dev/null +++ b/openstack/tests/functional/cloud/test_limits.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, 
Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_limits +---------------------------------- + +Functional tests for limits method +""" + +from openstack.compute.v2 import limits as _limits +from openstack.tests.functional import base + + +class TestUsage(base.BaseFunctionalTest): + def test_get_our_compute_limits(self): + """Test limits functionality""" + limits = self.user_cloud.get_compute_limits() + self.assertIsNotNone(limits) + + self.assertIsInstance(limits, _limits.AbsoluteLimits) + self.assertIsNotNone(limits.server_meta) + self.assertIsNotNone(limits.image_meta) + + def test_get_other_compute_limits(self): + """Test limits functionality""" + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + limits = self.operator_cloud.get_compute_limits('demo') + self.assertIsNotNone(limits) + self.assertTrue(hasattr(limits, 'server_meta')) + + # Test normalize limits + self.assertFalse(hasattr(limits, 'maxImageMeta')) + + def test_get_our_volume_limits(self): + """Test limits functionality""" + limits = self.user_cloud.get_volume_limits() + self.assertIsNotNone(limits) + self.assertFalse(hasattr(limits, 'maxTotalVolumes')) + + def test_get_other_volume_limits(self): + """Test limits functionality""" + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + limits = self.operator_cloud.get_volume_limits('demo') + self.assertFalse(hasattr(limits, 'maxTotalVolumes')) diff --git 
a/openstack/tests/functional/cloud/test_magnum_services.py b/openstack/tests/functional/cloud/test_magnum_services.py new file mode 100644 index 0000000000..63ef86bde6 --- /dev/null +++ b/openstack/tests/functional/cloud/test_magnum_services.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_magnum_services +-------------------- + +Functional tests for services method. +""" + +from openstack.tests.functional import base + + +class TestMagnumServices(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.has_service( + 'container-infrastructure-management' + ): + self.skipTest('Container service not supported by cloud') + + def test_magnum_services(self): + '''Test magnum services functionality''' + + # Test that we can list services + services = self.operator_cloud.list_magnum_services() + + self.assertEqual(1, len(services)) + self.assertEqual(services[0]['id'], 1) + self.assertEqual('up', services[0]['state']) + self.assertEqual('magnum-conductor', services[0]['binary']) + self.assertGreater(services[0]['report_count'], 0) diff --git a/openstack/tests/functional/cloud/test_network.py b/openstack/tests/functional/cloud/test_network.py new file mode 100644 index 0000000000..1c596d769d --- /dev/null +++ b/openstack/tests/functional/cloud/test_network.py @@ -0,0 +1,137 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_network +---------------------------------- + +Functional tests for network methods. +""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestNetwork(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + self.network_name = self.getUniqueString('network') + self.addCleanup(self._cleanup_networks) + + def _cleanup_networks(self): + exception_list = list() + for network in self.operator_cloud.list_networks(): + if network['name'].startswith(self.network_name): + try: + self.operator_cloud.delete_network(network['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_network_basic(self): + net1 = self.operator_cloud.create_network(name=self.network_name) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertFalse(net1['shared']) + self.assertFalse(net1['router:external']) + self.assertTrue(net1['admin_state_up']) + self.assertTrue(net1['port_security_enabled']) + + def test_get_network_by_id(self): + net1 = self.operator_cloud.create_network(name=self.network_name) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertFalse(net1['shared']) + self.assertFalse(net1['router:external']) + 
self.assertTrue(net1['admin_state_up']) + + ret_net1 = self.operator_cloud.get_network_by_id(net1.id) + self.assertIn('id', ret_net1) + self.assertEqual(self.network_name, ret_net1['name']) + self.assertFalse(ret_net1['shared']) + self.assertFalse(ret_net1['router:external']) + self.assertTrue(ret_net1['admin_state_up']) + + def test_create_network_advanced(self): + net1 = self.operator_cloud.create_network( + name=self.network_name, + shared=True, + external=True, + admin_state_up=False, + ) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertTrue(net1['router:external']) + self.assertTrue(net1['shared']) + self.assertFalse(net1['admin_state_up']) + + def test_create_network_provider_flat(self): + existing_public = self.operator_cloud.search_networks( + filters={'provider:network_type': 'flat'} + ) + if existing_public: + self.skipTest('Physical network already allocated') + net1 = self.operator_cloud.create_network( + name=self.network_name, + shared=True, + provider={ + 'physical_network': 'public', + 'network_type': 'flat', + }, + ) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertEqual('flat', net1['provider:network_type']) + self.assertEqual('public', net1['provider:physical_network']) + self.assertIsNone(net1['provider:segmentation_id']) + + def test_create_network_port_security_disabled(self): + net1 = self.operator_cloud.create_network( + name=self.network_name, + port_security_enabled=False, + ) + self.assertIn('id', net1) + self.assertEqual(self.network_name, net1['name']) + self.assertTrue(net1['admin_state_up']) + self.assertFalse(net1['shared']) + self.assertFalse(net1['router:external']) + self.assertFalse(net1['port_security_enabled']) + + def test_list_networks_filtered(self): + net1 = self.operator_cloud.create_network(name=self.network_name) + self.assertIsNotNone(net1) + net2 = self.operator_cloud.create_network( + name=self.network_name + 'other' + ) + 
self.assertIsNotNone(net2) + match = self.operator_cloud.list_networks( + filters=dict(name=self.network_name) + ) + self.assertEqual(1, len(match)) + self.assertEqual(net1['name'], match[0]['name']) + + def test_update_network(self): + net = self.operator_cloud.create_network(name=self.network_name) + self.assertEqual(net.name, self.network_name) + new_name = self.getUniqueString('network') + net = self.operator_cloud.update_network(net.id, name=new_name) + self.addCleanup(self.operator_cloud.delete_network, new_name) + self.assertNotEqual(net.name, self.network_name) + self.assertEqual(net.name, new_name) diff --git a/openstack/tests/functional/cloud/test_object.py b/openstack/tests/functional/cloud/test_object.py new file mode 100644 index 0000000000..4b9d22c5fe --- /dev/null +++ b/openstack/tests/functional/cloud/test_object.py @@ -0,0 +1,190 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_object +---------------------------------- + +Functional tests for object methods. 
+""" + +import random +import string +import tempfile + +from testtools import content + +from openstack.tests.functional import base + + +class TestObject(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.has_service('object-store'): + self.skipTest('Object service not supported by cloud') + + def test_create_object(self): + '''Test uploading small and large files.''' + container_name = self.getUniqueString('container') + self.addDetail('container', content.text_content(container_name)) + self.addCleanup(self.user_cloud.delete_container, container_name) + self.user_cloud.create_container(container_name) + container = self.user_cloud.get_container(container_name) + self.assertEqual(container_name, container.name) + self.assertEqual( + [], self.user_cloud.list_containers(prefix='somethin') + ) + sizes = ( + (64 * 1024, 1), # 64K, one segment + (64 * 1024, 5), # 64MB, 5 segments + ) + for size, nseg in sizes: + segment_size = round(size / nseg) + with tempfile.NamedTemporaryFile() as fake_file: + fake_content = ''.join( + random.SystemRandom().choice( + string.ascii_uppercase + string.digits + ) + for _ in range(size) + ).encode('latin-1') + + fake_file.write(fake_content) + fake_file.flush() + name = f'test-{size}' + self.addCleanup( + self.user_cloud.delete_object, container_name, name + ) + self.user_cloud.create_object( + container_name, + name, + fake_file.name, + segment_size=segment_size, + metadata={'foo': 'bar'}, + ) + self.assertFalse( + self.user_cloud.is_object_stale( + container_name, name, fake_file.name + ) + ) + self.assertEqual( + 'bar', + self.user_cloud.get_object_metadata(container_name, name)[ + 'foo' + ], + ) + self.user_cloud.update_object( + container=container_name, + name=name, + metadata={'testk': 'testv'}, + ) + self.assertEqual( + 'testv', + self.user_cloud.get_object_metadata(container_name, name)[ + 'testk' + ], + ) + self.assertIsNotNone( + self.user_cloud.get_object(container_name, name) + ) + 
self.assertEqual( + name, self.user_cloud.list_objects(container_name)[0]['name'] + ) + self.assertEqual( + [], self.user_cloud.list_objects(container_name, prefix='abc') + ) + self.assertTrue( + self.user_cloud.delete_object(container_name, name) + ) + self.assertEqual([], self.user_cloud.list_objects(container_name)) + self.assertEqual( + container_name, self.user_cloud.get_container(container_name).name + ) + self.user_cloud.delete_container(container_name) + + def test_download_object_to_file(self): + '''Test uploading small and large files.''' + container_name = self.getUniqueString('container') + self.addDetail('container', content.text_content(container_name)) + self.addCleanup(self.user_cloud.delete_container, container_name) + self.user_cloud.create_container(container_name) + self.assertEqual( + container_name, self.user_cloud.list_containers()[0]['name'] + ) + sizes = ( + (64 * 1024, 1), # 64K, one segment + (64 * 1024, 5), # 64MB, 5 segments + ) + for size, nseg in sizes: + fake_content = b'' + segment_size = round(size / nseg) + with tempfile.NamedTemporaryFile() as fake_file: + fake_content = ''.join( + random.SystemRandom().choice( + string.ascii_uppercase + string.digits + ) + for _ in range(size) + ).encode('latin-1') + + fake_file.write(fake_content) + fake_file.flush() + name = f'test-{size}' + self.addCleanup( + self.user_cloud.delete_object, container_name, name + ) + self.user_cloud.create_object( + container_name, + name, + fake_file.name, + segment_size=segment_size, + metadata={'foo': 'bar'}, + ) + self.assertFalse( + self.user_cloud.is_object_stale( + container_name, name, fake_file.name + ) + ) + self.assertEqual( + 'bar', + self.user_cloud.get_object_metadata(container_name, name)[ + 'foo' + ], + ) + self.user_cloud.update_object( + container=container_name, + name=name, + metadata={'testk': 'testv'}, + ) + self.assertEqual( + 'testv', + self.user_cloud.get_object_metadata(container_name, name)[ + 'testk' + ], + ) + + with 
tempfile.NamedTemporaryFile() as fake_file: + self.user_cloud.get_object( + container_name, name, outfile=fake_file.name + ) + downloaded_content = open(fake_file.name, 'rb').read() + self.assertEqual(fake_content, downloaded_content) + + self.assertEqual( + name, self.user_cloud.list_objects(container_name)[0]['name'] + ) + self.assertTrue( + self.user_cloud.delete_object(container_name, name) + ) + self.assertEqual([], self.user_cloud.list_objects(container_name)) + self.assertEqual( + container_name, self.user_cloud.list_containers()[0]['name'] + ) + self.user_cloud.delete_container(container_name) diff --git a/openstack/tests/functional/cloud/test_port.py b/openstack/tests/functional/cloud/test_port.py new file mode 100644 index 0000000000..3d46e8d319 --- /dev/null +++ b/openstack/tests/functional/cloud/test_port.py @@ -0,0 +1,147 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_port +---------------------------------- + +Functional tests for port resource. 
+""" + +import random +import string + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestPort(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + # Skip Neutron tests if neutron is not present + if not self.user_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + + net_name = self.getUniqueString('CloudPortName') + self.net = self.user_cloud.network.create_network(name=net_name) + self.addCleanup(self.user_cloud.network.delete_network, self.net.id) + + # Generate a unique port name to allow concurrent tests + self.new_port_name = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5) + ) + + self.addCleanup(self._cleanup_ports) + + def _cleanup_ports(self): + exception_list = list() + + for p in self.user_cloud.list_ports(): + if p['name'].startswith(self.new_port_name): + try: + self.user_cloud.delete_port(name_or_id=p['id']) + except Exception as e: + # We were unable to delete this port, let's try with next + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_port(self): + port_name = self.new_port_name + '_create' + + port = self.user_cloud.create_port( + network_id=self.net.id, name=port_name + ) + self.assertIsInstance(port, dict) + self.assertIn('id', port) + self.assertEqual(port.get('name'), port_name) + + def test_get_port(self): + port_name = self.new_port_name + '_get' + + port = self.user_cloud.create_port( + network_id=self.net.id, name=port_name + ) + self.assertIsInstance(port, dict) + self.assertIn('id', port) + self.assertEqual(port.get('name'), port_name) + + updated_port = self.user_cloud.get_port(name_or_id=port['id']) + # extra_dhcp_opts is added later by Neutron... 
+ if 'extra_dhcp_opts' in updated_port and 'extra_dhcp_opts' not in port: + del updated_port['extra_dhcp_opts'] + self.assertEqual(port, updated_port) + + def test_get_port_by_id(self): + port_name = self.new_port_name + '_get_by_id' + + port = self.user_cloud.create_port( + network_id=self.net.id, name=port_name + ) + self.assertIsInstance(port, dict) + self.assertIn('id', port) + self.assertEqual(port.get('name'), port_name) + + updated_port = self.user_cloud.get_port_by_id(port['id']) + # extra_dhcp_opts is added later by Neutron... + if 'extra_dhcp_opts' in updated_port and 'extra_dhcp_opts' not in port: + del updated_port['extra_dhcp_opts'] + self.assertEqual(port, updated_port) + + def test_update_port(self): + port_name = self.new_port_name + '_update' + new_port_name = port_name + '_new' + + self.user_cloud.create_port(network_id=self.net.id, name=port_name) + + port = self.user_cloud.update_port( + name_or_id=port_name, name=new_port_name + ) + self.assertIsInstance(port, dict) + self.assertEqual(port.get('name'), new_port_name) + + updated_port = self.user_cloud.get_port(name_or_id=port['id']) + self.assertEqual(port.get('name'), new_port_name) + port.pop('revision_number', None) + port.pop('revision_number', None) + port.pop('updated_at', None) + port.pop('updated_at', None) + updated_port.pop('revision_number', None) + updated_port.pop('revision_number', None) + updated_port.pop('updated_at', None) + updated_port.pop('updated_at', None) + + self.assertEqual(port, updated_port) + + def test_delete_port(self): + port_name = self.new_port_name + '_delete' + + port = self.user_cloud.create_port( + network_id=self.net.id, name=port_name + ) + self.assertIsInstance(port, dict) + self.assertIn('id', port) + self.assertEqual(port.get('name'), port_name) + + updated_port = self.user_cloud.get_port(name_or_id=port['id']) + self.assertIsNotNone(updated_port) + + self.user_cloud.delete_port(name_or_id=port_name) + + updated_port = 
self.user_cloud.get_port(name_or_id=port['id']) + self.assertIsNone(updated_port) diff --git a/openstack/tests/functional/cloud/test_project.py b/openstack/tests/functional/cloud/test_project.py new file mode 100644 index 0000000000..9331f9ee66 --- /dev/null +++ b/openstack/tests/functional/cloud/test_project.py @@ -0,0 +1,133 @@ +# Copyright (c) 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_project +---------------------------------- + +Functional tests for project resource. 
+""" + +import pprint + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestProject(base.KeystoneBaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + self.new_project_name = self.getUniqueString('project') + self.addCleanup(self._cleanup_projects) + + def _cleanup_projects(self): + exception_list = list() + for p in self.operator_cloud.list_projects(): + if p['name'].startswith(self.new_project_name): + try: + self.operator_cloud.delete_project(p['id']) + except Exception as e: + exception_list.append(str(e)) + continue + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_project(self): + project_name = self.new_project_name + '_create' + + params = { + 'name': project_name, + 'description': 'test_create_project', + 'domain_id': self.operator_cloud.get_domain('default')['id'], + } + + project = self.operator_cloud.create_project(**params) + + self.assertIsNotNone(project) + self.assertEqual(project_name, project['name']) + self.assertEqual('test_create_project', project['description']) + + user_id = self.operator_cloud.current_user_id + + # Grant the current user access to the project + self.assertTrue( + self.operator_cloud.grant_role( + 'member', user=user_id, project=project['id'], wait=True + ) + ) + self.addCleanup( + self.operator_cloud.revoke_role, + 'member', + user=user_id, + project=project['id'], + wait=True, + ) + + new_cloud = self.operator_cloud.connect_as_project(project) + self.add_info_on_exception( + 'new_cloud_config', pprint.pformat(new_cloud.config.config) + ) + location = new_cloud.current_location + self.assertEqual(project_name, location['project']['name']) + + def test_update_project(self): + project_name = self.new_project_name + '_update' + + params = { + 'name': project_name, + 'description': 'test_update_project', + 'enabled': True, + 'domain_id': 
self.operator_cloud.get_domain('default')['id'], + } + + project = self.operator_cloud.create_project(**params) + updated_project = self.operator_cloud.update_project( + project_name, enabled=False, description='new' + ) + self.assertIsNotNone(updated_project) + self.assertEqual(project['id'], updated_project['id']) + self.assertEqual(project['name'], updated_project['name']) + self.assertEqual(updated_project['description'], 'new') + self.assertTrue(project['enabled']) + self.assertFalse(updated_project['enabled']) + + # Revert the description and verify the project is still disabled + updated_project = self.operator_cloud.update_project( + project_name, description=params['description'] + ) + self.assertIsNotNone(updated_project) + self.assertEqual(project['id'], updated_project['id']) + self.assertEqual(project['name'], updated_project['name']) + self.assertEqual( + project['description'], updated_project['description'] + ) + self.assertTrue(project['enabled']) + self.assertFalse(updated_project['enabled']) + + def test_delete_project(self): + project_name = self.new_project_name + '_delete' + params = { + 'name': project_name, + 'domain_id': self.operator_cloud.get_domain('default')['id'], + } + project = self.operator_cloud.create_project(**params) + self.assertIsNotNone(project) + self.assertTrue(self.operator_cloud.delete_project(project['id'])) + + def test_delete_project_not_found(self): + self.assertFalse(self.operator_cloud.delete_project('doesNotExist')) diff --git a/openstack/tests/functional/cloud/test_project_cleanup.py b/openstack/tests/functional/cloud/test_project_cleanup.py new file mode 100644 index 0000000000..35bf2f5f3b --- /dev/null +++ b/openstack/tests/functional/cloud/test_project_cleanup.py @@ -0,0 +1,447 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_project_cleanup +---------------------------------- + +Functional tests for project cleanup methods. +""" + +import queue + +from openstack.network.v2 import network as _network +from openstack import resource +from openstack.tests.functional import base + + +class TestProjectCleanup(base.BaseFunctionalTest): + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_CLEANUP' + + def setUp(self): + super().setUp() + if not self.user_cloud_alt: + self.skipTest("Alternate demo cloud is required for this test") + + self.network_name = self.getUniqueString('network') + + def _create_network_resources(self): + self.net = self.user_cloud_alt.network.create_network( + name=self.network_name, + ) + self.subnet = self.user_cloud_alt.network.create_subnet( + name=self.getUniqueString('subnet'), + network_id=self.net.id, + cidr='192.169.1.0/24', + ip_version=4, + ) + self.router = self.user_cloud_alt.network.create_router( + name=self.getUniqueString('router') + ) + self.user_cloud_alt.network.add_interface_to_router( + self.router.id, subnet_id=self.subnet.id + ) + + def test_cleanup(self): + self._create_network_resources() + status_queue: queue.Queue[resource.Resource] = queue.Queue() + + # First round - check no resources are old enough + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2000-01-01'}, + ) + + self.assertTrue(status_queue.empty()) + + # Second round - resource evaluation function return false, ensure + # nothing identified + 
self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2200-01-01'}, + resource_evaluation_fn=lambda x, y, z: False, + ) + + self.assertTrue(status_queue.empty()) + + # Third round - filters set too low + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2200-01-01'}, + ) + + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + # At least known networks should be identified + net_names = list(obj.name for obj in objects) + self.assertIn(self.network_name, net_names) + + # Fourth round - dry run with no filters, ensure everything identified + self.user_cloud_alt.project_cleanup( + dry_run=True, wait_timeout=120, status_queue=status_queue + ) + + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + net_names = list(obj.name for obj in objects) + self.assertIn(self.network_name, net_names) + + # Ensure network still exists + net = self.user_cloud_alt.network.get_network(self.net.id) + self.assertEqual(net.name, self.net.name) + + # Last round - do a real cleanup + self.user_cloud_alt.project_cleanup( + dry_run=False, wait_timeout=600, status_queue=status_queue + ) + + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + nets = self.user_cloud_alt.network.networks() + net_names = list(obj.name for obj in nets) + # Since we might not have enough privs to drop all nets - ensure + # we do not have our known one + self.assertNotIn(self.network_name, net_names) + + def test_block_storage_cleanup(self): + if not self.user_cloud.has_service('block-storage'): + self.skipTest('Block storage service is required, but not available') + + status_queue: queue.Queue[resource.Resource] = queue.Queue() + + vol = self.user_cloud_alt.block_storage.create_volume( + name='vol1', size='1' + ) + self.user_cloud_alt.block_storage.wait_for_status(vol)
+ s1 = self.user_cloud_alt.block_storage.create_snapshot( + volume_id=vol.id + ) + self.user_cloud_alt.block_storage.wait_for_status(s1) + b1 = self.user_cloud_alt.block_storage.create_backup(volume_id=vol.id) + self.user_cloud_alt.block_storage.wait_for_status(b1) + b2 = self.user_cloud_alt.block_storage.create_backup( + volume_id=vol.id, is_incremental=True, snapshot_id=s1.id + ) + self.user_cloud_alt.block_storage.wait_for_status(b2) + b3 = self.user_cloud_alt.block_storage.create_backup( + volume_id=vol.id, is_incremental=True, snapshot_id=s1.id + ) + self.user_cloud_alt.block_storage.wait_for_status(b3) + + # First round - check no resources are old enough + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2000-01-01'}, + ) + + self.assertTrue(status_queue.empty()) + + # Second round - resource evaluation function return false, ensure + # nothing identified + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2200-01-01'}, + resource_evaluation_fn=lambda x, y, z: False, + ) + + self.assertTrue(status_queue.empty()) + + # Third round - filters set too low + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2200-01-01'}, + ) + + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + # At least known volumes should be identified + volumes = list(obj.id for obj in objects) + self.assertIn(vol.id, volumes) + + # Fourth round - dry run with no filters, ensure everything identified + self.user_cloud_alt.project_cleanup( + dry_run=True, wait_timeout=120, status_queue=status_queue + ) + + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + vol_ids = list(obj.id for obj in objects) + self.assertIn(vol.id, vol_ids) + + # Ensure volume still exists + vol_check =
self.user_cloud_alt.block_storage.get_volume(vol.id) + self.assertEqual(vol.name, vol_check.name) + + # Last round - do a real cleanup + self.user_cloud_alt.project_cleanup( + dry_run=False, wait_timeout=600, status_queue=status_queue + ) + # Ensure no backups remain + self.assertEqual( + 0, len(list(self.user_cloud_alt.block_storage.backups())) + ) + # Ensure no snapshots remain + self.assertEqual( + 0, len(list(self.user_cloud_alt.block_storage.snapshots())) + ) + + def test_cleanup_swift(self): + if not self.user_cloud.has_service('object-store'): + self.skipTest('Object service is required, but not available') + + status_queue: queue.Queue[resource.Resource] = queue.Queue() + + self.user_cloud_alt.object_store.create_container('test_cleanup') + for i in range(1, 10): + self.user_cloud_alt.object_store.create_object( + "test_cleanup", f"test{i}", data=f"test{i}" + ) + + # First round - check no resources are old enough + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'updated_at': '2000-01-01'}, + ) + + self.assertTrue(status_queue.empty()) + + # Second round - filters set too low + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'updated_at': '2200-01-01'}, + ) + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + # At least known objects should be identified + obj_names = list(obj.name for obj in objects) + self.assertIn('test1', obj_names) + + # Ensure object still exists + obj = self.user_cloud_alt.object_store.get_object( + "test1", "test_cleanup" + ) + self.assertIsNotNone(obj) + + # Last round - do a real cleanup + self.user_cloud_alt.project_cleanup( + dry_run=False, wait_timeout=600, status_queue=status_queue + ) + + objects.clear() + while not status_queue.empty(): + objects.append(status_queue.get()) + self.assertIsNone(self.user_cloud_alt.get_container('test_cleanup')) + + def
test_cleanup_vpnaas(self): + if not list( + self.user_cloud_alt.network.service_providers(service_type="VPN") + ): + self.skipTest("VPNaaS plugin is required, but not available") + + status_queue: queue.Queue[resource.Resource] = queue.Queue() + + # Find available external networks and use one + for network in self.user_cloud_alt.network.networks(): + if network.is_router_external: + external_network: _network.Network = network + break + else: + self.skipTest("External network is required, but not available") + + # Create left network resources + network_left = self.user_cloud_alt.network.create_network( + name="network_left" + ) + subnet_left = self.user_cloud_alt.network.create_subnet( + name="subnet_left", + network_id=network_left.id, + cidr="192.168.1.0/24", + ip_version=4, + ) + router_left = self.user_cloud_alt.network.create_router( + name="router_left" + ) + self.user_cloud_alt.network.add_interface_to_router( + router=router_left.id, subnet_id=subnet_left.id + ) + router_left = self.user_cloud_alt.network.update_router( + router_left, + external_gateway_info={"network_id": external_network.id}, + ) + + # Create right network resources + network_right = self.user_cloud_alt.network.create_network( + name="network_right" + ) + subnet_right = self.user_cloud_alt.network.create_subnet( + name="subnet_right", + network_id=network_right.id, + cidr="192.168.2.0/24", + ip_version=4, + ) + router_right = self.user_cloud_alt.network.create_router( + name="router_right" + ) + self.user_cloud_alt.network.add_interface_to_router( + router=router_right.id, subnet_id=subnet_right.id + ) + router_right = self.user_cloud_alt.network.update_router( + router_right, + external_gateway_info={"network_id": external_network.id}, + ) + + # Create VPNaaS resources + ike_policy = self.user_cloud_alt.network.create_vpn_ike_policy( + name="ike_policy" + ) + ipsec_policy = self.user_cloud_alt.network.create_vpn_ipsec_policy( + name="ipsec_policy" + ) + + vpn_service =
self.user_cloud_alt.network.create_vpn_service( + name="vpn_service", router_id=router_left.id + ) + + ep_group_local = self.user_cloud_alt.network.create_vpn_endpoint_group( + name="endpoint_group_local", + type="subnet", + endpoints=[subnet_left.id], + ) + ep_group_peer = self.user_cloud_alt.network.create_vpn_endpoint_group( + name="endpoint_group_peer", + type="cidr", + endpoints=[subnet_right.cidr], + ) + + router_right_ip = router_right.external_gateway_info[ + 'external_fixed_ips' + ][0]['ip_address'] + ipsec_site_conn = ( + self.user_cloud_alt.network.create_vpn_ipsec_site_connection( + name="ipsec_site_connection", + vpnservice_id=vpn_service.id, + ikepolicy_id=ike_policy.id, + ipsecpolicy_id=ipsec_policy.id, + local_ep_group_id=ep_group_local.id, + peer_ep_group_id=ep_group_peer.id, + psk="test", + peer_address=router_right_ip, + peer_id=router_right_ip, + ) + ) + + # First round - check no resources are old enough + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2000-01-01'}, + ) + self.assertTrue(status_queue.empty()) + + # Second round - resource evaluation function return false, ensure + # nothing identified + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2200-01-01'}, + resource_evaluation_fn=lambda x, y, z: False, + ) + self.assertTrue(status_queue.empty()) + + # Third round - filters set too low + self.user_cloud_alt.project_cleanup( + dry_run=True, + wait_timeout=120, + status_queue=status_queue, + filters={'created_at': '2200-01-01'}, + ) + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + # VPN resources do not have a created_at property + # Check for the network instead + resource_ids = list(obj.id for obj in objects) + self.assertIn(network_left.id, resource_ids) + + # Fourth round - dry run with no filters, ensure everything identified + 
self.user_cloud_alt.project_cleanup( + dry_run=True, wait_timeout=120, status_queue=status_queue + ) + objects = [] + while not status_queue.empty(): + objects.append(status_queue.get()) + + resource_ids = list(obj.id for obj in objects) + self.assertIn(ipsec_site_conn.id, resource_ids) + + # Ensure vpn resources still exist + site_conn_check = ( + self.user_cloud_alt.network.get_vpn_ipsec_site_connection( + ipsec_site_conn.id + ) + ) + self.assertEqual(site_conn_check.name, ipsec_site_conn.name) + + # Last round - do a real cleanup + self.user_cloud_alt.project_cleanup( + dry_run=False, wait_timeout=600, status_queue=status_queue + ) + # Ensure no VPN resources remain + self.assertEqual( + 0, len(list(self.user_cloud_alt.network.vpn_ike_policies())) + ) + self.assertEqual( + 0, len(list(self.user_cloud_alt.network.vpn_ipsec_policies())) + ) + self.assertEqual( + 0, len(list(self.user_cloud_alt.network.vpn_services())) + ) + self.assertEqual( + 0, len(list(self.user_cloud_alt.network.vpn_endpoint_groups())) + ) + self.assertEqual( + 0, + len( + list(self.user_cloud_alt.network.vpn_ipsec_site_connections()) + ), + ) diff --git a/openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py b/openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py new file mode 100644 index 0000000000..ab7e8b7004 --- /dev/null +++ b/openstack/tests/functional/cloud/test_qos_bandwidth_limit_rule.py @@ -0,0 +1,110 @@ +# Copyright 2017 OVH SAS +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_qos_bandwidth_limit_rule +---------------------------------- + +Functional tests for QoS bandwidth limit methods. +""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestQosBandwidthLimitRule(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + if not self.operator_cloud._has_neutron_extension('qos'): + self.skipTest('QoS network extension not supported by cloud') + + policy_name = self.getUniqueString('qos_policy') + self.policy = self.operator_cloud.create_qos_policy(name=policy_name) + + self.addCleanup(self._cleanup_qos_policy) + + def _cleanup_qos_policy(self): + try: + self.operator_cloud.delete_qos_policy(self.policy['id']) + except Exception as e: + raise exceptions.SDKException(str(e)) + + def test_qos_bandwidth_limit_rule_lifecycle(self): + max_kbps = 1500 + max_burst_kbps = 500 + updated_max_kbps = 2000 + + # Create bw limit rule + rule = self.operator_cloud.create_qos_bandwidth_limit_rule( + self.policy['id'], max_kbps=max_kbps, max_burst_kbps=max_burst_kbps + ) + self.assertIn('id', rule) + self.assertEqual(max_kbps, rule['max_kbps']) + self.assertEqual(max_burst_kbps, rule['max_burst_kbps']) + + # Now try to update rule + updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule( + self.policy['id'], rule['id'], max_kbps=updated_max_kbps + ) + self.assertIn('id', updated_rule) + self.assertEqual(updated_max_kbps, updated_rule['max_kbps']) + self.assertEqual(max_burst_kbps, updated_rule['max_burst_kbps']) + + # List rules from policy + policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules( + self.policy['id'] + ) + self.assertEqual([updated_rule], policy_rules) + 
+ # Delete rule + self.operator_cloud.delete_qos_bandwidth_limit_rule( + self.policy['id'], updated_rule['id'] + ) + + # Check if there is no rules in policy + policy_rules = self.operator_cloud.list_qos_bandwidth_limit_rules( + self.policy['id'] + ) + self.assertEqual([], policy_rules) + + def test_create_qos_bandwidth_limit_rule_direction(self): + if not self.operator_cloud._has_neutron_extension( + 'qos-bw-limit-direction' + ): + self.skipTest( + "'qos-bw-limit-direction' network extension " + "not supported by cloud" + ) + max_kbps = 1500 + direction = "ingress" + updated_direction = "egress" + + # Create bw limit rule + rule = self.operator_cloud.create_qos_bandwidth_limit_rule( + self.policy['id'], max_kbps=max_kbps, direction=direction + ) + self.assertIn('id', rule) + self.assertEqual(max_kbps, rule['max_kbps']) + self.assertEqual(direction, rule['direction']) + + # Now try to update direction in rule + updated_rule = self.operator_cloud.update_qos_bandwidth_limit_rule( + self.policy['id'], rule['id'], direction=updated_direction + ) + self.assertIn('id', updated_rule) + self.assertEqual(max_kbps, updated_rule['max_kbps']) + self.assertEqual(updated_direction, updated_rule['direction']) diff --git a/openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py b/openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py new file mode 100644 index 0000000000..d7c19a9320 --- /dev/null +++ b/openstack/tests/functional/cloud/test_qos_dscp_marking_rule.py @@ -0,0 +1,79 @@ +# Copyright 2017 OVH SAS +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_qos_dscp_marking_rule +---------------------------------- + +Functional tests for QoS DSCP marking rule methods. +""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestQosDscpMarkingRule(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + if not self.operator_cloud._has_neutron_extension('qos'): + self.skipTest('QoS network extension not supported by cloud') + + policy_name = self.getUniqueString('qos_policy') + self.policy = self.operator_cloud.create_qos_policy(name=policy_name) + + self.addCleanup(self._cleanup_qos_policy) + + def _cleanup_qos_policy(self): + try: + self.operator_cloud.delete_qos_policy(self.policy['id']) + except Exception as e: + raise exceptions.SDKException(str(e)) + + def test_qos_dscp_marking_rule_lifecycle(self): + dscp_mark = 16 + updated_dscp_mark = 32 + + # Create DSCP marking rule + rule = self.operator_cloud.create_qos_dscp_marking_rule( + self.policy['id'], dscp_mark=dscp_mark + ) + self.assertIn('id', rule) + self.assertEqual(dscp_mark, rule['dscp_mark']) + + # Now try to update rule + updated_rule = self.operator_cloud.update_qos_dscp_marking_rule( + self.policy['id'], rule['id'], dscp_mark=updated_dscp_mark + ) + self.assertIn('id', updated_rule) + self.assertEqual(updated_dscp_mark, updated_rule['dscp_mark']) + + # List rules from policy + policy_rules = self.operator_cloud.list_qos_dscp_marking_rules( + self.policy['id'] + ) + self.assertEqual([updated_rule], policy_rules) + + # Delete rule + self.operator_cloud.delete_qos_dscp_marking_rule( + self.policy['id'], updated_rule['id'] + ) + + # Check if there is no rules in policy + policy_rules = 
self.operator_cloud.list_qos_dscp_marking_rules( + self.policy['id'] + ) + self.assertEqual([], policy_rules) diff --git a/openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py b/openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py new file mode 100644 index 0000000000..834e2a2160 --- /dev/null +++ b/openstack/tests/functional/cloud/test_qos_minimum_bandwidth_rule.py @@ -0,0 +1,79 @@ +# Copyright 2017 OVH SAS +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_qos_minimum_bandwidth_rule +---------------------------------- +
Functional tests for QoS minimum bandwidth methods.
+""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestQosMinimumBandwidthRule(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + if not self.operator_cloud._has_neutron_extension('qos'): + self.skipTest('QoS network extension not supported by cloud') + + policy_name = self.getUniqueString('qos_policy') + self.policy = self.operator_cloud.create_qos_policy(name=policy_name) + + self.addCleanup(self._cleanup_qos_policy) + + def _cleanup_qos_policy(self): + try: + self.operator_cloud.delete_qos_policy(self.policy['id']) + except Exception as e: + raise exceptions.SDKException(str(e)) + + def test_qos_minimum_bandwidth_rule_lifecycle(self): + min_kbps = 1500 + updated_min_kbps = 2000 + + # Create min bw rule + rule = self.operator_cloud.create_qos_minimum_bandwidth_rule( + self.policy['id'], min_kbps=min_kbps + ) + self.assertIn('id', rule) + self.assertEqual(min_kbps, rule['min_kbps']) + + # Now try to update rule + updated_rule = self.operator_cloud.update_qos_minimum_bandwidth_rule( + self.policy['id'], rule['id'], min_kbps=updated_min_kbps + ) + self.assertIn('id', updated_rule) + self.assertEqual(updated_min_kbps, updated_rule['min_kbps']) + + # List rules from policy + policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules( + self.policy['id'] + ) + self.assertEqual([updated_rule], policy_rules) + + # Delete rule + self.operator_cloud.delete_qos_minimum_bandwidth_rule( + self.policy['id'], updated_rule['id'] + ) + + # Check if there is no rules in policy + policy_rules = self.operator_cloud.list_qos_minimum_bandwidth_rules( + self.policy['id'] + ) + self.assertEqual([], policy_rules) diff --git a/openstack/tests/functional/cloud/test_qos_policy.py 
b/openstack/tests/functional/cloud/test_qos_policy.py new file mode 100644 index 0000000000..03a2f3c3c8 --- /dev/null +++ b/openstack/tests/functional/cloud/test_qos_policy.py @@ -0,0 +1,103 @@ +# Copyright 2017 OVH SAS +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_qos_policy +---------------------------------- + +Functional tests for QoS policies methods. +""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestQosPolicy(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + if not self.operator_cloud._has_neutron_extension('qos'): + self.skipTest('QoS network extension not supported by cloud') + self.policy_name = self.getUniqueString('qos_policy') + self.addCleanup(self._cleanup_policies) + + def _cleanup_policies(self): + exception_list = list() + for policy in self.operator_cloud.list_qos_policies(): + if policy['name'].startswith(self.policy_name): + try: + self.operator_cloud.delete_qos_policy(policy['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_qos_policy_basic(self): + policy = self.operator_cloud.create_qos_policy(name=self.policy_name) + self.assertIn('id', policy) + 
self.assertEqual(self.policy_name, policy['name']) + self.assertFalse(policy['is_shared']) + self.assertFalse(policy['is_default']) + + def test_create_qos_policy_shared(self): + policy = self.operator_cloud.create_qos_policy( + name=self.policy_name, shared=True + ) + self.assertIn('id', policy) + self.assertEqual(self.policy_name, policy['name']) + self.assertTrue(policy['is_shared']) + self.assertFalse(policy['is_default']) + + def test_create_qos_policy_default(self): + if not self.operator_cloud._has_neutron_extension('qos-default'): + self.skipTest( + "'qos-default' network extension not supported by cloud" + ) + policy = self.operator_cloud.create_qos_policy( + name=self.policy_name, default=True + ) + self.assertIn('id', policy) + self.assertEqual(self.policy_name, policy['name']) + self.assertFalse(policy['is_shared']) + self.assertTrue(policy['is_default']) + + def test_update_qos_policy(self): + policy = self.operator_cloud.create_qos_policy(name=self.policy_name) + self.assertEqual(self.policy_name, policy['name']) + self.assertFalse(policy['is_shared']) + self.assertFalse(policy['is_default']) + + updated_policy = self.operator_cloud.update_qos_policy( + policy['id'], shared=True, default=True + ) + self.assertEqual(self.policy_name, updated_policy['name']) + self.assertTrue(updated_policy['is_shared']) + self.assertTrue(updated_policy['is_default']) + + def test_list_qos_policies_filtered(self): + policy1 = self.operator_cloud.create_qos_policy(name=self.policy_name) + self.assertIsNotNone(policy1) + policy2 = self.operator_cloud.create_qos_policy( + name=self.policy_name + 'other' + ) + self.assertIsNotNone(policy2) + match = self.operator_cloud.list_qos_policies( + filters=dict(name=self.policy_name) + ) + self.assertEqual(1, len(match)) + self.assertEqual(policy1['name'], match[0]['name']) diff --git a/openstack/tests/functional/cloud/test_quotas.py b/openstack/tests/functional/cloud/test_quotas.py new file mode 100644 index 0000000000..0db5878bd0 
--- /dev/null +++ b/openstack/tests/functional/cloud/test_quotas.py @@ -0,0 +1,123 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_quotas +---------------------------------- + +Functional tests for quotas methods. +""" + +from openstack.tests.functional import base + + +class TestComputeQuotas(base.BaseFunctionalTest): + def test_get_quotas(self): + '''Test quotas functionality''' + self.user_cloud.get_compute_quotas(self.user_cloud.current_project_id) + + def test_set_quotas(self): + '''Test quotas functionality''' + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + quotas = self.operator_cloud.get_compute_quotas('demo') + cores = quotas['cores'] + self.operator_cloud.set_compute_quotas('demo', cores=cores + 1) + self.assertEqual( + cores + 1, self.operator_cloud.get_compute_quotas('demo')['cores'] + ) + self.operator_cloud.delete_compute_quotas('demo') + self.assertEqual( + cores, self.operator_cloud.get_compute_quotas('demo')['cores'] + ) + + +class TestVolumeQuotas(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + + def test_get_quotas(self): + '''Test get quotas functionality''' + self.user_cloud.get_volume_quotas(self.user_cloud.current_project_id) + + def test_set_quotas(self): + '''Test set quotas functionality''' + if not self.operator_cloud: + self.skipTest("Operator cloud is required 
for this test") + + quotas = self.operator_cloud.get_volume_quotas('demo') + volumes = quotas['volumes'] + self.operator_cloud.set_volume_quotas('demo', volumes=volumes + 1) + self.assertEqual( + volumes + 1, + self.operator_cloud.get_volume_quotas('demo')['volumes'], + ) + self.operator_cloud.delete_volume_quotas('demo') + self.assertEqual( + volumes, self.operator_cloud.get_volume_quotas('demo')['volumes'] + ) + + +class TestNetworkQuotas(base.BaseFunctionalTest): + def test_get_quotas(self): + '''Test get quotas functionality''' + self.user_cloud.get_network_quotas(self.user_cloud.current_project_id) + + def test_quotas(self): + '''Test quotas functionality''' + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + if not self.operator_cloud.has_service('network'): + self.skipTest('network service not supported by cloud') + + quotas = self.operator_cloud.get_network_quotas('demo') + network = quotas['networks'] + self.operator_cloud.set_network_quotas('demo', networks=network + 1) + self.assertEqual( + network + 1, + self.operator_cloud.get_network_quotas('demo')['networks'], + ) + self.operator_cloud.delete_network_quotas('demo') + self.assertEqual( + network, self.operator_cloud.get_network_quotas('demo')['networks'] + ) + + def test_get_quotas_details(self): + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + if not self.operator_cloud.has_service('network'): + self.skipTest('network service not supported by cloud') + + quotas = [ + 'floating_ips', + 'networks', + 'ports', + 'rbac_policies', + 'routers', + 'subnets', + 'subnet_pools', + 'security_group_rules', + 'security_groups', + ] + expected_keys = ['limit', 'used', 'reserved'] + '''Test getting details about quota usage''' + quota_details = self.operator_cloud.get_network_quotas( + 'demo', details=True + ) + for quota in quotas: + quota_val = quota_details[quota] + if quota_val: + for expected_key in expected_keys: + 
self.assertIn(expected_key, quota_val) diff --git a/openstack/tests/functional/cloud/test_range_search.py b/openstack/tests/functional/cloud/test_range_search.py new file mode 100644 index 0000000000..d6662511ee --- /dev/null +++ b/openstack/tests/functional/cloud/test_range_search.py @@ -0,0 +1,146 @@ +# Copyright (c) 2016 IBM +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestRangeSearch(base.BaseFunctionalTest): + def _filter_m1_flavors(self, results): + """The m1 flavors are the original devstack flavors""" + new_results = [] + for flavor in results: + if flavor['name'].startswith("m1."): + new_results.append(flavor) + return new_results + + def test_range_search_bad_range(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + self.assertRaises( + exceptions.SDKException, + self.user_cloud.range_search, + flavors, + {"ram": "<1a0"}, + ) + + def test_range_search_exact(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "4096"}) + self.assertIsInstance(result, list) + # should only be 1 m1 flavor with 4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(1, len(result)) + self.assertEqual("m1.medium", result[0]['name']) + + def test_range_search_min(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "MIN"}) 
+ self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + # older devstack does not have cirros256 + self.assertIn(result[0]['name'], ('cirros256', 'm1.tiny')) + + def test_range_search_max(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "MAX"}) + self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + self.assertEqual("m1.xlarge", result[0]['name']) + + def test_range_search_lt(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "<1024"}) + self.assertIsInstance(result, list) + # should only be 1 m1 flavor with <1024 ram + result = self._filter_m1_flavors(result) + self.assertEqual(1, len(result)) + self.assertEqual("m1.tiny", result[0]['name']) + + def test_range_search_gt(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": ">4096"}) + self.assertIsInstance(result, list) + # should only be 2 m1 flavors with >4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(2, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.large", flavor_names) + self.assertIn("m1.xlarge", flavor_names) + + def test_range_search_le(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": "<=4096"}) + self.assertIsInstance(result, list) + # should only be 3 m1 flavors with <=4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(3, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.tiny", flavor_names) + self.assertIn("m1.small", flavor_names) + self.assertIn("m1.medium", flavor_names) + + def test_range_search_ge(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search(flavors, {"ram": ">=4096"}) + self.assertIsInstance(result, list) + # should only 
be 3 m1 flavors with >=4096 ram + result = self._filter_m1_flavors(result) + self.assertEqual(3, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.medium", flavor_names) + self.assertIn("m1.large", flavor_names) + self.assertIn("m1.xlarge", flavor_names) + + def test_range_search_multi_1(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": "MIN", "vcpus": "MIN"} + ) + self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + # older devstack does not have cirros256 + self.assertIn(result[0]['name'], ('cirros256', 'm1.tiny')) + + def test_range_search_multi_2(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": "<1024", "vcpus": "MIN"} + ) + self.assertIsInstance(result, list) + result = self._filter_m1_flavors(result) + self.assertEqual(1, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.tiny", flavor_names) + + def test_range_search_multi_3(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": ">=4096", "vcpus": "<6"} + ) + self.assertIsInstance(result, list) + result = self._filter_m1_flavors(result) + self.assertEqual(2, len(result)) + flavor_names = [r['name'] for r in result] + self.assertIn("m1.medium", flavor_names) + self.assertIn("m1.large", flavor_names) + + def test_range_search_multi_4(self): + flavors = self.user_cloud.list_flavors(get_extra=False) + result = self.user_cloud.range_search( + flavors, {"ram": ">=4096", "vcpus": "MAX"} + ) + self.assertIsInstance(result, list) + self.assertEqual(1, len(result)) + # This is the only result that should have max vcpu + self.assertEqual("m1.xlarge", result[0]['name']) diff --git a/openstack/tests/functional/cloud/test_recordset.py b/openstack/tests/functional/cloud/test_recordset.py new file mode 100644 index 
0000000000..ce6e9127cb --- /dev/null +++ b/openstack/tests/functional/cloud/test_recordset.py @@ -0,0 +1,156 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_recordset +---------------------------------- + +Functional tests for recordset methods. +""" + +import random +import string + +from testtools import content + +from openstack.tests.functional import base + + +class TestRecordset(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.has_service('dns'): + self.skipTest('dns service not supported by cloud') + + def test_recordsets_with_zone_id(self): + '''Test DNS recordsets functionality''' + sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6)) + + zone = f'{sub}.example2.net.' 
+ email = 'test@example2.net' + name = f'www.{zone}' + type_ = 'a' + description = 'Test recordset' + ttl = 3600 + records = ['192.168.1.1'] + + self.addDetail('zone', content.text_content(zone)) + self.addDetail('recordset', content.text_content(name)) + + # Create a zone to hold the tested recordset + zone_obj = self.user_cloud.create_zone(name=zone, email=email) + + # Test we can create a recordset and we get it returned + created_recordset = self.user_cloud.create_recordset( + zone_obj['id'], name, type_, records, description, ttl + ) + self.addCleanup(self.cleanup, zone, created_recordset['id']) + + self.assertEqual(created_recordset['zone_id'], zone_obj['id']) + self.assertEqual(created_recordset['name'], name) + self.assertEqual(created_recordset['type'], type_.upper()) + self.assertEqual(created_recordset['records'], records) + self.assertEqual(created_recordset['description'], description) + self.assertEqual(created_recordset['ttl'], ttl) + + # Test that we can list recordsets + recordsets = self.user_cloud.list_recordsets( + zone_obj['id'], + ) + self.assertIsNotNone(recordsets) + + # Test we get the same recordset with the get_recordset method + get_recordset = self.user_cloud.get_recordset( + zone_obj['id'], created_recordset['id'] + ) + self.assertEqual(get_recordset['id'], created_recordset['id']) + + # Test we can update a field on the recordset and only that field + # is updated + updated_recordset = self.user_cloud.update_recordset( + zone_obj['id'], created_recordset['id'], ttl=7200 + ) + self.assertEqual(updated_recordset['id'], created_recordset['id']) + self.assertEqual(updated_recordset['name'], name) + self.assertEqual(updated_recordset['type'], type_.upper()) + self.assertEqual(updated_recordset['records'], records) + self.assertEqual(updated_recordset['description'], description) + self.assertEqual(updated_recordset['ttl'], 7200) + + # Test we can delete and get True returned + deleted_recordset = self.user_cloud.delete_recordset( + zone, 
created_recordset['id'] + ) + self.assertTrue(deleted_recordset) + + def test_recordsets_with_zone_name(self): + '''Test DNS recordsets functionality''' + sub = ''.join(random.choice(string.ascii_lowercase) for _ in range(6)) + + zone = f'{sub}.example2.net.' + email = 'test@example2.net' + name = f'www.{zone}' + type_ = 'a' + description = 'Test recordset' + ttl = 3600 + records = ['192.168.1.1'] + + self.addDetail('zone', content.text_content(zone)) + self.addDetail('recordset', content.text_content(name)) + + # Create a zone to hold the tested recordset + zone_obj = self.user_cloud.create_zone(name=zone, email=email) + + # Test we can create a recordset and we get it returned + created_recordset = self.user_cloud.create_recordset( + zone, name, type_, records, description, ttl + ) + self.addCleanup(self.cleanup, zone, created_recordset['id']) + + self.assertEqual(created_recordset['zone_id'], zone_obj['id']) + self.assertEqual(created_recordset['name'], name) + self.assertEqual(created_recordset['type'], type_.upper()) + self.assertEqual(created_recordset['records'], records) + self.assertEqual(created_recordset['description'], description) + self.assertEqual(created_recordset['ttl'], ttl) + + # Test that we can list recordsets + recordsets = self.user_cloud.list_recordsets(zone) + self.assertIsNotNone(recordsets) + + # Test we get the same recordset with the get_recordset method + get_recordset = self.user_cloud.get_recordset( + zone, created_recordset['id'] + ) + self.assertEqual(get_recordset['id'], created_recordset['id']) + + # Test we can update a field on the recordset and only that field + # is updated + updated_recordset = self.user_cloud.update_recordset( + zone_obj['id'], created_recordset['id'], ttl=7200 + ) + self.assertEqual(updated_recordset['id'], created_recordset['id']) + self.assertEqual(updated_recordset['name'], name) + self.assertEqual(updated_recordset['type'], type_.upper()) + self.assertEqual(updated_recordset['records'], records) + 
self.assertEqual(updated_recordset['description'], description) + self.assertEqual(updated_recordset['ttl'], 7200) + + # Test we can delete and get True returned + deleted_recordset = self.user_cloud.delete_recordset( + zone, created_recordset['id'] + ) + self.assertTrue(deleted_recordset) + + def cleanup(self, zone_name, recordset_id): + self.user_cloud.delete_recordset(zone_name, recordset_id) + self.user_cloud.delete_zone(zone_name) diff --git a/openstack/tests/functional/cloud/test_roles.py b/openstack/tests/functional/cloud/test_roles.py new file mode 100644 index 0000000000..37ce193ffa --- /dev/null +++ b/openstack/tests/functional/cloud/test_roles.py @@ -0,0 +1,341 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_roles +---------------------------------- + +Functional tests for role methods. 
+""" + +import random +import string + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestRoles(base.KeystoneBaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + self.role_prefix = 'test_role' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5) + ) + self.user_prefix = self.getUniqueString('user') + self.group_prefix = self.getUniqueString('group') + + self.addCleanup(self._cleanup_users) + self.addCleanup(self._cleanup_groups) + self.addCleanup(self._cleanup_roles) + + def _cleanup_groups(self): + exception_list = list() + for group in self.operator_cloud.list_groups(): + if group['name'].startswith(self.group_prefix): + try: + self.operator_cloud.delete_group(group['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def _cleanup_users(self): + exception_list = list() + for user in self.operator_cloud.list_users(): + if user['name'].startswith(self.user_prefix): + try: + self.operator_cloud.delete_user(user['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def _cleanup_roles(self): + exception_list = list() + for role in self.operator_cloud.list_roles(): + if role['name'].startswith(self.role_prefix): + try: + self.operator_cloud.delete_role(role['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def _create_user(self, **kwargs): + domain = self.operator_cloud.get_domain('default') + return self.operator_cloud.create_user( + domain_id=domain['id'], **kwargs + ) + + def test_list_roles(self): + roles = 
self.operator_cloud.list_roles() + self.assertIsNotNone(roles) + self.assertNotEqual([], roles) + + def test_get_role(self): + role = self.operator_cloud.get_role('admin') + self.assertIsNotNone(role) + self.assertIn('id', role) + self.assertIn('name', role) + self.assertEqual('admin', role['name']) + + def test_search_roles(self): + roles = self.operator_cloud.search_roles(filters={'name': 'admin'}) + self.assertIsNotNone(roles) + self.assertEqual(1, len(roles)) + self.assertEqual('admin', roles[0]['name']) + + def test_create_role(self): + role_name = self.role_prefix + '_create_role' + role = self.operator_cloud.create_role(role_name) + self.assertIsNotNone(role) + self.assertIn('id', role) + self.assertIn('name', role) + self.assertEqual(role_name, role['name']) + + def test_delete_role(self): + role_name = self.role_prefix + '_delete_role' + role = self.operator_cloud.create_role(role_name) + self.assertIsNotNone(role) + self.assertTrue(self.operator_cloud.delete_role(role_name)) + + # TODO(Shrews): Once we can support assigning roles within shade, we + # need to make this test a little more specific, and add more for testing + # filtering functionality. 
+ def test_list_role_assignments(self): + assignments = self.operator_cloud.list_role_assignments() + self.assertIsInstance(assignments, list) + self.assertGreater(len(assignments), 0) + + def test_list_role_assignments_v2(self): + user = self.operator_cloud.get_user('demo') + project = self.operator_cloud.get_project('demo') + assignments = self.operator_cloud.list_role_assignments( + filters={'user': user['id'], 'project': project['id']} + ) + self.assertIsInstance(assignments, list) + self.assertGreater(len(assignments), 0) + + def test_grant_revoke_role_user_project(self): + user_name = self.user_prefix + '_user_project' + user_email = 'nobody@nowhere.com' + role_name = self.role_prefix + '_grant_user_project' + role = self.operator_cloud.create_role(role_name) + user = self._create_user( + name=user_name, email=user_email, default_project='demo' + ) + self.assertTrue( + self.operator_cloud.grant_role( + role_name, user=user['id'], project='demo', wait=True + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'user': user['id'], + 'project': self.operator_cloud.get_project('demo')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue( + self.operator_cloud.revoke_role( + role_name, user=user['id'], project='demo', wait=True + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'user': user['id'], + 'project': self.operator_cloud.get_project('demo')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_group_project(self): + role_name = self.role_prefix + '_grant_group_project' + role = self.operator_cloud.create_role(role_name) + group_name = self.group_prefix + '_group_project' + group = self.operator_cloud.create_group( + name=group_name, description='test group', domain='default' + ) + self.assertTrue( + self.operator_cloud.grant_role( + 
role_name, group=group['id'], project='demo' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'group': group['id'], + 'project': self.operator_cloud.get_project('demo')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue( + self.operator_cloud.revoke_role( + role_name, group=group['id'], project='demo' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'group': group['id'], + 'project': self.operator_cloud.get_project('demo')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_user_domain(self): + role_name = self.role_prefix + '_grant_user_domain' + role = self.operator_cloud.create_role(role_name) + user_name = self.user_prefix + '_user_domain' + user_email = 'nobody@nowhere.com' + user = self._create_user( + name=user_name, email=user_email, default_project='demo' + ) + self.assertTrue( + self.operator_cloud.grant_role( + role_name, user=user['id'], domain='default' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'user': user['id'], + 'domain': self.operator_cloud.get_domain('default')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue( + self.operator_cloud.revoke_role( + role_name, user=user['id'], domain='default' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'user': user['id'], + 'domain': self.operator_cloud.get_domain('default')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_group_domain(self): + role_name = self.role_prefix + '_grant_group_domain' + role = self.operator_cloud.create_role(role_name) + group_name = self.group_prefix + '_group_domain' + group = 
self.operator_cloud.create_group( + name=group_name, description='test group', domain='default' + ) + self.assertTrue( + self.operator_cloud.grant_role( + role_name, group=group['id'], domain='default' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'group': group['id'], + 'domain': self.operator_cloud.get_domain('default')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue( + self.operator_cloud.revoke_role( + role_name, group=group['id'], domain='default' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + { + 'role': role['id'], + 'group': group['id'], + 'domain': self.operator_cloud.get_domain('default')['id'], + } + ) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_user_system(self): + role_name = self.role_prefix + '_grant_user_system' + role = self.operator_cloud.create_role(role_name) + user_name = self.user_prefix + '_user_system' + user_email = 'nobody@nowhere.com' + user = self._create_user( + name=user_name, email=user_email, default_project='demo' + ) + self.assertTrue( + self.operator_cloud.grant_role( + role_name, user=user['id'], system='all' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + {'role': role['id'], 'user': user['id'], 'system': 'all'} + ) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue( + self.operator_cloud.revoke_role( + role_name, user=user['id'], system='all' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + {'role': role['id'], 'user': user['id'], 'system': 'all'} + ) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) + + def test_grant_revoke_role_group_system(self): + role_name = self.role_prefix + '_grant_group_system' + role = self.operator_cloud.create_role(role_name) + group_name = self.group_prefix + 
'_group_system' + group = self.operator_cloud.create_group( + name=group_name, description='test group' + ) + self.assertTrue( + self.operator_cloud.grant_role( + role_name, group=group['id'], system='all' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + {'role': role['id'], 'group': group['id'], 'system': 'all'} + ) + self.assertIsInstance(assignments, list) + self.assertEqual(1, len(assignments)) + self.assertTrue( + self.operator_cloud.revoke_role( + role_name, group=group['id'], system='all' + ) + ) + assignments = self.operator_cloud.list_role_assignments( + {'role': role['id'], 'group': group['id'], 'system': 'all'} + ) + self.assertIsInstance(assignments, list) + self.assertEqual(0, len(assignments)) diff --git a/openstack/tests/functional/cloud/test_router.py b/openstack/tests/functional/cloud/test_router.py new file mode 100644 index 0000000000..e547f94ce0 --- /dev/null +++ b/openstack/tests/functional/cloud/test_router.py @@ -0,0 +1,395 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_router +---------------------------------- + +Functional tests for router methods. 
+""" + +import ipaddress + +from openstack import exceptions +from openstack.tests.functional import base + + +EXPECTED_TOPLEVEL_FIELDS = ( + 'id', + 'name', + 'is_admin_state_up', + 'external_gateway_info', + 'project_id', + 'routes', + 'status', +) + +EXPECTED_GW_INFO_FIELDS = ('network_id', 'enable_snat', 'external_fixed_ips') + + +class TestRouter(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + if not self.operator_cloud.has_service('network'): + self.skipTest('Network service not supported by cloud') + + self.router_prefix = self.getUniqueString('router') + self.network_prefix = self.getUniqueString('network') + self.subnet_prefix = self.getUniqueString('subnet') + + # NOTE(Shrews): Order matters! + self.addCleanup(self._cleanup_networks) + self.addCleanup(self._cleanup_subnets) + self.addCleanup(self._cleanup_routers) + + def _cleanup_routers(self): + exception_list = list() + for router in self.operator_cloud.list_routers(): + if router['name'].startswith(self.router_prefix): + try: + self.operator_cloud.delete_router(router['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def _cleanup_networks(self): + exception_list = list() + for network in self.operator_cloud.list_networks(): + if network['name'].startswith(self.network_prefix): + try: + self.operator_cloud.delete_network(network['name']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def _cleanup_subnets(self): + exception_list = list() + for subnet in self.operator_cloud.list_subnets(): + if subnet['name'].startswith(self.subnet_prefix): + try: + self.operator_cloud.delete_subnet(subnet['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: 
+ raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_router_basic(self): + net1_name = self.network_prefix + '_net1' + net1 = self.operator_cloud.create_network( + name=net1_name, external=True + ) + + router_name = self.router_prefix + '_create_basic' + router = self.operator_cloud.create_router( + name=router_name, + admin_state_up=True, + ext_gateway_net_id=net1['id'], + ) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, router) + + ext_gw_info = router['external_gateway_info'] + for field in EXPECTED_GW_INFO_FIELDS: + self.assertIn(field, ext_gw_info) + + self.assertEqual(router_name, router['name']) + self.assertEqual('ACTIVE', router['status']) + self.assertEqual(net1['id'], ext_gw_info['network_id']) + self.assertTrue(ext_gw_info['enable_snat']) + + def test_create_router_project(self): + project = self.operator_cloud.get_project('demo') + self.assertIsNotNone(project) + proj_id = project['id'] + net1_name = self.network_prefix + '_net1' + net1 = self.operator_cloud.create_network( + name=net1_name, external=True, project_id=proj_id + ) + + router_name = self.router_prefix + '_create_project' + router = self.operator_cloud.create_router( + name=router_name, + admin_state_up=True, + ext_gateway_net_id=net1['id'], + project_id=proj_id, + ) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, router) + + ext_gw_info = router['external_gateway_info'] + for field in EXPECTED_GW_INFO_FIELDS: + self.assertIn(field, ext_gw_info) + + self.assertEqual(router_name, router['name']) + self.assertEqual('ACTIVE', router['status']) + self.assertEqual(proj_id, router['project_id']) + self.assertEqual(net1['id'], ext_gw_info['network_id']) + self.assertTrue(ext_gw_info['enable_snat']) + + def _create_and_verify_advanced_router( + self, external_cidr, external_gateway_ip=None + ): + # external_cidr must be passed in as unicode (u'') + # NOTE(Shrews): The arguments are needed because these tests + # will run in parallel 
and we want to make sure that each test + # is using different resources to prevent race conditions. + net1_name = self.network_prefix + '_net1' + sub1_name = self.subnet_prefix + '_sub1' + net1 = self.operator_cloud.create_network( + name=net1_name, external=True + ) + sub1 = self.operator_cloud.create_subnet( + net1['id'], + external_cidr, + subnet_name=sub1_name, + gateway_ip=external_gateway_ip, + ) + + ip_net = ipaddress.IPv4Network(external_cidr) + last_ip = str(list(ip_net.hosts())[-1]) + + router_name = self.router_prefix + '_create_advanced' + router = self.operator_cloud.create_router( + name=router_name, + admin_state_up=False, + ext_gateway_net_id=net1['id'], + enable_snat=False, + ext_fixed_ips=[{'subnet_id': sub1['id'], 'ip_address': last_ip}], + ) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, router) + + ext_gw_info = router['external_gateway_info'] + for field in EXPECTED_GW_INFO_FIELDS: + self.assertIn(field, ext_gw_info) + + self.assertEqual(router_name, router['name']) + self.assertEqual('ACTIVE', router['status']) + self.assertFalse(router['admin_state_up']) + + self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) + self.assertEqual( + sub1['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id'] + ) + self.assertEqual( + last_ip, ext_gw_info['external_fixed_ips'][0]['ip_address'] + ) + + return router + + def test_create_router_advanced(self): + self._create_and_verify_advanced_router(external_cidr='10.2.2.0/24') + + def test_add_remove_router_interface(self): + router = self._create_and_verify_advanced_router( + external_cidr='10.3.3.0/24' + ) + net_name = self.network_prefix + '_intnet1' + sub_name = self.subnet_prefix + '_intsub1' + net = self.operator_cloud.create_network(name=net_name) + sub = self.operator_cloud.create_subnet( + net['id'], + '10.4.4.0/24', + subnet_name=sub_name, + gateway_ip='10.4.4.1', + ) + + iface = self.operator_cloud.add_router_interface( + router, subnet_id=sub['id'] + ) + 
self.assertIsNone( + self.operator_cloud.remove_router_interface( + router, subnet_id=sub['id'] + ) + ) + + # Test return values *after* the interface is detached so the + # resources we've created can be cleaned up if these asserts fail. + self.assertIsNotNone(iface) + for key in ('id', 'subnet_id', 'port_id', 'project_id'): + self.assertIn(key, iface) + self.assertEqual(router['id'], iface['id']) + self.assertEqual(sub['id'], iface['subnet_id']) + + def test_list_router_interfaces(self): + router = self._create_and_verify_advanced_router( + external_cidr='10.5.5.0/24' + ) + net_name = self.network_prefix + '_intnet1' + sub_name = self.subnet_prefix + '_intsub1' + net = self.operator_cloud.create_network(name=net_name) + sub = self.operator_cloud.create_subnet( + net['id'], + '10.6.6.0/24', + subnet_name=sub_name, + gateway_ip='10.6.6.1', + ) + + iface = self.operator_cloud.add_router_interface( + router, subnet_id=sub['id'] + ) + all_ifaces = self.operator_cloud.list_router_interfaces(router) + int_ifaces = self.operator_cloud.list_router_interfaces( + router, interface_type='internal' + ) + ext_ifaces = self.operator_cloud.list_router_interfaces( + router, interface_type='external' + ) + self.assertIsNone( + self.operator_cloud.remove_router_interface( + router, subnet_id=sub['id'] + ) + ) + + # Test return values *after* the interface is detached so the + # resources we've created can be cleaned up if these asserts fail. 
+ self.assertIsNotNone(iface) + self.assertEqual(2, len(all_ifaces)) + self.assertEqual(1, len(int_ifaces)) + self.assertEqual(1, len(ext_ifaces)) + + ext_fixed_ips = router['external_gateway_info']['external_fixed_ips'] + self.assertEqual( + ext_fixed_ips[0]['subnet_id'], + ext_ifaces[0]['fixed_ips'][0]['subnet_id'], + ) + self.assertEqual(sub['id'], int_ifaces[0]['fixed_ips'][0]['subnet_id']) + + def test_update_router_name(self): + router = self._create_and_verify_advanced_router( + external_cidr='10.7.7.0/24' + ) + + new_name = self.router_prefix + '_update_name' + updated = self.operator_cloud.update_router( + router['id'], name=new_name + ) + self.assertIsNotNone(updated) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, updated) + + # Name is the only change we expect + self.assertEqual(new_name, updated['name']) + + # Validate nothing else changed + self.assertEqual(router['status'], updated['status']) + self.assertEqual(router['admin_state_up'], updated['admin_state_up']) + self.assertEqual( + router['external_gateway_info'], updated['external_gateway_info'] + ) + + def test_update_router_routes(self): + router = self._create_and_verify_advanced_router( + external_cidr='10.7.7.0/24' + ) + + routes = [{"destination": "10.7.7.0/24", "nexthop": "10.7.7.99"}] + + updated = self.operator_cloud.update_router( + router['id'], routes=routes + ) + self.assertIsNotNone(updated) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, updated) + + # Name is the only change we expect + self.assertEqual(routes, updated['routes']) + + # Validate nothing else changed + self.assertEqual(router['status'], updated['status']) + self.assertEqual(router['admin_state_up'], updated['admin_state_up']) + self.assertEqual( + router['external_gateway_info'], updated['external_gateway_info'] + ) + + def test_update_router_admin_state(self): + router = self._create_and_verify_advanced_router( + external_cidr='10.8.8.0/24' + ) + + updated = 
self.operator_cloud.update_router( + router['id'], admin_state_up=True + ) + self.assertIsNotNone(updated) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, updated) + + # admin_state_up is the only change we expect + self.assertTrue(updated['admin_state_up']) + self.assertNotEqual( + router['admin_state_up'], updated['admin_state_up'] + ) + + # Validate nothing else changed + self.assertEqual(router['status'], updated['status']) + self.assertEqual(router['name'], updated['name']) + self.assertEqual( + router['external_gateway_info'], updated['external_gateway_info'] + ) + + def test_update_router_ext_gw_info(self): + router = self._create_and_verify_advanced_router( + external_cidr='10.9.9.0/24' + ) + + # create a new subnet + existing_net_id = router['external_gateway_info']['network_id'] + sub_name = self.subnet_prefix + '_update' + sub = self.operator_cloud.create_subnet( + existing_net_id, + '10.10.10.0/24', + subnet_name=sub_name, + gateway_ip='10.10.10.1', + ) + + updated = self.operator_cloud.update_router( + router['id'], + ext_gateway_net_id=existing_net_id, + ext_fixed_ips=[ + {'subnet_id': sub['id'], 'ip_address': '10.10.10.77'} + ], + ) + self.assertIsNotNone(updated) + + for field in EXPECTED_TOPLEVEL_FIELDS: + self.assertIn(field, updated) + + # external_gateway_info is the only change we expect + ext_gw_info = updated['external_gateway_info'] + self.assertEqual(1, len(ext_gw_info['external_fixed_ips'])) + self.assertEqual( + sub['id'], ext_gw_info['external_fixed_ips'][0]['subnet_id'] + ) + self.assertEqual( + '10.10.10.77', ext_gw_info['external_fixed_ips'][0]['ip_address'] + ) + + # Validate nothing else changed + self.assertEqual(router['status'], updated['status']) + self.assertEqual(router['name'], updated['name']) + self.assertEqual(router['admin_state_up'], updated['admin_state_up']) diff --git a/openstack/tests/functional/cloud/test_security_groups.py b/openstack/tests/functional/cloud/test_security_groups.py new file mode 
100644 index 0000000000..e452ec62d5 --- /dev/null +++ b/openstack/tests/functional/cloud/test_security_groups.py @@ -0,0 +1,80 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_security_groups +---------------------------------- + +Functional tests for security_groups resource. +""" + +from openstack.tests.functional import base + + +class TestSecurityGroups(base.BaseFunctionalTest): + def test_create_list_security_groups(self): + sg1 = self.user_cloud.create_security_group( + name="sg1", description="sg1" + ) + self.addCleanup(self.user_cloud.delete_security_group, sg1['id']) + if self.user_cloud.has_service('network'): + # Neutron defaults to all_tenants=1 when admin + sg_list = self.user_cloud.list_security_groups() + self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) + + else: + # Nova does not list all tenants by default + sg_list = self.operator_cloud.list_security_groups() + + def test_create_list_security_groups_operator(self): + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + sg1 = self.user_cloud.create_security_group( + name="sg1", description="sg1" + ) + self.addCleanup(self.user_cloud.delete_security_group, sg1['id']) + sg2 = self.operator_cloud.create_security_group( + name="sg2", description="sg2" + ) + self.addCleanup(self.operator_cloud.delete_security_group, sg2['id']) + + if self.user_cloud.has_service('network'): + # Neutron defaults to all_tenants=1 when admin + sg_list = 
self.operator_cloud.list_security_groups() + self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) + + # Filter by tenant_id (filtering by project_id won't work with + # Keystone V2) + sg_list = self.operator_cloud.list_security_groups( + filters={'tenant_id': self.user_cloud.current_project_id} + ) + self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) + self.assertNotIn(sg2['id'], [sg['id'] for sg in sg_list]) + + else: + # Nova does not list all tenants by default + sg_list = self.operator_cloud.list_security_groups() + self.assertIn(sg2['id'], [sg['id'] for sg in sg_list]) + self.assertNotIn(sg1['id'], [sg['id'] for sg in sg_list]) + + sg_list = self.operator_cloud.list_security_groups( + filters={'all_tenants': 1} + ) + self.assertIn(sg1['id'], [sg['id'] for sg in sg_list]) + + def test_get_security_group_by_id(self): + sg = self.user_cloud.create_security_group(name='sg', description='sg') + self.addCleanup(self.user_cloud.delete_security_group, sg['id']) + + ret_sg = self.user_cloud.get_security_group_by_id(sg['id']) + self.assertEqual(sg, ret_sg) diff --git a/openstack/tests/functional/cloud/test_server_group.py b/openstack/tests/functional/cloud/test_server_group.py new file mode 100644 index 0000000000..9ec0f360ee --- /dev/null +++ b/openstack/tests/functional/cloud/test_server_group.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_server_group +---------------------------------- + +Functional tests for server_group resource. 
+""" + +from openstack.tests.functional import base + + +class TestServerGroup(base.BaseFunctionalTest): + def test_server_group(self): + server_group_name = self.getUniqueString() + self.addCleanup(self.cleanup, server_group_name) + server_group = self.user_cloud.create_server_group( + server_group_name, ['affinity'] + ) + + server_group_ids = [ + v['id'] for v in self.user_cloud.list_server_groups() + ] + self.assertIn(server_group['id'], server_group_ids) + + self.user_cloud.delete_server_group(server_group_name) + + def cleanup(self, server_group_name): + server_group = self.user_cloud.get_server_group(server_group_name) + if server_group: + self.user_cloud.delete_server_group(server_group['id']) diff --git a/openstack/tests/functional/cloud/test_services.py b/openstack/tests/functional/cloud/test_services.py new file mode 100644 index 0000000000..48df7cd9f3 --- /dev/null +++ b/openstack/tests/functional/cloud/test_services.py @@ -0,0 +1,131 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_services +---------------------------------- + +Functional tests for service resource. 
+""" + +import random +import string + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestServices(base.KeystoneBaseFunctionalTest): + service_attributes = ['id', 'name', 'type', 'description'] + + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + # Generate a random name for services in this test + self.new_service_name = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(5) + ) + + self.addCleanup(self._cleanup_services) + + def _cleanup_services(self): + exception_list = list() + for s in self.operator_cloud.list_services(): + if s['name'] is not None and s['name'].startswith( + self.new_service_name + ): + try: + self.operator_cloud.delete_service(name_or_id=s['id']) + except Exception as e: + # We were unable to delete a service, let's try with next + exception_list.append(str(e)) + continue + if exception_list: + # Raise an error: we must make users aware that something went + # wrong + raise exceptions.SDKException('\n'.join(exception_list)) + + def test_create_service(self): + service = self.operator_cloud.create_service( + name=self.new_service_name + '_create', + type='test_type', + description='this is a test description', + ) + self.assertIsNotNone(service.get('id')) + + def test_update_service(self): + service = self.operator_cloud.create_service( + name=self.new_service_name + '_create', + type='test_type', + description='this is a test description', + enabled=True, + ) + new_service = self.operator_cloud.update_service( + service.id, + name=self.new_service_name + '_update', + description='this is an updated description', + enabled=False, + ) + self.assertEqual(new_service.name, self.new_service_name + '_update') + self.assertEqual( + new_service.description, 'this is an updated description' + ) + self.assertFalse(new_service.is_enabled) + self.assertEqual(service.id, new_service.id) + + def 
test_list_services(self): + service = self.operator_cloud.create_service( + name=self.new_service_name + '_list', type='test_type' + ) + observed_services = self.operator_cloud.list_services() + self.assertIsInstance(observed_services, list) + found = False + for s in observed_services: + # Test all attributes are returned + if s['id'] == service['id']: + self.assertEqual( + self.new_service_name + '_list', s.get('name') + ) + self.assertEqual('test_type', s.get('type')) + found = True + self.assertTrue(found, msg='new service not found in service list!') + + def test_delete_service_by_name(self): + # Test delete by name + service = self.operator_cloud.create_service( + name=self.new_service_name + '_delete_by_name', type='test_type' + ) + self.operator_cloud.delete_service(name_or_id=service['name']) + observed_services = self.operator_cloud.list_services() + found = False + for s in observed_services: + if s['id'] == service['id']: + found = True + break + self.assertEqual(False, found, message='service was not deleted!') + + def test_delete_service_by_id(self): + # Test delete by id + service = self.operator_cloud.create_service( + name=self.new_service_name + '_delete_by_id', type='test_type' + ) + self.operator_cloud.delete_service(name_or_id=service['id']) + observed_services = self.operator_cloud.list_services() + found = False + for s in observed_services: + if s['id'] == service['id']: + found = True + self.assertEqual(False, found, message='service was not deleted!') diff --git a/openstack/tests/functional/cloud/test_stack.py b/openstack/tests/functional/cloud/test_stack.py new file mode 100644 index 0000000000..1a3a17bdc7 --- /dev/null +++ b/openstack/tests/functional/cloud/test_stack.py @@ -0,0 +1,177 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_stack +---------------------------------- + +Functional tests for stack methods. +""" + +import tempfile + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.functional import base + +simple_template = '''heat_template_version: 2014-10-16 +parameters: + length: + type: number + default: 10 + +resources: + my_rand: + type: OS::Heat::RandomString + properties: + length: {get_param: length} +outputs: + rand: + value: + get_attr: [my_rand, value] +''' + +root_template = '''heat_template_version: 2014-10-16 +parameters: + length: + type: number + default: 10 + count: + type: number + default: 5 + +resources: + my_rands: + type: OS::Heat::ResourceGroup + properties: + count: {get_param: count} + resource_def: + type: My::Simple::Template + properties: + length: {get_param: length} +outputs: + rands: + value: + get_attr: [my_rands, attributes, rand] +''' + +environment = ''' +resource_registry: + My::Simple::Template: %s +''' + +validate_template = '''heat_template_version: asdf-no-such-version ''' + + +class TestStack(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.has_service('orchestration'): + self.skipTest('Orchestration service not supported by cloud') + + def _cleanup_stack(self): + self.user_cloud.delete_stack(self.stack_name, wait=True) + self.assertIsNone(self.user_cloud.get_stack(self.stack_name)) + + def test_stack_validation(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(validate_template.encode('utf-8')) + 
test_template.close() + stack_name = self.getUniqueString('validate_template') + self.assertRaises( + exceptions.SDKException, + self.user_cloud.create_stack, + name=stack_name, + template_file=test_template.name, + ) + + def test_stack_simple(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + self.stack_name = self.getUniqueString('simple_stack') + self.addCleanup(self._cleanup_stack) + stack = self.user_cloud.create_stack( + name=self.stack_name, template_file=test_template.name, wait=True + ) + + # assert expected values in stack + self.assertEqual('CREATE_COMPLETE', stack['stack_status']) + rand = stack['outputs'][0]['output_value'] + self.assertEqual(10, len(rand)) + + # assert get_stack matches returned create_stack + stack = self.user_cloud.get_stack(self.stack_name) + self.assertEqual('CREATE_COMPLETE', stack['stack_status']) + self.assertEqual(rand, stack['outputs'][0]['output_value']) + + # assert stack is in list_stacks + stacks = self.user_cloud.list_stacks() + stack_ids = [s['id'] for s in stacks] + self.assertIn(stack['id'], stack_ids) + + # update with no changes + stack = self.user_cloud.update_stack( + self.stack_name, template_file=test_template.name, wait=True + ) + + # assert no change in updated stack + self.assertEqual('UPDATE_COMPLETE', stack['stack_status']) + rand = stack['outputs'][0]['output_value'] + self.assertEqual(rand, stack['outputs'][0]['output_value']) + + # update with changes + stack = self.user_cloud.update_stack( + self.stack_name, + template_file=test_template.name, + wait=True, + length=12, + ) + + # assert changed output in updated stack + stack = self.user_cloud.get_stack(self.stack_name) + self.assertEqual('UPDATE_COMPLETE', stack['stack_status']) + new_rand = stack['outputs'][0]['output_value'] + self.assertNotEqual(rand, new_rand) + self.assertEqual(12, len(new_rand)) + + def test_stack_nested(self): + test_template = 
tempfile.NamedTemporaryFile( + suffix='.yaml', delete=False + ) + test_template.write(root_template.encode('utf-8')) + test_template.close() + + simple_tmpl = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False) + simple_tmpl.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + simple_tmpl.close() + + env = tempfile.NamedTemporaryFile(suffix='.yaml', delete=False) + expanded_env = environment % simple_tmpl.name + env.write(expanded_env.encode('utf-8')) + env.close() + + self.stack_name = self.getUniqueString('nested_stack') + self.addCleanup(self._cleanup_stack) + stack = self.user_cloud.create_stack( + name=self.stack_name, + template_file=test_template.name, + environment_files=[env.name], + wait=True, + ) + + # assert expected values in stack + self.assertEqual('CREATE_COMPLETE', stack['stack_status']) + rands = stack['outputs'][0]['output_value'] + self.assertEqual(['0', '1', '2', '3', '4'], sorted(rands.keys())) + for rand in rands.values(): + self.assertEqual(10, len(rand)) diff --git a/openstack/tests/functional/cloud/test_users.py b/openstack/tests/functional/cloud/test_users.py new file mode 100644 index 0000000000..9c21bee0ec --- /dev/null +++ b/openstack/tests/functional/cloud/test_users.py @@ -0,0 +1,178 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_users +---------------------------------- + +Functional tests for user methods. 
+""" + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestUsers(base.KeystoneBaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + self.user_prefix = self.getUniqueString('user') + self.addCleanup(self._cleanup_users) + + def _cleanup_users(self): + exception_list = list() + for user in self.operator_cloud.list_users(): + if user['name'].startswith(self.user_prefix): + try: + self.operator_cloud.delete_user(user['id']) + except Exception as e: + exception_list.append(str(e)) + continue + + if exception_list: + raise exceptions.SDKException('\n'.join(exception_list)) + + def _create_user(self, **kwargs): + domain = self.operator_cloud.get_domain('default') + return self.operator_cloud.create_user( + domain_id=domain['id'], **kwargs + ) + + def test_list_users(self): + users = self.operator_cloud.list_users() + self.assertIsNotNone(users) + self.assertNotEqual([], users) + + def test_get_user(self): + user = self.operator_cloud.get_user('admin') + self.assertIsNotNone(user) + self.assertIn('id', user) + self.assertIn('name', user) + self.assertEqual('admin', user['name']) + + def test_search_users(self): + users = self.operator_cloud.search_users(filters={'is_enabled': True}) + self.assertIsNotNone(users) + + def test_search_users_jmespath(self): + users = self.operator_cloud.search_users(filters="[?enabled]") + self.assertIsNotNone(users) + + def test_create_user(self): + user_name = self.user_prefix + '_create' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, email=user_email) + self.assertIsNotNone(user) + self.assertEqual(user_name, user['name']) + self.assertEqual(user_email, user['email']) + self.assertTrue(user['is_enabled']) + + def test_delete_user(self): + user_name = self.user_prefix + '_delete' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, 
email=user_email) + self.assertIsNotNone(user) + self.assertTrue(self.operator_cloud.delete_user(user['id'])) + + def test_delete_user_not_found(self): + self.assertFalse(self.operator_cloud.delete_user('does_not_exist')) + + def test_update_user(self): + user_name = self.user_prefix + '_updatev3' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, email=user_email) + self.assertIsNotNone(user) + self.assertTrue(user['is_enabled']) + + # Pass some keystone v3 params. This should work no matter which + # version of keystone we are testing against. + new_user = self.operator_cloud.update_user( + user['id'], + name=user_name + '2', + email='somebody@nowhere.com', + enabled=False, + password='secret', + description='', + ) + self.assertIsNotNone(new_user) + self.assertEqual(user['id'], new_user['id']) + self.assertEqual(user_name + '2', new_user['name']) + self.assertEqual('somebody@nowhere.com', new_user['email']) + self.assertFalse(new_user['is_enabled']) + + def test_update_user_password(self): + user_name = self.user_prefix + '_password' + user_email = 'nobody@nowhere.com' + user = self._create_user( + name=user_name, email=user_email, password='old_secret' + ) + self.assertIsNotNone(user) + self.assertTrue(user['enabled']) + + # This should work for both v2 and v3 + new_user = self.operator_cloud.update_user( + user['id'], password='new_secret' + ) + self.assertIsNotNone(new_user) + self.assertEqual(user['id'], new_user['id']) + self.assertEqual(user_name, new_user['name']) + self.assertEqual(user_email, new_user['email']) + self.assertTrue(new_user['enabled']) + self.assertTrue( + self.operator_cloud.grant_role( + 'member', user=user['id'], project='demo', wait=True + ) + ) + self.addCleanup( + self.operator_cloud.revoke_role, + 'member', + user=user['id'], + project='demo', + wait=True, + ) + + new_cloud = self.operator_cloud.connect_as( + user_id=user['id'], password='new_secret', project_name='demo' + ) + + 
self.assertIsNotNone(new_cloud) + location = new_cloud.current_location + self.assertEqual(location['project']['name'], 'demo') + self.assertIsNotNone(new_cloud.service_catalog) + + def test_users_and_groups(self): + group_name = self.getUniqueString('group') + self.addCleanup(self.operator_cloud.delete_group, group_name) + + # Create a group + group = self.operator_cloud.create_group(group_name, 'test group') + self.assertIsNotNone(group) + + # Create a user + user_name = self.user_prefix + '_ug' + user_email = 'nobody@nowhere.com' + user = self._create_user(name=user_name, email=user_email) + self.assertIsNotNone(user) + + # Add the user to the group + self.operator_cloud.add_user_to_group(user_name, group_name) + self.assertTrue( + self.operator_cloud.is_user_in_group(user_name, group_name) + ) + + # Remove them from the group + self.operator_cloud.remove_user_from_group(user_name, group_name) + self.assertFalse( + self.operator_cloud.is_user_in_group(user_name, group_name) + ) diff --git a/openstack/tests/functional/cloud/test_volume.py b/openstack/tests/functional/cloud/test_volume.py new file mode 100644 index 0000000000..727e38fcf6 --- /dev/null +++ b/openstack/tests/functional/cloud/test_volume.py @@ -0,0 +1,166 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_volume +---------------------------------- + +Functional tests for block storage methods. 
+""" + +from fixtures import TimeoutException +from testtools import content + +from openstack import exceptions +from openstack.tests.functional import base +from openstack import utils + + +class TestVolume(base.BaseFunctionalTest): + # Creating and deleting volumes is slow + TIMEOUT_SCALING_FACTOR = 1.5 + + def setUp(self): + super().setUp() + self.skipTest('Volume functional tests temporarily disabled') + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + + def test_volumes(self): + '''Test volume and snapshot functionality''' + volume_name = self.getUniqueString() + snapshot_name = self.getUniqueString() + self.addDetail('volume', content.text_content(volume_name)) + self.addCleanup(self.cleanup, volume_name, snapshot_name=snapshot_name) + volume = self.user_cloud.create_volume( + display_name=volume_name, size=1 + ) + snapshot = self.user_cloud.create_volume_snapshot( + volume['id'], display_name=snapshot_name + ) + + ret_volume = self.user_cloud.get_volume_by_id(volume['id']) + self.assertEqual(volume['id'], ret_volume['id']) + + volume_ids = [v['id'] for v in self.user_cloud.list_volumes()] + self.assertIn(volume['id'], volume_ids) + + snapshot_list = self.user_cloud.list_volume_snapshots() + snapshot_ids = [s['id'] for s in snapshot_list] + self.assertIn(snapshot['id'], snapshot_ids) + + ret_snapshot = self.user_cloud.get_volume_snapshot_by_id( + snapshot['id'] + ) + self.assertEqual(snapshot['id'], ret_snapshot['id']) + + self.user_cloud.delete_volume_snapshot(snapshot_name, wait=True) + self.user_cloud.delete_volume(volume_name, wait=True) + + def test_volume_to_image(self): + '''Test volume export to image functionality''' + volume_name = self.getUniqueString() + image_name = self.getUniqueString() + self.addDetail('volume', content.text_content(volume_name)) + self.addCleanup(self.cleanup, volume_name, image_name=image_name) + volume = self.user_cloud.create_volume( + display_name=volume_name, 
size=1 + ) + image = self.user_cloud.create_image( + image_name, volume=volume, wait=True + ) + + volume_ids = [v['id'] for v in self.user_cloud.list_volumes()] + self.assertIn(volume['id'], volume_ids) + + image_list = self.user_cloud.list_images() + image_ids = [s['id'] for s in image_list] + self.assertIn(image['id'], image_ids) + + self.user_cloud.delete_image(image_name, wait=True) + self.user_cloud.delete_volume(volume_name, wait=True) + + def cleanup(self, volume, snapshot_name=None, image_name=None): + # Need to delete snapshots before volumes + if snapshot_name: + snapshot = self.user_cloud.get_volume_snapshot(snapshot_name) + if snapshot: + self.user_cloud.delete_volume_snapshot( + snapshot_name, wait=True + ) + if image_name: + image = self.user_cloud.get_image(image_name) + if image: + self.user_cloud.delete_image(image_name, wait=True) + if not isinstance(volume, list): + self.user_cloud.delete_volume(volume, wait=True) + else: + # We have more than one volume to clean up - submit all of the + # deletes without wait, then poll until none of them are found + # in the volume list anymore + for v in volume: + self.user_cloud.delete_volume(v, wait=False) + try: + for count in utils.iterate_timeout( + 180, "Timeout waiting for volume cleanup" + ): + found = False + for existing in self.user_cloud.list_volumes(): + for v in volume: + if v['id'] == existing['id']: + found = True + break + if found: + break + if not found: + break + except (exceptions.ResourceTimeout, TimeoutException): + # NOTE(slaweq): ups, some volumes are still not removed + # so we should try to force delete it once again and move + # forward + for existing in self.user_cloud.list_volumes(): + for v in volume: + if v['id'] == existing['id']: + self.operator_cloud.delete_volume( + v, wait=False, force=True + ) + + def test_list_volumes_pagination(self): + '''Test pagination for list volumes functionality''' + + volumes = [] + # the number of created volumes needs to be higher than + # 
CONF.osapi_max_limit but not higher than volume quotas for + # the test user in the tenant(default quotas is set to 10) + num_volumes = 8 + for i in range(num_volumes): + name = self.getUniqueString() + v = self.user_cloud.create_volume(display_name=name, size=1) + volumes.append(v) + self.addCleanup(self.cleanup, volumes) + result = [] + for v in self.user_cloud.list_volumes(): + if v['name'] and v['name'].startswith(self.id()): + result.append(v['id']) + self.assertEqual(sorted([v['id'] for v in volumes]), sorted(result)) + + def test_update_volume(self): + name, desc = self.getUniqueString('name'), self.getUniqueString('desc') + self.addCleanup(self.cleanup, name) + volume = self.user_cloud.create_volume(1, name=name, description=desc) + self.assertEqual(volume.name, name) + self.assertEqual(volume.description, desc) + new_name = self.getUniqueString('name') + volume = self.user_cloud.update_volume(volume.id, name=new_name) + self.assertNotEqual(volume.name, name) + self.assertEqual(volume.name, new_name) + self.assertEqual(volume.description, desc) diff --git a/openstack/tests/functional/cloud/test_volume_backup.py b/openstack/tests/functional/cloud/test_volume_backup.py new file mode 100644 index 0000000000..acf6c78129 --- /dev/null +++ b/openstack/tests/functional/cloud/test_volume_backup.py @@ -0,0 +1,121 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack.tests.functional import base + + +class TestVolume(base.BaseFunctionalTest): + # Creating a volume backup is incredibly slow. + TIMEOUT_SCALING_FACTOR = 1.5 + + def setUp(self): + super().setUp() + self.skipTest('Volume functional tests temporarily disabled') + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + + if not self.user_cloud.has_service('object-store'): + self.skipTest('volume backups require swift') + + def test_create_get_delete_volume_backup(self): + volume = self.user_cloud.create_volume( + display_name=self.getUniqueString(), size=1 + ) + self.addCleanup(self.user_cloud.delete_volume, volume['id']) + + backup_name_1 = self.getUniqueString() + backup_desc_1 = self.getUniqueString() + backup = self.user_cloud.create_volume_backup( + volume_id=volume['id'], + name=backup_name_1, + description=backup_desc_1, + wait=True, + ) + self.assertEqual(backup_name_1, backup['name']) + + backup = self.user_cloud.get_volume_backup(backup['id']) + self.assertEqual("available", backup['status']) + self.assertEqual(backup_desc_1, backup['description']) + + self.user_cloud.delete_volume_backup(backup['id'], wait=True) + self.assertIsNone(self.user_cloud.get_volume_backup(backup['id'])) + + def test_create_get_delete_volume_backup_from_snapshot(self): + volume = self.user_cloud.create_volume(size=1) + snapshot = self.user_cloud.create_volume_snapshot(volume['id']) + self.addCleanup(self.user_cloud.delete_volume, volume['id']) + self.addCleanup( + self.user_cloud.delete_volume_snapshot, snapshot['id'], wait=True + ) + + backup = self.user_cloud.create_volume_backup( + volume_id=volume['id'], snapshot_id=snapshot['id'], wait=True + ) + + backup = self.user_cloud.get_volume_backup(backup['id']) + self.assertEqual(backup['snapshot_id'], snapshot['id']) + + self.user_cloud.delete_volume_backup(backup['id'], wait=True) + self.assertIsNone(self.user_cloud.get_volume_backup(backup['id'])) + + def 
test_create_get_delete_incremental_volume_backup(self): + volume = self.user_cloud.create_volume(size=1) + self.addCleanup(self.user_cloud.delete_volume, volume['id']) + + full_backup = self.user_cloud.create_volume_backup( + volume_id=volume['id'], wait=True + ) + incr_backup = self.user_cloud.create_volume_backup( + volume_id=volume['id'], incremental=True, wait=True + ) + + full_backup = self.user_cloud.get_volume_backup(full_backup['id']) + incr_backup = self.user_cloud.get_volume_backup(incr_backup['id']) + self.assertEqual(full_backup['has_dependent_backups'], True) + self.assertEqual(incr_backup['is_incremental'], True) + + self.user_cloud.delete_volume_backup(incr_backup['id'], wait=True) + self.user_cloud.delete_volume_backup(full_backup['id'], wait=True) + self.assertIsNone(self.user_cloud.get_volume_backup(full_backup['id'])) + self.assertIsNone(self.user_cloud.get_volume_backup(incr_backup['id'])) + + def test_list_volume_backups(self): + vol1 = self.user_cloud.create_volume( + display_name=self.getUniqueString(), size=1 + ) + self.addCleanup(self.user_cloud.delete_volume, vol1['id']) + + # We create 2 volumes to create 2 backups. We could have created 2 + # backups from the same volume but taking 2 successive backups seems + # to be race-condition prone. And I didn't want to use an ugly sleep() + # here. 
+ vol2 = self.user_cloud.create_volume( + display_name=self.getUniqueString(), size=1 + ) + self.addCleanup(self.user_cloud.delete_volume, vol2['id']) + + backup_name_1 = self.getUniqueString() + backup = self.user_cloud.create_volume_backup( + volume_id=vol1['id'], name=backup_name_1 + ) + self.addCleanup(self.user_cloud.delete_volume_backup, backup['id']) + + backup = self.user_cloud.create_volume_backup(volume_id=vol2['id']) + self.addCleanup(self.user_cloud.delete_volume_backup, backup['id']) + + backups = self.user_cloud.list_volume_backups() + self.assertEqual(2, len(backups)) + + backups = self.user_cloud.list_volume_backups( + filters={"name": backup_name_1} + ) + self.assertEqual(1, len(backups)) + self.assertEqual(backup_name_1, backups[0]['name']) diff --git a/openstack/tests/functional/cloud/test_volume_type.py b/openstack/tests/functional/cloud/test_volume_type.py new file mode 100644 index 0000000000..bfdf011c57 --- /dev/null +++ b/openstack/tests/functional/cloud/test_volume_type.py @@ -0,0 +1,122 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_volume +---------------------------------- + +Functional tests for block storage methods. 
+""" + +import testtools + +from openstack import exceptions +from openstack.tests.functional import base + + +class TestVolumeType(base.BaseFunctionalTest): + def _assert_project(self, volume_name_or_id, project_id, allowed=True): + acls = self.operator_cloud.get_volume_type_access(volume_name_or_id) + allowed_projects = [x.get('project_id') for x in acls] + self.assertEqual(allowed, project_id in allowed_projects) + + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + if not self.user_cloud.has_service('volume'): + self.skipTest('volume service not supported by cloud') + volume_type = { + "name": 'test-volume-type', + "description": None, + "os-volume-type-access:is_public": False, + } + self.operator_cloud.block_storage.post( + '/types', json={'volume_type': volume_type} + ) + + def tearDown(self): + ret = self.operator_cloud.get_volume_type('test-volume-type') + if ret.get('id'): + self.operator_cloud.block_storage.delete(f'/types/{ret.id}') + super().tearDown() + + def test_list_volume_types(self): + volume_types = self.operator_cloud.list_volume_types() + self.assertTrue(volume_types) + self.assertTrue( + any(x for x in volume_types if x.name == 'test-volume-type') + ) + + def test_add_remove_volume_type_access(self): + volume_type = self.operator_cloud.get_volume_type('test-volume-type') + self.assertEqual('test-volume-type', volume_type.name) + + self.operator_cloud.add_volume_type_access( + 'test-volume-type', self.operator_cloud.current_project_id + ) + self._assert_project( + 'test-volume-type', + self.operator_cloud.current_project_id, + allowed=True, + ) + + self.operator_cloud.remove_volume_type_access( + 'test-volume-type', self.operator_cloud.current_project_id + ) + self._assert_project( + 'test-volume-type', + self.operator_cloud.current_project_id, + allowed=False, + ) + + def test_add_volume_type_access_missing_project(self): + # Project id is not valitaded and it may 
not exist. + self.operator_cloud.add_volume_type_access( + 'test-volume-type', '00000000000000000000000000000000' + ) + + self.operator_cloud.remove_volume_type_access( + 'test-volume-type', '00000000000000000000000000000000' + ) + + def test_add_volume_type_access_missing_volume(self): + with testtools.ExpectedException( + exceptions.SDKException, "VolumeType not found.*" + ): + self.operator_cloud.add_volume_type_access( + 'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id + ) + + def test_remove_volume_type_access_missing_volume(self): + with testtools.ExpectedException( + exceptions.SDKException, "VolumeType not found.*" + ): + self.operator_cloud.remove_volume_type_access( + 'MISSING_VOLUME_TYPE', self.operator_cloud.current_project_id + ) + + def test_add_volume_type_access_bad_project(self): + with testtools.ExpectedException( + exceptions.BadRequestException, "Unable to authorize.*" + ): + self.operator_cloud.add_volume_type_access( + 'test-volume-type', 'BAD_PROJECT_ID' + ) + + def test_remove_volume_type_access_missing_project(self): + with testtools.ExpectedException( + exceptions.NotFoundException, "Unable to revoke.*" + ): + self.operator_cloud.remove_volume_type_access( + 'test-volume-type', '00000000000000000000000000000000' + ) diff --git a/openstack/tests/functional/cloud/test_zone.py b/openstack/tests/functional/cloud/test_zone.py new file mode 100644 index 0000000000..0542da6942 --- /dev/null +++ b/openstack/tests/functional/cloud/test_zone.py @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_zone +---------------------------------- + +Functional tests for zone methods. +""" + +from testtools import content + +from openstack.tests.functional import base + + +class TestZone(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.has_service('dns'): + self.skipTest('dns service not supported by cloud') + + def test_zones(self): + '''Test DNS zones functionality''' + name = 'example.net.' + zone_type = 'primary' + email = 'test@example.net' + description = 'Test zone' + ttl = 3600 + masters = None + + self.addDetail('zone', content.text_content(name)) + self.addCleanup(self.cleanup, name) + + # Test we can create a zone and we get it returned + zone = self.user_cloud.create_zone( + name=name, + zone_type=zone_type, + email=email, + description=description, + ttl=ttl, + masters=masters, + ) + self.assertEqual(zone['name'], name) + self.assertEqual(zone['type'], zone_type.upper()) + self.assertEqual(zone['email'], email) + self.assertEqual(zone['description'], description) + self.assertEqual(zone['ttl'], ttl) + self.assertEqual(zone['masters'], []) + + # Test that we can list zones + zones = self.user_cloud.list_zones() + self.assertIsNotNone(zones) + + # Test we get the same zone with the get_zone method + zone_get = self.user_cloud.get_zone(zone['id']) + self.assertEqual(zone_get['id'], zone['id']) + + # Test the get method also works by name + zone_get = self.user_cloud.get_zone(name) + self.assertEqual(zone_get['name'], zone['name']) + + # Test we can update a field on the zone and only that field + # is updated + zone_update = self.user_cloud.update_zone(zone['id'], ttl=7200) + self.assertEqual(zone_update['id'], zone['id']) + self.assertEqual(zone_update['name'], zone['name']) + self.assertEqual(zone_update['type'], zone['type']) + self.assertEqual(zone_update['email'], zone['email']) + 
self.assertEqual(zone_update['description'], zone['description']) + self.assertEqual(zone_update['ttl'], 7200) + self.assertEqual(zone_update['masters'], zone['masters']) + + # Test we can delete and get True returned + zone_delete = self.user_cloud.delete_zone(zone['id']) + self.assertTrue(zone_delete) + + def cleanup(self, name): + self.user_cloud.delete_zone(name) diff --git a/openstack/tests/functional/clustering/__init__.py b/openstack/tests/functional/clustering/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/clustering/v1/__init__.py b/openstack/tests/functional/clustering/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/clustering/v1/test_cluster.py b/openstack/tests/functional/clustering/v1/test_cluster.py new file mode 100644 index 0000000000..63bae64c80 --- /dev/null +++ b/openstack/tests/functional/clustering/v1/test_cluster.py @@ -0,0 +1,141 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import time + +from openstack.clustering.v1 import cluster +from openstack.tests.functional import base +from openstack.tests.functional.network.v2 import test_network + + +class TestCluster(base.BaseFunctionalTest): + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_CLUSTER' + + def setUp(self): + super().setUp() + self.require_service('clustering') + + self.cidr = '10.99.99.0/16' + + self.network, self.subnet = test_network.create_network( + self.operator_cloud, self.getUniqueString(), self.cidr + ) + self.assertIsNotNone(self.network) + + profile_attrs = { + 'name': self.getUniqueString(), + 'spec': { + 'type': 'os.nova.server', + 'version': 1.0, + 'properties': { + 'name': self.getUniqueString(), + 'flavor': self.flavor.name, + 'image': self.image.name, + 'networks': [{'network': self.network.id}], + }, + }, + } + + self.profile = self.operator_cloud.clustering.create_profile( + **profile_attrs + ) + self.assertIsNotNone(self.profile) + + self.cluster_name = self.getUniqueString() + cluster_spec = { + "name": self.cluster_name, + "profile_id": self.profile.name, + "min_size": 0, + "max_size": -1, + "desired_capacity": 0, + } + + self.cluster = self.operator_cloud.clustering.create_cluster( + **cluster_spec + ) + self.operator_cloud.clustering.wait_for_status( + self.cluster, 'ACTIVE', wait=self._wait_for_timeout + ) + assert isinstance(self.cluster, cluster.Cluster) + + def tearDown(self): + if self.cluster: + self.operator_cloud.clustering.delete_cluster(self.cluster.id) + self.operator_cloud.clustering.wait_for_delete( + self.cluster, wait=self._wait_for_timeout + ) + + test_network.delete_network( + self.operator_cloud, self.network, self.subnet + ) + + self.operator_cloud.clustering.delete_profile(self.profile) + + super().tearDown() + + def test_find(self): + sot = self.operator_cloud.clustering.find_cluster(self.cluster.id) + self.assertEqual(self.cluster.id, sot.id) + + def test_get(self): + sot = 
self.operator_cloud.clustering.get_cluster(self.cluster) + self.assertEqual(self.cluster.id, sot.id) + + def test_list(self): + names = [o.name for o in self.operator_cloud.clustering.clusters()] + self.assertIn(self.cluster_name, names) + + def test_update(self): + new_cluster_name = self.getUniqueString() + sot = self.operator_cloud.clustering.update_cluster( + self.cluster, name=new_cluster_name, profile_only=False + ) + + time.sleep(2) + sot = self.operator_cloud.clustering.get_cluster(self.cluster) + self.assertEqual(new_cluster_name, sot.name) + + def test_delete(self): + cluster_delete_action = self.operator_cloud.clustering.delete_cluster( + self.cluster.id + ) + + self.operator_cloud.clustering.wait_for_delete( + self.cluster, wait=self._wait_for_timeout + ) + + action = self.operator_cloud.clustering.get_action( + cluster_delete_action.id + ) + self.assertEqual(action.target_id, self.cluster.id) + self.assertEqual(action.action, 'CLUSTER_DELETE') + self.assertEqual(action.status, 'SUCCEEDED') + + self.cluster = None + + def test_force_delete(self): + cluster_delete_action = self.operator_cloud.clustering.delete_cluster( + self.cluster.id, False, True + ) + + self.operator_cloud.clustering.wait_for_delete( + self.cluster, wait=self._wait_for_timeout + ) + + action = self.operator_cloud.clustering.get_action( + cluster_delete_action.id + ) + self.assertEqual(action.target_id, self.cluster.id) + self.assertEqual(action.action, 'CLUSTER_DELETE') + self.assertEqual(action.status, 'SUCCEEDED') + + self.cluster = None diff --git a/openstack/tests/functional/compute/v2/base.py b/openstack/tests/functional/compute/v2/base.py new file mode 100644 index 0000000000..00a2a36f7a --- /dev/null +++ b/openstack/tests/functional/compute/v2/base.py @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import _proxy as _block_storage_proxy +from openstack.compute.v2 import _proxy as _compute_proxy +from openstack.tests.functional import base +from openstack import utils + + +class BaseComputeTest(base.BaseFunctionalTest): + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_COMPUTE' + + admin_compute_client: _compute_proxy.Proxy + compute_client: _compute_proxy.Proxy + + admin_block_storage_client: _block_storage_proxy.Proxy + block_storage_client: _block_storage_proxy.Proxy + + def setUp(self): + super().setUp() + self._set_user_cloud(compute_api_version='2') + if not self.user_cloud.has_service('compute', '2'): + self.skipTest('compute service not supported by cloud') + + self.admin_compute_client = utils.ensure_service_version( + self.operator_cloud.compute, '2' + ) + self.compute_client = utils.ensure_service_version( + self.user_cloud.compute, '2' + ) + + if not self.user_cloud.has_service('block-storage', '3'): + self.skipTest('block-storage service not supported by cloud') + + self.admin_block_storage_client = utils.ensure_service_version( + self.operator_cloud.block_storage, '3' + ) + self.block_storage_client = utils.ensure_service_version( + self.user_cloud.block_storage, '3' + ) diff --git a/openstack/tests/functional/compute/v2/test_extension.py b/openstack/tests/functional/compute/v2/test_extension.py index c60ead5147..547579a342 100644 --- a/openstack/tests/functional/compute/v2/test_extension.py +++ b/openstack/tests/functional/compute/v2/test_extension.py @@ -10,18 +10,15 @@ # License for the 
specific language governing permissions and limitations # under the License. -import six +from openstack.tests.functional.compute.v2 import base -from openstack.tests.functional import base - - -class TestExtension(base.BaseFunctionalTest): +class TestExtension(base.BaseComputeTest): def test_list(self): - extensions = list(self.conn.compute.extensions()) + extensions = list(self.admin_compute_client.extensions()) self.assertGreater(len(extensions), 0) for ext in extensions: - self.assertIsInstance(ext.name, six.string_types) - self.assertIsInstance(ext.namespace, six.string_types) - self.assertIsInstance(ext.alias, six.string_types) + self.assertIsInstance(ext.name, str) + self.assertIsInstance(ext.namespace, str) + self.assertIsInstance(ext.alias, str) diff --git a/openstack/tests/functional/compute/v2/test_flavor.py b/openstack/tests/functional/compute/v2/test_flavor.py index ec3f0bed5c..5fd64ec5f3 100644 --- a/openstack/tests/functional/compute/v2/test_flavor.py +++ b/openstack/tests/functional/compute/v2/test_flavor.py @@ -10,45 +10,179 @@ # License for the specific language governing permissions and limitations # under the License. 
-import six +import uuid -from openstack import exceptions -from openstack.tests.functional import base +from openstack.compute.v2 import flavor as _flavor +from openstack.tests.functional.compute.v2 import base -class TestFlavor(base.BaseFunctionalTest): +class TestFlavor(base.BaseComputeTest): + def setUp(self): + super().setUp() - @classmethod - def setUpClass(cls): - super(TestFlavor, cls).setUpClass() + self.public_flavor_name = uuid.uuid4().hex + self.private_flavor_name = uuid.uuid4().hex - cls.one_flavor = list(cls.conn.compute.flavors())[0] + def _delete_flavor(self, flavor): + ret = self.admin_compute_client.delete_flavor(flavor) + self.assertIsNone(ret) - def test_flavors(self): - flavors = list(self.conn.compute.flavors()) - self.assertGreater(len(flavors), 0) + def test_flavor(self): + # create flavors + # + # create a public and private flavor so we can test that they are both + # listed for an operator - for flavor in flavors: - self.assertIsInstance(flavor.id, six.string_types) - self.assertIsInstance(flavor.name, six.string_types) - self.assertIsInstance(flavor.disk, int) - self.assertIsInstance(flavor.ram, int) - self.assertIsInstance(flavor.vcpus, int) + public_flavor = self.admin_compute_client.create_flavor( + name=self.public_flavor_name, + ram=1024, + vcpus=2, + disk=10, + is_public=True, + ) + self.addCleanup(self._delete_flavor, public_flavor) + self.assertIsInstance(public_flavor, _flavor.Flavor) - def test_find_flavors_by_id(self): - rslt = self.conn.compute.find_flavor(self.one_flavor.id) - self.assertEqual(rslt.id, self.one_flavor.id) + private_flavor = self.admin_compute_client.create_flavor( + name=self.private_flavor_name, + ram=1024, + vcpus=2, + disk=10, + is_public=False, + ) + self.addCleanup(self._delete_flavor, private_flavor) + self.assertIsInstance(private_flavor, _flavor.Flavor) - def test_find_flavors_by_name(self): - rslt = self.conn.compute.find_flavor(self.one_flavor.name) - self.assertEqual(rslt.name, 
self.one_flavor.name) + # list all flavors + # + # flavor list will include the standard devstack flavors. We just want + # to make sure both of the flavors we just created are present. + flavors = list(self.admin_compute_client.flavors()) + self.assertIn(self.public_flavor_name, {x.name for x in flavors}) + self.assertIn(self.private_flavor_name, {x.name for x in flavors}) - def test_find_flavors_no_match_ignore_true(self): - rslt = self.conn.compute.find_flavor("not a flavor", - ignore_missing=True) - self.assertIsNone(rslt) + # get flavor by ID - def test_find_flavors_no_match_ignore_false(self): - self.assertRaises(exceptions.ResourceNotFound, - self.conn.compute.find_flavor, - "not a flavor", ignore_missing=False) + flavor = self.admin_compute_client.get_flavor(public_flavor.id) + self.assertEqual(flavor.id, public_flavor.id) + + # find flavor by name + + flavor = self.admin_compute_client.find_flavor(public_flavor.name) + self.assertEqual(flavor.name, public_flavor.name) + + # update a flavor + + self.admin_compute_client.update_flavor( + public_flavor, + description="updated description", + ) + + # fetch the updated flavor + + flavor = self.admin_compute_client.get_flavor(public_flavor.id) + self.assertEqual(flavor.description, "updated description") + + def test_flavor_access(self): + # create private flavor + + flavor_name = uuid.uuid4().hex + flavor = self.admin_compute_client.create_flavor( + name=flavor_name, ram=128, vcpus=1, disk=0, is_public=False + ) + self.addCleanup(self._delete_flavor, flavor) + self.assertIsInstance(flavor, _flavor.Flavor) + + # validate the 'demo' user cannot see the new flavor + + flavor = self.compute_client.find_flavor( + flavor_name, ignore_missing=True + ) + self.assertIsNone(flavor) + + # validate we can see the new flavor ourselves + + flavor = self.admin_compute_client.find_flavor( + flavor_name, ignore_missing=True + ) + self.assertIsNotNone(flavor) + self.assertEqual(flavor_name, flavor.name) + + # get demo project 
for access control + + project = self.operator_cloud.get_project('demo') + self.assertIsNotNone(project) + + # give 'demo' access to the flavor + + self.admin_compute_client.flavor_add_tenant_access( + flavor.id, project['id'] + ) + + # verify that the 'demo' user now has access to it + + flavor = self.compute_client.find_flavor( + flavor_name, ignore_missing=True + ) + self.assertIsNotNone(flavor) + + # remove 'demo' access and check we can't find it anymore + + self.admin_compute_client.flavor_remove_tenant_access( + flavor.id, project['id'] + ) + + flavor = self.compute_client.find_flavor( + flavor_name, ignore_missing=True + ) + self.assertIsNone(flavor) + + def test_flavor_extra_specs(self): + # create private flavor + + flavor_name = uuid.uuid4().hex + flavor = self.admin_compute_client.create_flavor( + is_public=False, name=flavor_name, ram=128, vcpus=1, disk=0 + ) + self.addCleanup(self._delete_flavor, flavor) + self.assertIsInstance(flavor, _flavor.Flavor) + + # create extra_specs + + specs = {'a': 'b'} + self.admin_compute_client.create_flavor_extra_specs( + flavor, extra_specs=specs + ) + + # verify specs were created correctly + + flavor_with_specs = self.admin_compute_client.fetch_flavor_extra_specs( + flavor + ) + self.assertDictEqual(specs, flavor_with_specs.extra_specs) + + # update/add a single extra spec property + + self.admin_compute_client.update_flavor_extra_specs_property( + flavor, 'c', 'd' + ) + + # fetch single property value + + prop_value = self.admin_compute_client.get_flavor_extra_specs_property( + flavor, 'c' + ) + self.assertEqual('d', prop_value) + + # delete the new property + + self.admin_compute_client.delete_flavor_extra_specs_property( + flavor, 'c' + ) + + # re-fetch and ensure we're back to the previous state + + flavor_with_specs = self.admin_compute_client.fetch_flavor_extra_specs( + flavor + ) + self.assertDictEqual(specs, flavor_with_specs.extra_specs) diff --git a/openstack/tests/functional/compute/v2/test_hypervisor.py 
b/openstack/tests/functional/compute/v2/test_hypervisor.py new file mode 100644 index 0000000000..025eb2b944 --- /dev/null +++ b/openstack/tests/functional/compute/v2/test_hypervisor.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.compute.v2 import hypervisor as _hypervisor +from openstack.tests.functional.compute.v2 import base + + +class TestHypervisor(base.BaseComputeTest): + def test_hypervisors(self): + hypervisors = list(self.admin_compute_client.hypervisors()) + self.assertIsNotNone(hypervisors) + + hypervisors = list(self.admin_compute_client.hypervisors(details=True)) + self.assertIsNotNone(hypervisors) + + hypervisor = self.admin_compute_client.get_hypervisor( + hypervisors[0].id + ) + self.assertIsInstance(hypervisor, _hypervisor.Hypervisor) + self.assertEqual(hypervisor.id, hypervisors[0].id) + + hypervisor = self.admin_compute_client.find_hypervisor( + hypervisors[0].name, ignore_missing=False + ) + self.assertIsInstance(hypervisor, _hypervisor.Hypervisor) + self.assertEqual(hypervisor.id, hypervisors[0].id) diff --git a/openstack/tests/functional/compute/v2/test_image.py b/openstack/tests/functional/compute/v2/test_image.py index 09069d697e..61bf751b55 100644 --- a/openstack/tests/functional/compute/v2/test_image.py +++ b/openstack/tests/functional/compute/v2/test_image.py @@ -10,100 +10,106 @@ # License for the specific language governing permissions and limitations # under the License. 
-import six - -from openstack.tests.functional import base +from openstack.compute.v2 import image as _image +from openstack.tests.functional.compute.v2 import base from openstack.tests.functional.image.v2.test_image import TEST_IMAGE_NAME -class TestImage(base.BaseFunctionalTest): +class TestImage(base.BaseComputeTest): + def setUp(self): + super().setUp() + + # get a non-test image to work with + images = self.admin_compute_client.images() + self.image = next(images) + + if self.image.name == TEST_IMAGE_NAME: + self.image = next(images) - def test_images(self): - images = list(self.conn.compute.images()) + def test_image(self): + # list all images + + images = list(self.admin_compute_client.images()) self.assertGreater(len(images), 0) for image in images: - self.assertIsInstance(image.id, six.string_types) - - def _get_non_test_image(self): - images = self.conn.compute.images() - image = next(images) - - if image.name == TEST_IMAGE_NAME: - image = next(images) - - return image - - def test_find_image(self): - image = self._get_non_test_image() - self.assertIsNotNone(image) - sot = self.conn.compute.find_image(image.id) - self.assertEqual(image.id, sot.id) - self.assertEqual(image.name, sot.name) - - def test_get_image(self): - image = self._get_non_test_image() - self.assertIsNotNone(image) - sot = self.conn.compute.get_image(image.id) - self.assertEqual(image.id, sot.id) - self.assertEqual(image.name, sot.name) - self.assertIsNotNone(image.links) - self.assertIsNotNone(image.min_disk) - self.assertIsNotNone(image.min_ram) - self.assertIsNotNone(image.metadata) - self.assertIsNotNone(image.progress) - self.assertIsNotNone(image.status) + self.assertIsInstance(image.id, str) - def test_image_metadata(self): - image = self._get_non_test_image() + # find image by name + + image = self.admin_compute_client.find_image(self.image.name) + self.assertIsInstance(image, _image.Image) + self.assertEqual(self.image.id, image.id) + self.assertEqual(self.image.name, 
image.name) + # get image by ID + + image = self.admin_compute_client.get_image(self.image.id) + self.assertIsInstance(image, _image.Image) + self.assertEqual(self.image.id, image.id) + self.assertEqual(self.image.name, image.name) + + def test_image_metadata(self): # delete pre-existing metadata - self.conn.compute.delete_image_metadata(image, image.metadata.keys()) - image = self.conn.compute.get_image_metadata(image) + + self.admin_compute_client.delete_image_metadata( + self.image, self.image.metadata.keys() + ) + image = self.admin_compute_client.get_image_metadata(self.image) self.assertFalse(image.metadata) - # get metadata - image = self.conn.compute.get_image_metadata(image) + # get metadata (should be empty) + + image = self.admin_compute_client.get_image_metadata(self.image) self.assertFalse(image.metadata) # set no metadata - self.conn.compute.set_image_metadata(image) - image = self.conn.compute.get_image_metadata(image) + + self.admin_compute_client.set_image_metadata(self.image) + image = self.admin_compute_client.get_image_metadata(self.image) self.assertFalse(image.metadata) # set empty metadata - self.conn.compute.set_image_metadata(image, k0='') - image = self.conn.compute.get_image_metadata(image) - self.assertFalse(image.metadata) + + self.admin_compute_client.set_image_metadata(self.image, k0='') + image = self.admin_compute_client.get_image_metadata(self.image) + self.assertIn('k0', image.metadata) + self.assertEqual('', image.metadata['k0']) # set metadata - self.conn.compute.set_image_metadata(image, k1='v1') - image = self.conn.compute.get_image_metadata(image) + + self.admin_compute_client.set_image_metadata(self.image, k1='v1') + image = self.admin_compute_client.get_image_metadata(self.image) self.assertTrue(image.metadata) - self.assertEqual(1, len(image.metadata)) + self.assertEqual(2, len(image.metadata)) self.assertIn('k1', image.metadata) self.assertEqual('v1', image.metadata['k1']) # set more metadata - 
self.conn.compute.set_image_metadata(image, k2='v2') - image = self.conn.compute.get_image_metadata(image) + + self.admin_compute_client.set_image_metadata(self.image, k2='v2') + image = self.admin_compute_client.get_image_metadata(self.image) self.assertTrue(image.metadata) - self.assertEqual(2, len(image.metadata)) + self.assertEqual(3, len(image.metadata)) self.assertIn('k1', image.metadata) self.assertEqual('v1', image.metadata['k1']) self.assertIn('k2', image.metadata) self.assertEqual('v2', image.metadata['k2']) # update metadata - self.conn.compute.set_image_metadata(image, k1='v1.1') - image = self.conn.compute.get_image_metadata(image) + + self.admin_compute_client.set_image_metadata(self.image, k1='v1.1') + image = self.admin_compute_client.get_image_metadata(self.image) self.assertTrue(image.metadata) - self.assertEqual(2, len(image.metadata)) + self.assertEqual(3, len(image.metadata)) self.assertIn('k1', image.metadata) self.assertEqual('v1.1', image.metadata['k1']) self.assertIn('k2', image.metadata) self.assertEqual('v2', image.metadata['k2']) - # delete metadata - self.conn.compute.delete_image_metadata(image, image.metadata.keys()) - image = self.conn.compute.get_image_metadata(image) + # delete all metadata (cleanup) + + self.admin_compute_client.delete_image_metadata( + self.image, image.metadata.keys() + ) + image = self.admin_compute_client.get_image_metadata(self.image) self.assertFalse(image.metadata) diff --git a/openstack/tests/functional/compute/v2/test_keypair.py b/openstack/tests/functional/compute/v2/test_keypair.py index 3b2f2f63ec..c560f92e29 100644 --- a/openstack/tests/functional/compute/v2/test_keypair.py +++ b/openstack/tests/functional/compute/v2/test_keypair.py @@ -10,40 +10,78 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid +from openstack.compute.v2 import keypair as _keypair +from openstack.tests.functional.compute.v2 import base -from openstack.compute.v2 import keypair -from openstack.tests.functional import base +class TestKeypair(base.BaseComputeTest): + def setUp(self): + super().setUp() -class TestKeypair(base.BaseFunctionalTest): + # Keypairs can't have .'s in the name. Because why? + self.keypair_name = self.getUniqueString().split('.')[-1] - NAME = uuid.uuid4().hex - ID = None + def _delete_keypair(self, keypair): + ret = self.compute_client.delete_keypair(keypair) + self.assertIsNone(ret) - @classmethod - def setUpClass(cls): - super(TestKeypair, cls).setUpClass() - sot = cls.conn.compute.create_keypair(name=cls.NAME) - assert isinstance(sot, keypair.Keypair) - cls.assertIs(cls.NAME, sot.name) - cls._keypair = sot - cls.ID = sot.id + def test_keypair(self): + # create the keypair - @classmethod - def tearDownClass(cls): - sot = cls.conn.compute.delete_keypair(cls._keypair) - cls.assertIs(None, sot) + keypair = self.compute_client.create_keypair( + name=self.keypair_name, type='ssh' + ) + self.assertIsInstance(keypair, _keypair.Keypair) + self.assertEqual(self.keypair_name, keypair.name) + self.addCleanup(self._delete_keypair, keypair) - def test_find(self): - sot = self.conn.compute.find_keypair(self.NAME) - self.assertEqual(self.ID, sot.id) + # retrieve details of the keypair by ID - def test_get(self): - sot = self.conn.compute.get_keypair(self.NAME) - self.assertEqual(self.NAME, sot.name) - self.assertEqual(self.ID, sot.id) + keypair = self.compute_client.get_keypair(self.keypair_name) + self.assertIsInstance(keypair, _keypair.Keypair) + self.assertEqual(self.keypair_name, keypair.name) + self.assertEqual(self.keypair_name, keypair.id) + self.assertEqual('ssh', keypair.type) - def test_list(self): - names = [o.name for o in self.conn.compute.keypairs()] - self.assertIn(self.NAME, names) + # retrieve details of the keypair by name + + keypair = 
self.compute_client.find_keypair(self.keypair_name) + self.assertIsInstance(keypair, _keypair.Keypair) + self.assertEqual(self.keypair_name, keypair.name) + self.assertEqual(self.keypair_name, keypair.id) + + # list all keypairs + + keypairs = list(self.compute_client.keypairs()) + self.assertIsInstance(keypair, _keypair.Keypair) + self.assertIn(self.keypair_name, {x.name for x in keypairs}) + + +class TestKeypairAdmin(base.BaseComputeTest): + def setUp(self): + super().setUp() + + self.keypair_name = self.getUniqueString().split('.')[-1] + self.user = self.operator_cloud.list_users()[0] + + def _delete_keypair(self, keypair): + ret = self.compute_client.delete_keypair(keypair) + self.assertIsNone(ret) + + def test_keypair(self): + # create the keypair (for another user) + keypair = self.admin_compute_client.create_keypair( + name=self.keypair_name, user_id=self.user.id + ) + self.assertIsInstance(keypair, _keypair.Keypair) + self.assertEqual(self.keypair_name, keypair.name) + self.addCleanup(self._delete_keypair, keypair) + + # retrieve details of the keypair by ID (for another user) + + keypair = self.admin_compute_client.get_keypair( + self.keypair_name, user_id=self.user.id + ) + self.assertEqual(self.keypair_name, keypair.name) + self.assertEqual(self.keypair_name, keypair.id) + self.assertEqual(self.user.id, keypair.user_id) diff --git a/openstack/tests/functional/compute/v2/test_limits.py b/openstack/tests/functional/compute/v2/test_limits.py index 8291b1c84c..51757c194d 100644 --- a/openstack/tests/functional/compute/v2/test_limits.py +++ b/openstack/tests/functional/compute/v2/test_limits.py @@ -10,15 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. 
-from openstack.tests.functional import base +from openstack.tests.functional.compute.v2 import base -class TestLimits(base.BaseFunctionalTest): - +class TestLimits(base.BaseComputeTest): def test_limits(self): - sot = self.conn.compute.get_limits() - self.assertIsNotNone('maxTotalInstances', sot.absolute) - self.assertIsNotNone('maxTotalRAMSize', sot.absolute) - self.assertIsNotNone('maxTotalKeypairs', sot.absolute) - self.assertIsNotNone('maxSecurityGroups', sot.absolute) - self.assertIsNotNone('maxSecurityGroupRules', sot.absolute) + sot = self.admin_compute_client.get_limits() + self.assertIsNotNone(sot.absolute['instances']) + self.assertIsNotNone(sot.absolute['total_ram']) + self.assertIsNotNone(sot.absolute['keypairs']) + self.assertIsNotNone(sot.absolute['security_groups']) + self.assertIsNotNone(sot.absolute['security_group_rules']) diff --git a/openstack/tests/functional/compute/v2/test_quota_set.py b/openstack/tests/functional/compute/v2/test_quota_set.py new file mode 100644 index 0000000000..276bd53fd1 --- /dev/null +++ b/openstack/tests/functional/compute/v2/test_quota_set.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.compute.v2 import quota_set as _quota_set +from openstack.tests.functional.compute.v2 import base + + +class TestQuotaSet(base.BaseComputeTest): + def setUp(self): + super().setUp() + + self.project = self.create_temporary_project() + + def test_quota_set(self): + # update quota + + quota_set = self.admin_compute_client.update_quota_set( + self.project.id, key_pairs=123 + ) + self.assertIsInstance(quota_set, _quota_set.QuotaSet) + self.assertEqual(quota_set.key_pairs, 123) + + # retrieve details of the (updated) quota + + quota_set = self.admin_compute_client.get_quota_set(self.project.id) + self.assertIsInstance(quota_set, _quota_set.QuotaSet) + self.assertEqual(quota_set.key_pairs, 123) + + # retrieve quota defaults + + defaults = self.admin_compute_client.get_quota_set_defaults( + self.project.id + ) + self.assertIsInstance(defaults, _quota_set.QuotaSet) + self.assertNotEqual(defaults.key_pairs, 123) + + # revert quota + + self.admin_compute_client.revert_quota_set(self.project.id) diff --git a/openstack/tests/functional/compute/v2/test_server.py b/openstack/tests/functional/compute/v2/test_server.py index 795e8180da..8c76e38e68 100644 --- a/openstack/tests/functional/compute/v2/test_server.py +++ b/openstack/tests/functional/compute/v2/test_server.py @@ -10,120 +10,229 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid - -from openstack.compute.v2 import server -from openstack.tests.functional import base +from openstack.compute.v2 import server as _server +from openstack.tests.functional.compute.v2 import base from openstack.tests.functional.network.v2 import test_network -class TestServer(base.BaseFunctionalTest): - - NAME = uuid.uuid4().hex - server = None - network = None - subnet = None - cidr = '10.99.99.0/16' - - @classmethod - def setUpClass(cls): - super(TestServer, cls).setUpClass() - flavor = cls.conn.compute.find_flavor(base.FLAVOR_NAME, - ignore_missing=False) - image = cls.conn.compute.find_image(base.IMAGE_NAME, - ignore_missing=False) - cls.network, cls.subnet = test_network.create_network(cls.conn, - cls.NAME, - cls.cidr) - if not cls.network: - # We can't call TestCase.fail from within the setUpClass - # classmethod, but we need to raise some exception in order - # to get this setup to fail and thusly fail the entire class. - raise Exception("Unable to create network for TestServer") - - sot = cls.conn.compute.create_server( - name=cls.NAME, flavor_id=flavor.id, image_id=image.id, - networks=[{"uuid": cls.network.id}]) - cls.conn.compute.wait_for_server(sot) - assert isinstance(sot, server.Server) - cls.assertIs(cls.NAME, sot.name) - cls.server = sot - - @classmethod - def tearDownClass(cls): - sot = cls.conn.compute.delete_server(cls.server.id) - cls.assertIs(None, sot) +class TestServerAdmin(base.BaseComputeTest): + def setUp(self): + super().setUp() + self.server_name = 'needstobeshortandlowercase' + self.user_data = 'SSdtIGFjdHVhbGx5IGEgZ29hdC4=' + + def _delete_server(self, server): + sot = self.admin_compute_client.delete_server(server.id) + self.admin_compute_client.wait_for_delete( + server, wait=self._wait_for_timeout + ) + self.assertIsNone(sot) + + def test_server(self): + # create server with volume + volume = self.admin_block_storage_client.create_volume(size=1) + self.admin_block_storage_client.wait_for_status( + volume, 
wait=self._wait_for_timeout + ) + server = self.admin_compute_client.create_server( + name=self.server_name, + flavor_id=self.flavor.id, + image_id=self.image.id, + networks='none', + user_data=self.user_data, + block_device_mapping=[ + { + 'uuid': volume.id, + 'source_type': 'volume', + 'boot_index': 0, + 'destination_type': 'volume', + 'delete_on_termination': True, + 'volume_size': 1, + }, + ], + ) + self.admin_compute_client.wait_for_server( + server, wait=self._wait_for_timeout + ) + self.assertIsInstance(server, _server.Server) + self.assertEqual(self.server_name, server.name) + self.addCleanup(self._delete_server, server) + + # get server details (admin-specific fields) + + server = self.admin_compute_client.get_server(server.id) + self.assertIsNotNone(server.reservation_id) + self.assertIsNotNone(server.launch_index) + self.assertIsNotNone(server.ramdisk_id) + self.assertIsNotNone(server.kernel_id) + self.assertEqual(self.server_name, server.hostname) + self.assertTrue(server.root_device_name.startswith('/dev')) + self.assertEqual(self.user_data, server.user_data) + self.assertTrue(server.attached_volumes[0]['delete_on_termination']) + + +class TestServer(base.BaseComputeTest): + def setUp(self): + super().setUp() + self.server_name = self.getUniqueString() + self.cidr = '10.99.99.0/16' + + # create network for server + + self.network, self.subnet = test_network.create_network( + self.user_cloud, self.server_name, self.cidr + ) + self.assertIsNotNone(self.network) + self.addCleanup(self._delete_network, self.network, self.subnet) + + def _delete_server(self, server): + sot = self.compute_client.delete_server(server.id) + self.assertIsNone(sot) # Need to wait for the stack to go away before network delete - cls.conn.compute.wait_for_delete(cls.server) - cls.linger_for_delete() - test_network.delete_network(cls.conn, cls.network, cls.subnet) + self.compute_client.wait_for_delete( + server, wait=self._wait_for_timeout + ) + + def _delete_network(self, network, 
subnet): + test_network.delete_network(self.user_cloud, network, subnet) + + def test_server(self): + # create server + + self.server = self.compute_client.create_server( + name=self.server_name, + flavor_id=self.flavor.id, + image_id=self.image.id, + networks=[{"uuid": self.network.id}], + ) + self.compute_client.wait_for_server( + self.server, wait=self._wait_for_timeout + ) + self.addCleanup(self._delete_server, self.server) + self.assertIsInstance(self.server, _server.Server) + self.assertEqual(self.server_name, self.server.name) + + # find server by name - def test_find(self): - sot = self.conn.compute.find_server(self.NAME) - self.assertEqual(self.server.id, sot.id) + server = self.compute_client.find_server(self.server_name) + self.assertEqual(self.server.id, server.id) - def test_get(self): - sot = self.conn.compute.get_server(self.server.id) - self.assertEqual(self.NAME, sot.name) - self.assertEqual(self.server.id, sot.id) + # get server by ID - def test_list(self): - names = [o.name for o in self.conn.compute.servers()] - self.assertIn(self.NAME, names) + server = self.compute_client.get_server(self.server.id) + self.assertEqual(self.server_name, server.name) + self.assertEqual(self.server.id, server.id) + + # list servers + + server = self.compute_client.servers() + self.assertIn(self.server_name, {x.name for x in server}) def test_server_metadata(self): - test_server = self.conn.compute.get_server(self.server.id) + # create server + + server = self.compute_client.create_server( + name=self.server_name, + flavor_id=self.flavor.id, + image_id=self.image.id, + networks=[{"uuid": self.network.id}], + ) + self.compute_client.wait_for_server( + server, wait=self._wait_for_timeout + ) + self.assertIsInstance(server, _server.Server) + self.addCleanup(self._delete_server, server) + + # get metadata (should be empty initially) - # get metadata - test_server = self.conn.compute.get_server_metadata(test_server) - self.assertFalse(test_server.metadata) + server = 
self.compute_client.get_server_metadata(server) + self.assertFalse(server.metadata) # set no metadata - self.conn.compute.set_server_metadata(test_server) - test_server = self.conn.compute.get_server_metadata(test_server) - self.assertFalse(test_server.metadata) + + self.compute_client.set_server_metadata(server) + server = self.compute_client.get_server_metadata(server) + self.assertFalse(server.metadata) # set empty metadata - self.conn.compute.set_server_metadata(test_server, k0='') - server = self.conn.compute.get_server_metadata(test_server) + + self.compute_client.set_server_metadata(server, k0='') + server = self.compute_client.get_server_metadata(server) self.assertTrue(server.metadata) # set metadata - self.conn.compute.set_server_metadata(test_server, k1='v1') - test_server = self.conn.compute.get_server_metadata(test_server) - self.assertTrue(test_server.metadata) - self.assertEqual(2, len(test_server.metadata)) - self.assertIn('k0', test_server.metadata) - self.assertEqual('', test_server.metadata['k0']) - self.assertIn('k1', test_server.metadata) - self.assertEqual('v1', test_server.metadata['k1']) + + self.compute_client.set_server_metadata(server, k1='v1') + server = self.compute_client.get_server_metadata(server) + self.assertTrue(server.metadata) + self.assertEqual(2, len(server.metadata)) + self.assertIn('k0', server.metadata) + self.assertEqual('', server.metadata['k0']) + self.assertIn('k1', server.metadata) + self.assertEqual('v1', server.metadata['k1']) # set more metadata - self.conn.compute.set_server_metadata(test_server, k2='v2') - test_server = self.conn.compute.get_server_metadata(test_server) - self.assertTrue(test_server.metadata) - self.assertEqual(3, len(test_server.metadata)) - self.assertIn('k0', test_server.metadata) - self.assertEqual('', test_server.metadata['k0']) - self.assertIn('k1', test_server.metadata) - self.assertEqual('v1', test_server.metadata['k1']) - self.assertIn('k2', test_server.metadata) - self.assertEqual('v2', 
test_server.metadata['k2']) + + self.compute_client.set_server_metadata(server, k2='v2') + server = self.compute_client.get_server_metadata(server) + self.assertTrue(server.metadata) + self.assertEqual(3, len(server.metadata)) + self.assertIn('k0', server.metadata) + self.assertEqual('', server.metadata['k0']) + self.assertIn('k1', server.metadata) + self.assertEqual('v1', server.metadata['k1']) + self.assertIn('k2', server.metadata) + self.assertEqual('v2', server.metadata['k2']) # update metadata - self.conn.compute.set_server_metadata(test_server, k1='v1.1') - test_server = self.conn.compute.get_server_metadata(test_server) - self.assertTrue(test_server.metadata) - self.assertEqual(3, len(test_server.metadata)) - self.assertIn('k0', test_server.metadata) - self.assertEqual('', test_server.metadata['k0']) - self.assertIn('k1', test_server.metadata) - self.assertEqual('v1.1', test_server.metadata['k1']) - self.assertIn('k2', test_server.metadata) - self.assertEqual('v2', test_server.metadata['k2']) - - # delete metadata - self.conn.compute.delete_server_metadata( - test_server, test_server.metadata.keys()) - test_server = self.conn.compute.get_server_metadata(test_server) - self.assertFalse(test_server.metadata) + + self.compute_client.set_server_metadata(server, k1='v1.1') + server = self.compute_client.get_server_metadata(server) + self.assertTrue(server.metadata) + self.assertEqual(3, len(server.metadata)) + self.assertIn('k0', server.metadata) + self.assertEqual('', server.metadata['k0']) + self.assertIn('k1', server.metadata) + self.assertEqual('v1.1', server.metadata['k1']) + self.assertIn('k2', server.metadata) + self.assertEqual('v2', server.metadata['k2']) + + # delete all metadata (cleanup) + + self.compute_client.delete_server_metadata( + server, server.metadata.keys() + ) + server = self.compute_client.get_server_metadata(server) + self.assertFalse(server.metadata) + + def test_server_remote_console(self): + # create network for server + + network, 
subnet = test_network.create_network( + self.user_cloud, self.server_name, self.cidr + ) + self.assertIsNotNone(network) + self.addCleanup(self._delete_network, network, subnet) + + # create server + + server = self.compute_client.create_server( + name=self.server_name, + flavor_id=self.flavor.id, + image_id=self.image.id, + networks=[{"uuid": network.id}], + ) + self.compute_client.wait_for_server( + server, wait=self._wait_for_timeout + ) + self.assertIsInstance(server, _server.Server) + self.addCleanup(self._delete_server, server) + + # create remote console + + console = self.compute_client.create_server_remote_console( + server, protocol='vnc', type='novnc' + ) + self.assertEqual('vnc', console.protocol) + self.assertEqual('novnc', console.type) + self.assertTrue(console.url.startswith('http')) diff --git a/openstack/tests/functional/compute/v2/test_service.py b/openstack/tests/functional/compute/v2/test_service.py new file mode 100644 index 0000000000..2062558dc1 --- /dev/null +++ b/openstack/tests/functional/compute/v2/test_service.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional.compute.v2 import base + + +class TestService(base.BaseComputeTest): + def test_service(self): + # list all services + services = list(self.admin_compute_client.services()) + self.assertIsNotNone(services) + + # find a service + self.admin_compute_client.find_service( + services[0].name, host=services[0].host, ignore_missing=False + ) diff --git a/openstack/tests/functional/compute/v2/test_volume_attachment.py b/openstack/tests/functional/compute/v2/test_volume_attachment.py new file mode 100644 index 0000000000..37edde41b1 --- /dev/null +++ b/openstack/tests/functional/compute/v2/test_volume_attachment.py @@ -0,0 +1,130 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v3 import volume as _volume +from openstack.compute.v2 import server as _server +from openstack.compute.v2 import volume_attachment as _volume_attachment +from openstack.tests.functional.compute.v2 import base + + +class TestServerVolumeAttachment(base.BaseComputeTest): + def setUp(self): + super().setUp() + + self.server_name = self.getUniqueString() + self.volume_name = self.getUniqueString() + + # create the server and volume + + server = self.compute_client.create_server( + name=self.server_name, + flavor_id=self.flavor.id, + image_id=self.image.id, + networks='none', + ) + self.compute_client.wait_for_server( + server, wait=self._wait_for_timeout + ) + self.addCleanup(self._delete_server, server) + self.assertIsInstance(server, _server.Server) + self.assertEqual(self.server_name, server.name) + + volume = self.block_storage_client.create_volume( + name=self.volume_name, size=1 + ) + self.block_storage_client.wait_for_status( + volume, status='available', wait=self._wait_for_timeout + ) + self.addCleanup(self._delete_volume, volume) + self.assertIsInstance(volume, _volume.Volume) + self.assertEqual(self.volume_name, volume.name) + + self.server = server + self.volume = volume + + def _delete_server(self, server): + self.compute_client.delete_server(server.id) + self.compute_client.wait_for_delete( + server, wait=self._wait_for_timeout + ) + + def _delete_volume(self, volume): + self.block_storage_client.delete_volume(volume.id) + self.block_storage_client.wait_for_delete( + volume, wait=self._wait_for_timeout + ) + + def test_volume_attachment(self): + # create the volume attachment + + volume_attachment = self.compute_client.create_volume_attachment( + self.server, self.volume + ) + self.assertIsInstance( + volume_attachment, _volume_attachment.VolumeAttachment + ) + self.block_storage_client.wait_for_status( + self.volume, status='in-use', wait=self._wait_for_timeout + ) + + # list all attached volume attachments (there should 
only be one) + + volume_attachments = list( + self.compute_client.volume_attachments(self.server) + ) + self.assertEqual(1, len(volume_attachments)) + self.assertIsInstance( + volume_attachments[0], _volume_attachment.VolumeAttachment + ) + + # update the volume attachment + + volume_attachment = self.compute_client.update_volume_attachment( + self.server, self.volume, delete_on_termination=True + ) + self.assertIsInstance( + volume_attachment, _volume_attachment.VolumeAttachment + ) + + # retrieve details of the (updated) volume attachment + + volume_attachment = self.compute_client.get_volume_attachment( + self.server, self.volume + ) + self.assertIsInstance( + volume_attachment, _volume_attachment.VolumeAttachment + ) + self.assertTrue(volume_attachment.delete_on_termination) + + # delete the volume attachment + + result = self.compute_client.delete_volume_attachment( + self.server, self.volume, ignore_missing=False + ) + self.assertIsNone(result) + + self.block_storage_client.wait_for_status( + self.volume, status='available', wait=self._wait_for_timeout + ) + + # Wait for the attachment to be deleted. + # This is done to prevent a race between the BDM + # record being deleted and we trying to delete the server. 
+ self.compute_client.wait_for_delete( + volume_attachment, wait=self._wait_for_timeout + ) + + # Verify the server doesn't have any volume attachment + volume_attachments = list( + self.compute_client.volume_attachments(self.server) + ) + self.assertEqual(0, len(volume_attachments)) diff --git a/openstack/tests/functional/dns/__init__.py b/openstack/tests/functional/dns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/dns/v2/__init__.py b/openstack/tests/functional/dns/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/dns/v2/test_blacklist.py b/openstack/tests/functional/dns/v2/test_blacklist.py new file mode 100644 index 0000000000..2ea931ae2e --- /dev/null +++ b/openstack/tests/functional/dns/v2/test_blacklist.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import uuid + +from openstack.dns.v2 import blacklist as _blacklist +from openstack.tests.functional import base + + +class TestBlackList(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('dns') + + # Note: use a unique UUID pattern to avoid test collisions + self.pattern = rf".*\.test-{uuid.uuid4().hex}.com" + self.description = self.getUniqueString('blacklist') + + def _delete_blacklist(self, blacklist): + ret = self.operator_cloud.dns.delete_blacklist(blacklist.id) + self.assertIsNone(ret) + + def test_blacklist(self): + # create blacklist + blacklist = self.operator_cloud.dns.create_blacklist( + pattern=self.pattern, + description=self.description, + ) + self.assertIsNotNone(blacklist.id) + self.assertIsInstance(blacklist, _blacklist.Blacklist) + self.assertEqual(self.pattern, blacklist.pattern) + self.assertEqual(self.description, blacklist.description) + self.addCleanup(self._delete_blacklist, blacklist) + + # update blacklist + blacklist = self.operator_cloud.dns.update_blacklist( + blacklist, pattern=self.pattern, description=self.description + ) + self.assertIsInstance(blacklist, _blacklist.Blacklist) + self.assertEqual(self.pattern, blacklist.pattern) + self.assertEqual(self.description, blacklist.description) + + # get blacklist + blacklist = self.operator_cloud.dns.get_blacklist(blacklist.id) + self.assertIsInstance(blacklist, _blacklist.Blacklist) + self.assertEqual(self.pattern, blacklist.pattern) + self.assertEqual(self.description, blacklist.description) + + # list all blacklists + blacklists = list(self.operator_cloud.dns.blacklists()) + self.assertIsInstance(blacklists[0], _blacklist.Blacklist) + self.assertIn(self.pattern, {x.pattern for x in blacklists}) + self.operator_cloud.dns.delete_blacklist(blacklist.id) diff --git a/openstack/tests/functional/dns/v2/test_quota.py b/openstack/tests/functional/dns/v2/test_quota.py new file mode 100644 index 0000000000..3a642a60ce --- /dev/null +++ 
b/openstack/tests/functional/dns/v2/test_quota.py @@ -0,0 +1,71 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.functional import base + + +class TestQuota(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + self.require_service("dns") + if not self._operator_cloud_name: + self.skip("Operator cloud must be set for this test") + + self.project = self.create_temporary_project() + + def test_quota(self): + # set quota + + attrs = { + "api_export_size": 1, + "recordset_records": 2, + "zone_records": 3, + "zone_recordsets": 4, + "zones": 5, + } + new_quota = self.operator_cloud.dns.update_quota( + self.project.id, **attrs + ) + self.assertEqual(attrs["api_export_size"], new_quota.api_export_size) + self.assertEqual( + attrs["recordset_records"], new_quota.recordset_records + ) + self.assertEqual(attrs["zone_records"], new_quota.zone_records) + self.assertEqual(attrs["zone_recordsets"], new_quota.zone_recordsets) + self.assertEqual(attrs["zones"], new_quota.zones) + + # get quota + + expected_keys = [ + "id", + "api_export_size", + "recordset_records", + "zone_records", + "zone_recordsets", + "zones", + ] + test_quota = self.operator_cloud.dns.get_quota(self.project.id) + for actual_key in test_quota._body.attributes.keys(): + self.assertIn(actual_key, expected_keys) + self.assertEqual(self.project.id, test_quota.id) + self.assertEqual(attrs["api_export_size"], test_quota.api_export_size) + self.assertEqual( + 
attrs["recordset_records"], test_quota.recordset_records + ) + self.assertEqual(attrs["zone_records"], test_quota.zone_records) + self.assertEqual(attrs["zone_recordsets"], test_quota.zone_recordsets) + self.assertEqual(attrs["zones"], test_quota.zones) + + # reset quota + + self.operator_cloud.dns.delete_quota(self.project.id) diff --git a/openstack/tests/functional/dns/v2/test_service_status.py b/openstack/tests/functional/dns/v2/test_service_status.py new file mode 100644 index 0000000000..0fa6e9c22c --- /dev/null +++ b/openstack/tests/functional/dns/v2/test_service_status.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional import base + + +class TestServiceStatus(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('dns') + + self.service_names = [ + "api", + "backend", + "central", + "mdns", + "producer", + "sink", + "storage", + "worker", + ] + self.service_status = ["UP", "DOWN"] + + def test_service_status(self): + service_statuses = list(self.operator_cloud.dns.service_statuses()) + if not service_statuses: + self.skipTest( + "The Service in Designate System is required for this test" + ) + + names = [f.service_name for f in service_statuses] + statuses = [f.status for f in service_statuses] + + self.assertTrue( + all(status in self.service_status for status in statuses) + ) + self.assertTrue(all(name in self.service_names for name in names)) + + # Test that we can fetch a service status + service_status = self.operator_cloud.dns.get_service_status( + service_statuses[0] + ) + self.assertIn(service_status.service_name, self.service_names) + self.assertIn(service_status.status, self.service_status) diff --git a/openstack/tests/functional/dns/v2/test_tld.py b/openstack/tests/functional/dns/v2/test_tld.py new file mode 100644 index 0000000000..ffb40de1be --- /dev/null +++ b/openstack/tests/functional/dns/v2/test_tld.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.dns.v2 import tld as _tld +from openstack.tests.functional import base + + +class TestTLD(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + self.require_service('dns') + + self.tld_name = 'xyz' + self.tld_description = 'The xyz TLD' + + def test_tld(self): + # create the tld + + tld = self.operator_cloud.dns.create_tld( + name=self.tld_name, description=self.tld_description + ) + self.assertIsInstance(tld, _tld.TLD) + self.assertEqual(self.tld_description, tld.description) + self.addCleanup(self.operator_cloud.dns.delete_tld, tld) + + # update the tld + + tld = self.operator_cloud.dns.update_tld( + tld, description=self.tld_description + ) + self.assertIsInstance(tld, _tld.TLD) + self.assertEqual(self.tld_description, tld.description) + + # retrieve details of the (updated) tld by ID + + tld = self.operator_cloud.dns.get_tld(tld.id) + self.assertIsInstance(tld, _tld.TLD) + self.assertEqual(self.tld_description, tld.description) + + # retrieve details of the (updated) tld by name + + tld = self.operator_cloud.dns.find_tld(tld.name) + self.assertIsInstance(tld, _tld.TLD) + self.assertEqual(self.tld_description, tld.description) + + # list all tlds + tlds = list(self.operator_cloud.dns.tlds()) + self.assertIsInstance(tlds[0], _tld.TLD) + self.assertIn( + self.tld_name, {x.name for x in self.operator_cloud.dns.tlds()} + ) diff --git a/openstack/tests/functional/dns/v2/test_zone.py b/openstack/tests/functional/dns/v2/test_zone.py new file mode 100644 index 0000000000..513e924c4f --- /dev/null +++ b/openstack/tests/functional/dns/v2/test_zone.py @@ -0,0 +1,103 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import random + +from openstack import exceptions +from openstack.tests.functional import base +from openstack import utils + + +class TestZone(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('dns') + + # Note: zone deletion is not an immediate operation, so each time + # chose a new zone name for a test + # getUniqueString is not guaranteed to return unique string between + # different tests of the same class. + self.ZONE_NAME = f'example-{random.randint(1, 10000)}.org.' + + self.zone = self.operator_cloud.dns.create_zone( + name=self.ZONE_NAME, + email='joe@example.org', + type='PRIMARY', + ttl=7200, + description='example zone', + ) + self.addCleanup(self.operator_cloud.dns.delete_zone, self.zone) + + def test_get_zone(self): + zone = self.operator_cloud.dns.get_zone(self.zone) + self.assertEqual(self.zone, zone) + + def test_list_zones(self): + names = [f.name for f in self.operator_cloud.dns.zones()] + self.assertIn(self.ZONE_NAME, names) + + def test_update_zone(self): + current_ttl = self.operator_cloud.dns.get_zone(self.zone)['ttl'] + self.operator_cloud.dns.update_zone(self.zone, ttl=current_ttl + 1) + updated_zone_ttl = self.operator_cloud.dns.get_zone(self.zone)['ttl'] + self.assertEqual( + current_ttl + 1, + updated_zone_ttl, + f'Failed, updated TTL value is:{updated_zone_ttl} instead of ' + f'expected:{current_ttl + 1}', + ) + + def test_create_rs(self): + zone = self.operator_cloud.dns.get_zone(self.zone) + self.assertIsNotNone( + self.operator_cloud.dns.create_recordset( + zone=zone, + 
name=f'www.{zone.name}', + type='A', + description='Example zone rec', + ttl=3600, + records=['192.168.1.1'], + ) + ) + + def test_delete_zone_with_shares(self): + # Make sure the API under test has shared zones support + if not utils.supports_version(self.operator_cloud.dns, '2.1'): + self.skipTest( + 'Designate API version does not support shared zones.' + ) + + zone_name = f'example-{random.randint(1, 10000)}.org.' + zone = self.operator_cloud.dns.create_zone( + name=zone_name, + email='joe@example.org', + type='PRIMARY', + ttl=7200, + description='example zone', + ) + self.addCleanup(self.operator_cloud.dns.delete_zone, zone) + + demo_project_id = self.operator_cloud.get_project('demo')['id'] + zone_share = self.operator_cloud.dns.create_zone_share( + zone, target_project_id=demo_project_id + ) + self.addCleanup( + self.operator_cloud.dns.delete_zone_share, zone, zone_share + ) + + # Test that we cannot delete a zone with shares + self.assertRaises( + exceptions.BadRequestException, + self.operator_cloud.dns.delete_zone, + zone, + ) + + self.operator_cloud.dns.delete_zone(zone, delete_shares=True) diff --git a/openstack/tests/functional/dns/v2/test_zone_share.py b/openstack/tests/functional/dns/v2/test_zone_share.py new file mode 100644 index 0000000000..f3c286e62a --- /dev/null +++ b/openstack/tests/functional/dns/v2/test_zone_share.py @@ -0,0 +1,167 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack import exceptions +from openstack.tests.functional import base +from openstack import utils + + +class TestZoneShare(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('dns') + if not self.user_cloud: + self.skipTest("The demo cloud is required for this test") + + # Note: zone deletion is not an immediate operation, so each time + # chose a new zone name for a test + # getUniqueString is not guaranteed to return unique string between + # different tests of the same class. + self.ZONE_NAME = f'example-{uuid.uuid4().hex}.org.' + + # Make sure the API under test has shared zones support + if not utils.supports_version(self.operator_cloud.dns, '2.1'): + self.skipTest( + 'Designate API version does not support shared zones.' + ) + + self.zone = self.operator_cloud.dns.create_zone( + name=self.ZONE_NAME, + email='joe@example.org', + type='PRIMARY', + ttl=7200, + description='example zone for sdk zone share tests', + ) + self.addCleanup( + self.operator_cloud.dns.delete_zone, + self.zone, + delete_shares=True, + ) + + self.project_id = self.operator_cloud.session.get_project_id() + self.demo_project_id = self.user_cloud.session.get_project_id() + + def test_create_delete_zone_share(self): + zone_share = self.operator_cloud.dns.create_zone_share( + self.zone, target_project_id=self.demo_project_id + ) + self.addCleanup( + self.operator_cloud.dns.delete_zone_share, + self.zone, + zone_share, + ) + + self.assertEqual(self.zone.id, zone_share.zone_id) + self.assertEqual(self.project_id, zone_share.project_id) + self.assertEqual(self.demo_project_id, zone_share.target_project_id) + self.assertIsNotNone(zone_share.id) + self.assertIsNotNone(zone_share.created_at) + self.assertIsNone(zone_share.updated_at) + + def test_get_zone_share(self): + orig_zone_share = self.operator_cloud.dns.create_zone_share( + self.zone, + target_project_id=self.demo_project_id, + ) + self.addCleanup( + 
self.operator_cloud.dns.delete_zone_share, + self.zone, + orig_zone_share, + ) + + zone_share = self.operator_cloud.dns.get_zone_share( + self.zone, + orig_zone_share, + ) + + self.assertEqual(self.zone.id, zone_share.zone_id) + self.assertEqual(self.project_id, zone_share.project_id) + self.assertEqual(self.demo_project_id, zone_share.target_project_id) + self.assertEqual(orig_zone_share.id, zone_share.id) + self.assertEqual(orig_zone_share.created_at, zone_share.created_at) + self.assertEqual(orig_zone_share.updated_at, zone_share.updated_at) + + def test_find_zone_share(self): + orig_zone_share = self.operator_cloud.dns.create_zone_share( + self.zone, target_project_id=self.demo_project_id + ) + self.addCleanup( + self.operator_cloud.dns.delete_zone_share, + self.zone, + orig_zone_share, + ) + + zone_share = self.operator_cloud.dns.find_zone_share( + self.zone, + orig_zone_share.id, + ) + + self.assertEqual(self.zone.id, zone_share.zone_id) + self.assertEqual(self.project_id, zone_share.project_id) + self.assertEqual(self.demo_project_id, zone_share.target_project_id) + self.assertEqual(orig_zone_share.id, zone_share.id) + self.assertEqual(orig_zone_share.created_at, zone_share.created_at) + self.assertEqual(orig_zone_share.updated_at, zone_share.updated_at) + + def test_find_zone_share_ignore_missing(self): + zone_share = self.operator_cloud.dns.find_zone_share( + self.zone, + 'bogus_id', + ) + self.assertIsNone(zone_share) + + def test_find_zone_share_ignore_missing_false(self): + self.assertRaises( + exceptions.NotFoundException, + self.operator_cloud.dns.find_zone_share, + self.zone, + 'bogus_id', + ignore_missing=False, + ) + + def test_list_zone_shares(self): + zone_share = self.operator_cloud.dns.create_zone_share( + self.zone, + target_project_id=self.demo_project_id, + ) + self.addCleanup( + self.operator_cloud.dns.delete_zone_share, + self.zone, + zone_share, + ) + + target_ids = [ + o.target_project_id + for o in 
self.operator_cloud.dns.zone_shares(self.zone) + ] + self.assertIn(self.demo_project_id, target_ids) + + def test_list_zone_shares_with_target_id(self): + zone_share = self.operator_cloud.dns.create_zone_share( + self.zone, + target_project_id=self.demo_project_id, + ) + self.addCleanup( + self.operator_cloud.dns.delete_zone_share, + self.zone, + zone_share, + ) + + target_ids = [ + o.target_project_id + for o in self.operator_cloud.dns.zone_shares( + self.zone, target_project_id=self.demo_project_id + ) + ] + self.assertIn(self.demo_project_id, target_ids) diff --git a/openstack/tests/functional/examples/__init__.py b/openstack/tests/functional/examples/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/examples/test_compute.py b/openstack/tests/functional/examples/test_compute.py new file mode 100644 index 0000000000..ea814b6b8b --- /dev/null +++ b/openstack/tests/functional/examples/test_compute.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from examples.compute import create +from examples.compute import delete +from examples.compute import find as compute_find +from examples.compute import list as compute_list +from examples.network import find as network_find +from examples.network import list as network_list + +from openstack.tests.functional import base + + +class TestCompute(base.BaseFunctionalTest): + """Test the compute examples + + The purpose of these tests is to ensure the examples run successfully. + """ + + def test_compute(self): + compute_list.list_servers(self.operator_cloud) + compute_list.list_images(self.operator_cloud) + compute_list.list_flavors(self.operator_cloud) + compute_list.list_keypairs(self.operator_cloud) + network_list.list_networks(self.operator_cloud) + + compute_find.find_image(self.operator_cloud) + compute_find.find_flavor(self.operator_cloud) + compute_find.find_keypair(self.operator_cloud) + network_find.find_network(self.operator_cloud) + + create.create_server(self.operator_cloud) + + delete.delete_keypair(self.operator_cloud) + delete.delete_server(self.operator_cloud) diff --git a/openstack/tests/functional/examples/test_identity.py b/openstack/tests/functional/examples/test_identity.py new file mode 100644 index 0000000000..285c1c6fb5 --- /dev/null +++ b/openstack/tests/functional/examples/test_identity.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from examples.identity import list as identity_list + +from openstack.tests.functional import base + + +class TestIdentity(base.BaseFunctionalTest): + """Test the identity examples + + The purpose of these tests is to ensure the examples run successfully. + """ + + def test_identity(self): + identity_list.list_users(self.operator_cloud) + identity_list.list_credentials(self.operator_cloud) + identity_list.list_projects(self.operator_cloud) + identity_list.list_domains(self.operator_cloud) + identity_list.list_groups(self.operator_cloud) + identity_list.list_services(self.operator_cloud) + identity_list.list_endpoints(self.operator_cloud) + identity_list.list_regions(self.operator_cloud) diff --git a/openstack/tests/functional/examples/test_image.py b/openstack/tests/functional/examples/test_image.py new file mode 100644 index 0000000000..157f4a84dd --- /dev/null +++ b/openstack/tests/functional/examples/test_image.py @@ -0,0 +1,31 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from examples.image import create as image_create +from examples.image import delete as image_delete +from examples.image import list as image_list + +from openstack.tests.functional import base + + +class TestImage(base.BaseFunctionalTest): + """Test the image examples + + The purpose of these tests is to ensure the examples run successfully. 
+ """ + + def test_image(self): + image_list.list_images(self.operator_cloud) + + image_create.upload_image(self.operator_cloud) + + image_delete.delete_image(self.operator_cloud) diff --git a/openstack/tests/functional/examples/test_network.py b/openstack/tests/functional/examples/test_network.py new file mode 100644 index 0000000000..b1dceb6039 --- /dev/null +++ b/openstack/tests/functional/examples/test_network.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from examples.network import create as network_create +from examples.network import delete as network_delete +from examples.network import find as network_find +from examples.network import list as network_list + +from openstack.tests.functional import base + + +class TestNetwork(base.BaseFunctionalTest): + """Test the network examples + + The purpose of these tests is to ensure the examples run successfully. 
+ """ + + def test_network(self): + network_list.list_networks(self.operator_cloud) + network_list.list_subnets(self.operator_cloud) + network_list.list_ports(self.operator_cloud) + network_list.list_security_groups(self.operator_cloud) + network_list.list_routers(self.operator_cloud) + + network_find.find_network(self.operator_cloud) + + network_create.create_network(self.operator_cloud) + network_delete.delete_network(self.operator_cloud) diff --git a/openstack/tests/functional/identity/__init__.py b/openstack/tests/functional/identity/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/identity/v3/__init__.py b/openstack/tests/functional/identity/v3/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/identity/v3/base.py b/openstack/tests/functional/identity/v3/base.py new file mode 100644 index 0000000000..7ca748def9 --- /dev/null +++ b/openstack/tests/functional/identity/v3/base.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import _proxy as _identity_v3 +from openstack.tests.functional import base +from openstack import utils + + +class BaseIdentityTest(base.BaseFunctionalTest): + admin_identity_client: _identity_v3.Proxy + system_admin_identity_client: _identity_v3.Proxy + + def setUp(self): + super().setUp() + if not self.operator_cloud.has_service('identity', '3'): + self.skipTest('identity service not supported by cloud') + + self.admin_identity_client = utils.ensure_service_version( + self.operator_cloud.identity, '3' + ) + self.system_admin_identity_client = utils.ensure_service_version( + self.system_admin_cloud.identity, '3' + ) diff --git a/openstack/tests/functional/identity/v3/test_access_rule.py b/openstack/tests/functional/identity/v3/test_access_rule.py new file mode 100644 index 0000000000..e6bda334ae --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_access_rule.py @@ -0,0 +1,83 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.tests.functional.identity.v3 import base + + +class TestAccessRule(base.BaseIdentityTest): + def setUp(self): + super().setUp() + self.user_id = self.operator_cloud.current_user_id + + def _create_application_credential_with_access_rule(self): + """create application credential with access_rule.""" + + app_cred = self.admin_identity_client.create_application_credential( + user=self.user_id, + name='app_cred', + access_rules=[ + { + "path": "/v2.0/metrics", + "service": "monitoring", + "method": "GET", + } + ], + ) + self.addCleanup( + self.admin_identity_client.delete_application_credential, + self.user_id, + app_cred['id'], + ) + return app_cred + + def test_get_access_rule(self): + app_cred = self._create_application_credential_with_access_rule() + access_rule_id = app_cred['access_rules'][0]['id'] + access_rule = self.admin_identity_client.get_access_rule( + user=self.user_id, access_rule=access_rule_id + ) + self.assertEqual(access_rule['id'], access_rule_id) + self.assertEqual(access_rule['user_id'], self.user_id) + + def test_list_access_rules(self): + app_cred = self._create_application_credential_with_access_rule() + access_rule_id = app_cred['access_rules'][0]['id'] + access_rules = self.admin_identity_client.access_rules( + user=self.user_id + ) + self.assertEqual(1, len(list(access_rules))) + for access_rule in access_rules: + self.assertEqual(app_cred['user_id'], self.user_id) + self.assertEqual(access_rule_id, access_rule['id']) + + def test_delete_access_rule(self): + app_cred = self._create_application_credential_with_access_rule() + access_rule_id = app_cred['access_rules'][0]['id'] + + # This is expected to raise an exception since access_rule is still + # in use for app_cred. 
+ self.assertRaises( + exceptions.HttpException, + self.admin_identity_client.delete_access_rule, + user=self.user_id, + access_rule=access_rule_id, + ) + + # delete application credential first to delete access rule + self.admin_identity_client.delete_application_credential( + user=self.user_id, application_credential=app_cred['id'] + ) + # delete orphaned access rules + self.admin_identity_client.delete_access_rule( + user=self.user_id, access_rule=access_rule_id + ) diff --git a/openstack/tests/functional/identity/v3/test_application_credential.py b/openstack/tests/functional/identity/v3/test_application_credential.py new file mode 100644 index 0000000000..9b36e63e27 --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_application_credential.py @@ -0,0 +1,71 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.tests.functional.identity.v3 import base + + +class TestApplicationCredentials(base.BaseIdentityTest): + def setUp(self): + super().setUp() + self.user_id = self.operator_cloud.current_user_id + + def _create_application_credentials(self): + app_creds = self.admin_identity_client.create_application_credential( + user=self.user_id, name='app_cred' + ) + self.addCleanup( + self.admin_identity_client.delete_application_credential, + self.user_id, + app_creds['id'], + ) + return app_creds + + def test_create_application_credentials(self): + app_creds = self._create_application_credentials() + self.assertEqual(app_creds['user_id'], self.user_id) + + def test_get_application_credential(self): + app_creds = self._create_application_credentials() + app_cred = self.admin_identity_client.get_application_credential( + user=self.user_id, application_credential=app_creds['id'] + ) + self.assertEqual(app_cred['id'], app_creds['id']) + self.assertEqual(app_cred['user_id'], self.user_id) + + def test_application_credentials(self): + self._create_application_credentials() + app_creds = self.admin_identity_client.application_credentials( + user=self.user_id + ) + for app_cred in app_creds: + self.assertEqual(app_cred['user_id'], self.user_id) + + def test_find_application_credential(self): + app_creds = self._create_application_credentials() + app_cred = self.admin_identity_client.find_application_credential( + user=self.user_id, name_or_id=app_creds['id'] + ) + self.assertEqual(app_cred['id'], app_creds['id']) + self.assertEqual(app_cred['user_id'], self.user_id) + + def test_delete_application_credential(self): + app_creds = self._create_application_credentials() + self.admin_identity_client.delete_application_credential( + user=self.user_id, application_credential=app_creds['id'] + ) + self.assertRaises( + exceptions.NotFoundException, + self.admin_identity_client.get_application_credential, + user=self.user_id, + 
application_credential=app_creds['id'], + ) diff --git a/openstack/tests/functional/identity/v3/test_domain_config.py b/openstack/tests/functional/identity/v3/test_domain_config.py new file mode 100644 index 0000000000..e2159bb939 --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_domain_config.py @@ -0,0 +1,83 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.identity.v3 import domain as _domain +from openstack.identity.v3 import domain_config as _domain_config +from openstack.tests.functional.identity.v3 import base + + +class TestDomainConfig(base.BaseIdentityTest): + def setUp(self): + super().setUp() + + self.domain_name = self.getUniqueString() + + # create the domain and domain config + + self.domain = self.operator_cloud.create_domain( + name=self.domain_name, + ) + self.assertIsInstance(self.domain, _domain.Domain) + self.addCleanup(self._delete_domain) + + def _delete_domain(self): + self.admin_identity_client.update_domain( + self.domain, + enabled=False, + ) + self.admin_identity_client.delete_domain(self.domain) + + def test_domain_config(self): + # create the domain config + + domain_config = self.admin_identity_client.create_domain_config( + self.domain, + identity={'driver': uuid.uuid4().hex}, + ldap={'url': uuid.uuid4().hex}, + ) + self.assertIsInstance( + domain_config, + _domain_config.DomainConfig, + ) + + # update the domain config + + ldap_url = uuid.uuid4().hex + domain_config = 
self.admin_identity_client.update_domain_config( + self.domain, + ldap={'url': ldap_url}, + ) + self.assertIsInstance( + domain_config, + _domain_config.DomainConfig, + ) + + # retrieve details of the (updated) domain config + + domain_config = self.admin_identity_client.get_domain_config( + self.domain, + ) + self.assertIsInstance( + domain_config, + _domain_config.DomainConfig, + ) + self.assertEqual(ldap_url, domain_config.ldap.url) + + # delete the domain config + + result = self.admin_identity_client.delete_domain_config( + self.domain, + ignore_missing=False, + ) + self.assertIsNone(result) diff --git a/openstack/tests/functional/identity/v3/test_endpoint.py b/openstack/tests/functional/identity/v3/test_endpoint.py new file mode 100644 index 0000000000..2f81b0e0d5 --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_endpoint.py @@ -0,0 +1,134 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import endpoint as _endpoint +from openstack.tests.functional.identity.v3 import base + + +class TestEndpoint(base.BaseIdentityTest): + def setUp(self): + super().setUp() + + self.service_name = self.getUniqueString('service') + self.service_type = self.getUniqueString('type') + self.service = self.admin_identity_client.create_service( + name=self.service_name, + type=self.service_type, + ) + self.addCleanup( + self.admin_identity_client.delete_service, self.service + ) + + self.region_name = self.getUniqueString('region') + self.region = self.admin_identity_client.create_region( + name=self.region_name + ) + self.addCleanup(self.admin_identity_client.delete_region, self.region) + + unique_base = self.getUniqueString('endpoint') + self.test_url = f'https://{unique_base}.example.com/v1' + self.updated_url = f'https://{unique_base}.example.com/v2' + + def _delete_endpoint(self, endpoint): + ret = self.admin_identity_client.delete_endpoint(endpoint) + self.assertIsNone(ret) + + def test_endpoint(self): + # Create public endpoint + public_endpoint = self.admin_identity_client.create_endpoint( + service_id=self.service.id, + interface='public', + url=self.test_url, + region_id=self.region.id, + is_enabled=True, + ) + self.addCleanup(self._delete_endpoint, public_endpoint) + self.assertIsInstance(public_endpoint, _endpoint.Endpoint) + self.assertIsNotNone(public_endpoint.id) + self.assertEqual(self.service.id, public_endpoint.service_id) + self.assertEqual('public', public_endpoint.interface) + self.assertEqual(self.test_url, public_endpoint.url) + self.assertEqual(self.region.id, public_endpoint.region_id) + self.assertTrue(public_endpoint.is_enabled) + + # Create internal endpoint for filter testing + internal_endpoint = self.admin_identity_client.create_endpoint( + service_id=self.service.id, + interface='internal', + url=self.test_url, + region_id=self.region.id, + ) + self.addCleanup(self._delete_endpoint, internal_endpoint) + 
self.assertIsInstance(internal_endpoint, _endpoint.Endpoint) + self.assertIsNotNone(internal_endpoint.id) + self.assertEqual('internal', internal_endpoint.interface) + + # Update public endpoint + public_endpoint = self.admin_identity_client.update_endpoint( + public_endpoint, + url=self.updated_url, + is_enabled=False, + ) + self.assertIsInstance(public_endpoint, _endpoint.Endpoint) + self.assertEqual(self.updated_url, public_endpoint.url) + self.assertFalse(public_endpoint.is_enabled) + + # Get endpoint by ID + public_endpoint = self.admin_identity_client.get_endpoint( + public_endpoint.id + ) + self.assertIsInstance(public_endpoint, _endpoint.Endpoint) + self.assertEqual(self.updated_url, public_endpoint.url) + self.assertFalse(public_endpoint.is_enabled) + + # Find endpoint + found_endpoint = self.admin_identity_client.find_endpoint( + public_endpoint.id + ) + self.assertIsInstance(found_endpoint, _endpoint.Endpoint) + self.assertEqual(public_endpoint.id, found_endpoint.id) + + # List endpoints + endpoints = list(self.admin_identity_client.endpoints()) + self.assertIsInstance(endpoints[0], _endpoint.Endpoint) + endpoint_ids = {ep.id for ep in endpoints} + self.assertIn(public_endpoint.id, endpoint_ids) + self.assertIn(internal_endpoint.id, endpoint_ids) + + # Test service filter + service_endpoints = list( + self.admin_identity_client.endpoints(service_id=self.service.id) + ) + service_endpoint_ids = {ep.id for ep in service_endpoints} + self.assertIn(public_endpoint.id, service_endpoint_ids) + self.assertIn(internal_endpoint.id, service_endpoint_ids) + + # Test interface filter + public_endpoints = list( + self.admin_identity_client.endpoints(interface='public') + ) + public_endpoint_ids = {ep.id for ep in public_endpoints} + self.assertIn(public_endpoint.id, public_endpoint_ids) + + internal_endpoints = list( + self.admin_identity_client.endpoints(interface='internal') + ) + internal_endpoint_ids = {ep.id for ep in internal_endpoints} + 
self.assertIn(internal_endpoint.id, internal_endpoint_ids) + + # Test region filter + region_endpoints = list( + self.admin_identity_client.endpoints(region_id=self.region.id) + ) + region_endpoint_ids = {ep.id for ep in region_endpoints} + self.assertIn(public_endpoint.id, region_endpoint_ids) + self.assertIn(internal_endpoint.id, region_endpoint_ids) diff --git a/openstack/tests/functional/identity/v3/test_group.py b/openstack/tests/functional/identity/v3/test_group.py new file mode 100644 index 0000000000..a8a3b4c568 --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_group.py @@ -0,0 +1,99 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import group as _group +from openstack.identity.v3 import user as _user +from openstack.tests.functional.identity.v3 import base + + +class TestGroup(base.BaseIdentityTest): + def setUp(self): + super().setUp() + + self.group_name = self.getUniqueString('group') + self.group_description = self.getUniqueString('group') + self.user_name = self.getUniqueString('user') + self.user_email = f"{self.user_name}@example.com" + + self.user = self.admin_identity_client.create_user( + name=self.user_name, + email=self.user_email, + ) + self.addCleanup(self._delete_user, self.user) + + def _delete_group(self, group): + ret = self.admin_identity_client.delete_group(group) + self.assertIsNone(ret) + + def _delete_user(self, user): + ret = self.admin_identity_client.delete_user(user) + self.assertIsNone(ret) + + def test_group(self): + # create the group + + group = self.admin_identity_client.create_group( + name=self.group_name, + ) + self.addCleanup(self._delete_group, group) + self.assertIsInstance(group, _group.Group) + self.assertEqual('', group.description) + + # update the group + + group = self.admin_identity_client.update_group( + group, description=self.group_description + ) + self.assertIsInstance(group, _group.Group) + self.assertEqual(self.group_description, group.description) + + # retrieve details of the (updated) group by ID + + group = self.admin_identity_client.get_group(group.id) + self.assertIsInstance(group, _group.Group) + self.assertEqual(self.group_description, group.description) + + # retrieve details of the (updated) group by name + + group = self.admin_identity_client.find_group(group.name) + self.assertIsInstance(group, _group.Group) + self.assertEqual(self.group_description, group.description) + + # list all groups + + groups = list(self.admin_identity_client.groups()) + self.assertIsInstance(groups[0], _group.Group) + self.assertIn( + self.group_name, + {x.name for x in groups}, + ) + + # add user to group + 
self.admin_identity_client.add_user_to_group(self.user, group) + + is_in_group = self.admin_identity_client.check_user_in_group( + self.user, group + ) + self.assertTrue(is_in_group) + + group_users = list(self.admin_identity_client.group_users(group)) + self.assertIsInstance(group_users[0], _user.User) + self.assertIn(self.user_name, {x.name for x in group_users}) + + # remove user from group + + self.admin_identity_client.remove_user_from_group(self.user, group) + + is_in_group = self.admin_identity_client.check_user_in_group( + self.user, group + ) + self.assertFalse(is_in_group) diff --git a/openstack/tests/functional/identity/v3/test_limit.py b/openstack/tests/functional/identity/v3/test_limit.py new file mode 100644 index 0000000000..59be355fcf --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_limit.py @@ -0,0 +1,98 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import limit as _limit +from openstack.tests.functional.identity.v3 import base + + +class TestLimit(base.BaseIdentityTest): + def setUp(self): + super().setUp() + + self.service_name = self.getUniqueString('service') + self.service_type = self.getUniqueString('type') + self.service = self.system_admin_identity_client.create_service( + name=self.service_name, + type=self.service_type, + ) + self.addCleanup( + self.system_admin_identity_client.delete_service, self.service + ) + + self.resource_name = self.getUniqueString('resource') + self.registered_limit = ( + self.system_admin_identity_client.create_registered_limit( + resource_name=self.resource_name, + service_id=self.service.id, + default_limit=10, + ) + ) + self.addCleanup( + self.system_admin_identity_client.delete_registered_limit, + self.registered_limit, + ) + + self.project_name = self.getUniqueString('project') + self.project = self.system_admin_identity_client.create_project( + name=self.project_name, + ) + self.addCleanup( + self.system_admin_identity_client.delete_project, self.project + ) + + self.limit_description = self.getUniqueString('limit') + + def _delete_limit(self, limit): + ret = self.system_admin_identity_client.delete_limit(limit) + self.assertIsNone(ret) + + def test_limit(self): + # create the limit + + limit = self.system_admin_identity_client.create_limit( + resource_name=self.resource_name, + service_id=self.service.id, + project_id=self.project.id, + resource_limit=50, + ) + self.addCleanup(self._delete_limit, limit) + self.assertIsInstance(limit, _limit.Limit) + self.assertIsNotNone(limit.id) + self.assertIsNone(limit.description) + self.assertEqual(self.service.id, limit.service_id) + self.assertEqual(self.project.id, limit.project_id) + self.assertEqual(50, limit.resource_limit) + + # update the limit + + limit = self.system_admin_identity_client.update_limit( + limit, description=self.limit_description + ) + self.assertIsInstance(limit, 
_limit.Limit) + self.assertEqual(self.limit_description, limit.description) + + # retrieve details of the (updated) limit by ID + + limit = self.system_admin_identity_client.get_limit(limit.id) + self.assertIsInstance(limit, _limit.Limit) + self.assertEqual(self.limit_description, limit.description) + + # (there's no name, so no way to retrieve by name) + + # list all limits + + limits = list(self.system_admin_identity_client.limits()) + self.assertIsInstance(limits[0], _limit.Limit) + self.assertIn( + self.resource_name, + {x.resource_name for x in limits}, + ) diff --git a/openstack/tests/functional/identity/v3/test_project.py b/openstack/tests/functional/identity/v3/test_project.py new file mode 100644 index 0000000000..20455bd559 --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_project.py @@ -0,0 +1,79 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import project as _project +from openstack.tests.functional.identity.v3 import base + + +class TestProject(base.BaseIdentityTest): + def setUp(self): + super().setUp() + + self.project_name = self.getUniqueString('project') + self.project_description = self.getUniqueString('project') + + def _delete_project(self, project): + ret = self.admin_identity_client.delete_project(project) + self.assertIsNone(ret) + + def test_project(self): + # create the project + + project = self.admin_identity_client.create_project( + name=self.project_name, + ) + self.assertIsInstance(project, _project.Project) + self.assertEqual('', project.description) + self.addCleanup(self._delete_project, project) + + # update the project + + project = self.admin_identity_client.update_project( + project, description=self.project_description + ) + self.assertIsInstance(project, _project.Project) + self.assertEqual(self.project_description, project.description) + + # retrieve details of the (updated) project by ID + + project = self.admin_identity_client.get_project(project.id) + self.assertIsInstance(project, _project.Project) + self.assertEqual(self.project_description, project.description) + + # retrieve details of the (updated) project by name + + project = self.admin_identity_client.find_project(project.name) + self.assertIsInstance(project, _project.Project) + self.assertEqual(self.project_description, project.description) + + # list all projects + + projects = list(self.admin_identity_client.projects()) + self.assertIsInstance(projects[0], _project.Project) + self.assertIn( + self.project_name, + {x.name for x in projects}, + ) + + def test_user_project(self): + # list all user projects + + user_projects = list( + self.admin_identity_client.user_projects( + self.operator_cloud.current_user_id + ) + ) + self.assertIsInstance(user_projects[0], _project.UserProject) + self.assertIn( + self.operator_cloud.current_project_id, + {x.id for x in user_projects}, + ) diff 
--git a/openstack/tests/functional/identity/v3/test_registered_limit.py b/openstack/tests/functional/identity/v3/test_registered_limit.py new file mode 100644 index 0000000000..d9c3d82302 --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_registered_limit.py @@ -0,0 +1,111 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import registered_limit as _registered_limit +from openstack.tests.functional.identity.v3 import base + + +class TestRegisteredLimit(base.BaseIdentityTest): + def setUp(self): + super().setUp() + + self.region_name = self.getUniqueString('region') + self.region = self.system_admin_identity_client.create_region( + name=self.region_name + ) + self.addCleanup( + self.system_admin_identity_client.delete_region, self.region + ) + + self.service_name = self.getUniqueString('service') + self.service_type = self.getUniqueString('type') + self.service = self.system_admin_identity_client.create_service( + name=self.service_name, + type=self.service_type, + ) + self.addCleanup( + self.system_admin_identity_client.delete_service, self.service + ) + + self.resource_name = self.getUniqueString('resource') + self.registered_limit_description = self.getUniqueString( + 'registered_limit' + ) + + def _delete_registered_limit(self, registered_limit): + ret = self.system_admin_identity_client.delete_registered_limit( + registered_limit + ) + self.assertIsNone(ret) + + def test_registered_limit(self): + # create the registered 
limit + + registered_limit = ( + self.system_admin_identity_client.create_registered_limit( + resource_name=self.resource_name, + service_id=self.service.id, + region_id=self.region.id, + default_limit=10, + ) + ) + self.addCleanup(self._delete_registered_limit, registered_limit) + self.assertIsInstance( + registered_limit, _registered_limit.RegisteredLimit + ) + self.assertIsNotNone(registered_limit.id) + self.assertIsNone(registered_limit.description) + self.assertEqual(self.service.id, registered_limit.service_id) + self.assertEqual(self.region.id, registered_limit.region_id) + + # update the registered limit + + registered_limit = ( + self.system_admin_identity_client.update_registered_limit( + registered_limit, description=self.registered_limit_description + ) + ) + self.assertIsInstance( + registered_limit, _registered_limit.RegisteredLimit + ) + self.assertEqual( + self.registered_limit_description, registered_limit.description + ) + + # retrieve details of the (updated) registered limit by ID + + registered_limit = ( + self.system_admin_identity_client.get_registered_limit( + registered_limit.id + ) + ) + self.assertIsInstance( + registered_limit, _registered_limit.RegisteredLimit + ) + self.assertEqual( + self.registered_limit_description, registered_limit.description + ) + + # (there's no name, so no way to retrieve by name) + + # list all registered limits + + registered_limits = list( + self.system_admin_identity_client.registered_limits() + ) + self.assertIsInstance( + registered_limits[0], _registered_limit.RegisteredLimit + ) + self.assertIn( + self.resource_name, + {x.resource_name for x in registered_limits}, + ) diff --git a/openstack/tests/functional/identity/v3/test_user.py b/openstack/tests/functional/identity/v3/test_user.py new file mode 100644 index 0000000000..a10399a3b8 --- /dev/null +++ b/openstack/tests/functional/identity/v3/test_user.py @@ -0,0 +1,71 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not 
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import user as _user +from openstack.tests.functional.identity.v3 import base + + +class TestUser(base.BaseIdentityTest): + def setUp(self): + super().setUp() + + self.username = self.getUniqueString('user') + self.password = "test_password_123" + self.email = f"{self.username}@example.com" + self.description = "Test user for functional testing" + + def _delete_user(self, user): + ret = self.admin_identity_client.delete_user(user) + self.assertIsNone(ret) + + def test_user(self): + # Create user + user = self.admin_identity_client.create_user( + name=self.username, + password=self.password, + email=self.email, + description=self.description, + ) + self.addCleanup(self._delete_user, user) + self.assertIsInstance(user, _user.User) + self.assertIsNotNone(user.id) + self.assertEqual(self.username, user.name) + self.assertEqual(self.email, user.email) + self.assertEqual(self.description, user.description) + + # Update user + new_email = f"updated_{self.username}@example.com" + new_description = "Updated description for test user" + + updated_user = self.admin_identity_client.update_user( + user.id, email=new_email, description=new_description + ) + self.assertIsInstance(updated_user, _user.User) + self.assertEqual(new_email, updated_user.email) + self.assertEqual(new_description, updated_user.description) + self.assertEqual( + self.username, updated_user.name + ) # Name should remain unchanged + + # Read user list + users = list(self.admin_identity_client.users()) + 
self.assertIsInstance(users[0], _user.User) + user_ids = {ep.id for ep in users} + self.assertIn(user.id, user_ids) + + # Read user by ID + user = self.admin_identity_client.get_user(user.id) + self.assertIsInstance(user, _user.User) + self.assertEqual(user.id, user.id) + self.assertEqual(self.username, user.name) + self.assertEqual(new_email, user.email) + self.assertEqual(new_description, user.description) diff --git a/openstack/tests/functional/image/v2/base.py b/openstack/tests/functional/image/v2/base.py new file mode 100644 index 0000000000..cc698ed378 --- /dev/null +++ b/openstack/tests/functional/image/v2/base.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.image.v2 import _proxy as _image_v2 +from openstack.tests.functional import base +from openstack import utils + + +class BaseImageTest(base.BaseFunctionalTest): + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_IMAGE' + + admin_image_client: _image_v2.Proxy + image_client: _image_v2.Proxy + + def setUp(self): + super().setUp() + self._set_user_cloud(image_api_version='2') + self._set_operator_cloud(image_api_version='2') + + if not self.user_cloud.has_service('image', '2'): + self.skipTest('image service not supported by cloud') + self.admin_image_client = utils.ensure_service_version( + self.operator_cloud.image, '2' + ) + self.image_client = utils.ensure_service_version( + self.user_cloud.image, '2' + ) diff --git a/openstack/tests/functional/image/v2/test_image.py b/openstack/tests/functional/image/v2/test_image.py index 3008097a59..4b92342292 100644 --- a/openstack/tests/functional/image/v2/test_image.py +++ b/openstack/tests/functional/image/v2/test_image.py @@ -10,36 +10,81 @@ # License for the specific language governing permissions and limitations # under the License. -from openstack import connection -from openstack.tests.functional import base +from openstack.image.v2 import image as _image +from openstack.tests.functional.image.v2 import base +# NOTE(stephenfin): This is referenced in the Compute functional tests to avoid +# attempts to boot from it. 
TEST_IMAGE_NAME = 'Test Image' -class TestImage(base.BaseFunctionalTest): +class TestImage(base.BaseImageTest): + def setUp(self): + super().setUp() - class ImageOpts(object): - def __init__(self): - self.image_api_version = '2' + with open('CONTRIBUTING.rst', 'rb') as fh: + data = fh.read() - @classmethod - def setUpClass(cls): - opts = cls.ImageOpts() - cls.conn = connection.from_config(cloud_name=base.TEST_CLOUD, - options=opts) - - cls.img = cls.conn.image.upload_image( + # there's a limit on name length + self.image = self.admin_image_client.create_image( name=TEST_IMAGE_NAME, disk_format='raw', container_format='bare', - properties='{"description": "This is not an image"}', - data=open('CONTRIBUTING.rst', 'r') + properties={ + 'description': 'This is not an image', + }, + data=data, ) + self.assertIsInstance(self.image, _image.Image) + self.assertEqual(TEST_IMAGE_NAME, self.image.name) + + def tearDown(self): + # we do this in tearDown rather than via 'addCleanup' since we want to + # wait for the deletion of the resource to ensure it completes + self.admin_image_client.delete_image(self.image) + self.admin_image_client.wait_for_delete(self.image) + + super().tearDown() + + def test_images(self): + # get image + image = self.admin_image_client.get_image(self.image.id) + self.assertEqual(self.image.name, image.name) + + # find image + image = self.admin_image_client.find_image(self.image.name) + self.assertEqual(self.image.id, image.id) + + # list + images = list(self.admin_image_client.images()) + # there are many other images so we don't assert that this is the + # *only* image present + self.assertIn(self.image.id, {i.id for i in images}) + + # update + image_name = self.getUniqueString() + image = self.admin_image_client.update_image( + self.image, + name=image_name, + ) + self.assertIsInstance(image, _image.Image) + image = self.admin_image_client.get_image(self.image.id) + self.assertEqual(image_name, image.name) + + def test_tags(self): + # add tag + 
image = self.admin_image_client.get_image(self.image) + self.admin_image_client.add_tag(image, 't1') + self.admin_image_client.add_tag(image, 't2') - @classmethod - def tearDownClass(cls): - cls.conn.image.delete_image(cls.img) + # filter image by tags + image = next(iter(self.admin_image_client.images(tag=['t1', 't2']))) + self.assertEqual(image.id, image.id) + self.assertIn('t1', image.tags) + self.assertIn('t2', image.tags) - def test_get_image(self): - img2 = self.conn.image.get_image(self.img) - self.assertEqual(self.img, img2) + # remove tag + self.admin_image_client.remove_tag(image, 't1') + image = self.admin_image_client.get_image(self.image) + self.assertIn('t2', image.tags) + self.assertNotIn('t1', image.tags) diff --git a/openstack/tests/functional/image/v2/test_member.py b/openstack/tests/functional/image/v2/test_member.py new file mode 100644 index 0000000000..5b96a721ae --- /dev/null +++ b/openstack/tests/functional/image/v2/test_member.py @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions as sdk_exc +from openstack.image.v2 import image as _image +from openstack.image.v2 import member as _member +from openstack.tests.functional.image.v2 import base + + +TEST_IMAGE_NAME = 'Test Image for Sharing' +MEMBER_STATUS_PENDING = 'pending' +MEMBER_STATUS_ACCEPTED = 'accepted' + + +class TestImageMember(base.BaseImageTest): + def setUp(self): + super().setUp() + + # NOTE(jbeen): 1-byte dummy image data for sharing tests; not bootable. + self.image = self.admin_image_client.create_image( + name=TEST_IMAGE_NAME, + disk_format='raw', + container_format='bare', + visibility='shared', + data=b'0', + ) + self.assertIsInstance(self.image, _image.Image) + self.assertEqual(TEST_IMAGE_NAME, self.image.name) + + self.member_id = self.user_cloud.session.get_project_id() + self.assertIsNotNone(self.member_id) + + def tearDown(self): + self.admin_image_client.delete_image(self.image) + self.admin_image_client.wait_for_delete(self.image) + + super().tearDown() + + def test_image_members(self): + # add member + member = self.admin_image_client.add_member( + image=self.image, member=self.member_id + ) + self.assertIsInstance(member, _member.Member) + self.assertEqual(self.member_id, member.member_id) + self.assertEqual(MEMBER_STATUS_PENDING, member.status) + + # get member + member = self.admin_image_client.get_member( + image=self.image, member=self.member_id + ) + self.assertIsInstance(member, _member.Member) + self.assertEqual(self.member_id, member.member_id) + + # list members + members = list(self.admin_image_client.members(image=self.image)) + self.assertIn(self.member_id, {m.id for m in members}) + + # update member + member = self.image_client.update_member( + image=self.image, + member=self.member_id, + status=MEMBER_STATUS_ACCEPTED, + ) + self.assertIsInstance(member, _member.Member) + self.assertEqual(self.member_id, member.member_id) + self.assertEqual(MEMBER_STATUS_ACCEPTED, member.status) + + # remove member + 
self.admin_image_client.remove_member( + image=self.image, member=self.member_id + ) + self.assertRaises( + sdk_exc.NotFoundException, + self.admin_image_client.get_member, + image=self.image, + member=self.member_id, + ) diff --git a/openstack/tests/functional/image/v2/test_metadef_namespace.py b/openstack/tests/functional/image/v2/test_metadef_namespace.py new file mode 100644 index 0000000000..353091ae20 --- /dev/null +++ b/openstack/tests/functional/image/v2/test_metadef_namespace.py @@ -0,0 +1,135 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_namespace as _metadef_namespace +from openstack.tests.functional.image.v2 import base + + +class TestMetadefNamespace(base.BaseImageTest): + # TODO(stephenfin): We should use setUpClass here for MOAR SPEED!!! 
+ def setUp(self): + super().setUp() + + # there's a limit on namespace length + namespace = self.getUniqueString().split('.')[-1] + self.metadef_namespace = ( + self.admin_image_client.create_metadef_namespace( + namespace=namespace, + ) + ) + self.assertIsInstance( + self.metadef_namespace, + _metadef_namespace.MetadefNamespace, + ) + self.assertEqual(namespace, self.metadef_namespace.namespace) + + def tearDown(self): + # we do this in tearDown rather than via 'addCleanup' since we want to + # wait for the deletion of the resource to ensure it completes + self.admin_image_client.delete_metadef_namespace( + self.metadef_namespace + ) + self.admin_image_client.wait_for_delete(self.metadef_namespace) + + super().tearDown() + + def test_metadef_namespace(self): + # get + metadef_namespace = self.admin_image_client.get_metadef_namespace( + self.metadef_namespace.namespace + ) + self.assertEqual( + self.metadef_namespace.namespace, + metadef_namespace.namespace, + ) + + # (no find_metadef_namespace method) + + # list + metadef_namespaces = list(self.admin_image_client.metadef_namespaces()) + # there are a load of default metadef namespaces so we don't assert + # that this is the *only* metadef namespace present + self.assertIn( + self.metadef_namespace.namespace, + {n.namespace for n in metadef_namespaces}, + ) + + # update + # there's a limit on display name and description lengths and no + # inherent need for randomness so we use fixed strings + metadef_namespace_display_name = 'A display name' + metadef_namespace_description = 'A description' + metadef_namespace = self.admin_image_client.update_metadef_namespace( + self.metadef_namespace, + display_name=metadef_namespace_display_name, + description=metadef_namespace_description, + ) + self.assertIsInstance( + metadef_namespace, + _metadef_namespace.MetadefNamespace, + ) + metadef_namespace = self.admin_image_client.get_metadef_namespace( + self.metadef_namespace.namespace + ) + self.assertEqual( + 
metadef_namespace_display_name, + metadef_namespace.display_name, + ) + self.assertEqual( + metadef_namespace_description, + metadef_namespace.description, + ) + + def test_tags(self): + # add tag + metadef_namespace = self.admin_image_client.get_metadef_namespace( + self.metadef_namespace.namespace + ) + metadef_namespace.add_tag(self.admin_image_client, 't1') + metadef_namespace.add_tag(self.admin_image_client, 't2') + + # list tags + metadef_namespace.fetch_tags(self.admin_image_client) + md_tags = [tag['name'] for tag in metadef_namespace.tags] + self.assertIn('t1', md_tags) + self.assertIn('t2', md_tags) + + # remove tag + metadef_namespace.remove_tag(self.admin_image_client, 't1') + metadef_namespace = self.admin_image_client.get_metadef_namespace( + self.metadef_namespace.namespace + ) + md_tags = [tag['name'] for tag in metadef_namespace.tags] + self.assertIn('t2', md_tags) + self.assertNotIn('t1', md_tags) + + # add tags without append + metadef_namespace.set_tags(self.admin_image_client, ["t1", "t2"]) + metadef_namespace.fetch_tags(self.admin_image_client) + md_tags = [tag['name'] for tag in metadef_namespace.tags] + self.assertIn('t1', md_tags) + self.assertIn('t2', md_tags) + + # add tags with append + metadef_namespace.set_tags( + self.admin_image_client, ["t3", "t4"], append=True + ) + metadef_namespace.fetch_tags(self.admin_image_client) + md_tags = [tag['name'] for tag in metadef_namespace.tags] + self.assertIn('t1', md_tags) + self.assertIn('t2', md_tags) + self.assertIn('t3', md_tags) + self.assertIn('t4', md_tags) + + # remove all tags + metadef_namespace.remove_all_tags(self.admin_image_client) + self.assertEqual([], metadef_namespace.tags) diff --git a/openstack/tests/functional/image/v2/test_metadef_object.py b/openstack/tests/functional/image/v2/test_metadef_object.py new file mode 100644 index 0000000000..ac27c05970 --- /dev/null +++ b/openstack/tests/functional/image/v2/test_metadef_object.py @@ -0,0 +1,109 @@ +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_namespace as _metadef_namespace +from openstack.image.v2 import metadef_object as _metadef_object +from openstack.tests.functional.image.v2 import base + + +class TestMetadefObject(base.BaseImageTest): + def setUp(self): + super().setUp() + + # create namespace for object + namespace = self.getUniqueString().split('.')[-1] + self.metadef_namespace = ( + self.admin_image_client.create_metadef_namespace( + namespace=namespace, + ) + ) + self.assertIsInstance( + self.metadef_namespace, + _metadef_namespace.MetadefNamespace, + ) + self.assertEqual(namespace, self.metadef_namespace.namespace) + + # create object + object = self.getUniqueString().split('.')[-1] + self.metadef_object = self.admin_image_client.create_metadef_object( + name=object, + namespace=self.metadef_namespace, + ) + self.assertIsInstance( + self.metadef_object, + _metadef_object.MetadefObject, + ) + self.assertEqual(object, self.metadef_object.name) + + def tearDown(self): + self.admin_image_client.delete_metadef_object( + self.metadef_object, + self.metadef_object.namespace_name, + ) + self.admin_image_client.wait_for_delete(self.metadef_object) + + self.admin_image_client.delete_metadef_namespace( + self.metadef_namespace + ) + self.admin_image_client.wait_for_delete(self.metadef_namespace) + + super().tearDown() + + def test_metadef_objects(self): + # get + metadef_object = self.admin_image_client.get_metadef_object( + self.metadef_object.name, 
+ self.metadef_namespace, + ) + self.assertEqual( + self.metadef_object.namespace_name, + metadef_object.namespace_name, + ) + self.assertEqual( + self.metadef_object.name, + metadef_object.name, + ) + + # list + metadef_objects = list( + self.admin_image_client.metadef_objects( + self.metadef_object.namespace_name + ) + ) + # there are a load of default metadef objects so we don't assert + # that this is the *only* metadef objects present + self.assertIn( + self.metadef_object.name, + {o.name for o in metadef_objects}, + ) + + # update + metadef_object_new_name = 'New object name' + metadef_object_new_description = 'New object description' + metadef_object = self.admin_image_client.update_metadef_object( + self.metadef_object.name, + namespace=self.metadef_object.namespace_name, + name=metadef_object_new_name, + description=metadef_object_new_description, + ) + self.assertIsInstance( + metadef_object, + _metadef_object.MetadefObject, + ) + self.assertEqual( + metadef_object_new_name, + metadef_object.name, + ) + self.assertEqual( + metadef_object_new_description, + metadef_object.description, + ) diff --git a/openstack/tests/functional/image/v2/test_metadef_property.py b/openstack/tests/functional/image/v2/test_metadef_property.py new file mode 100644 index 0000000000..0b24af698f --- /dev/null +++ b/openstack/tests/functional/image/v2/test_metadef_property.py @@ -0,0 +1,135 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import random +import string + +from openstack.image.v2 import metadef_namespace as _metadef_namespace +from openstack.image.v2 import metadef_property as _metadef_property +from openstack.tests.functional.image.v2 import base + + +class TestMetadefProperty(base.BaseImageTest): + def setUp(self): + super().setUp() + + # there's a limit on namespace length + namespace = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(75) + ) + self.metadef_namespace = ( + self.admin_image_client.create_metadef_namespace( + namespace=namespace, + ) + ) + self.assertIsInstance( + self.metadef_namespace, + _metadef_namespace.MetadefNamespace, + ) + self.assertEqual(namespace, self.metadef_namespace.namespace) + + # there's a limit on property length + property_name = 'test_' + ''.join( + random.choice(string.ascii_lowercase) for _ in range(75) + ) + self.attrs = { + 'name': property_name, + 'title': property_name, + 'type': 'string', + 'description': 'Web Server port', + 'enum': ["80", "443"], + } + self.metadef_property = ( + self.admin_image_client.create_metadef_property( + self.metadef_namespace.namespace, **self.attrs + ) + ) + self.assertIsInstance( + self.metadef_property, _metadef_property.MetadefProperty + ) + self.assertEqual(self.attrs['name'], self.metadef_property.name) + self.assertEqual(self.attrs['title'], self.metadef_property.title) + self.assertEqual(self.attrs['type'], self.metadef_property.type) + self.assertEqual( + self.attrs['description'], self.metadef_property.description + ) + self.assertEqual(self.attrs['enum'], self.metadef_property.enum) + + def tearDown(self): + # we do this in tearDown rather than via 'addCleanup' since we want to + # wait for the deletion of the resource to ensure it completes + self.admin_image_client.delete_metadef_property( + self.metadef_property, self.metadef_namespace + ) + self.admin_image_client.delete_metadef_namespace( + self.metadef_namespace + ) + 
self.admin_image_client.wait_for_delete(self.metadef_namespace) + + super().tearDown() + + def test_metadef_property(self): + # get metadef property + metadef_property = self.admin_image_client.get_metadef_property( + self.metadef_property, self.metadef_namespace + ) + self.assertIsNotNone(metadef_property) + self.assertIsInstance( + metadef_property, _metadef_property.MetadefProperty + ) + self.assertEqual(self.attrs['name'], metadef_property.name) + self.assertEqual(self.attrs['title'], metadef_property.title) + self.assertEqual(self.attrs['type'], metadef_property.type) + self.assertEqual( + self.attrs['description'], metadef_property.description + ) + self.assertEqual(self.attrs['enum'], metadef_property.enum) + + # (no find_metadef_property method) + + # list + metadef_properties = list( + self.admin_image_client.metadef_properties(self.metadef_namespace) + ) + self.assertIsNotNone(metadef_properties) + self.assertIsInstance( + metadef_properties[0], _metadef_property.MetadefProperty + ) + + # update + self.attrs['title'] = ''.join( + random.choice(string.ascii_lowercase) for _ in range(10) + ) + self.attrs['description'] = ''.join( + random.choice(string.ascii_lowercase) for _ in range(10) + ) + metadef_property = self.admin_image_client.update_metadef_property( + self.metadef_property, + self.metadef_namespace.namespace, + **self.attrs, + ) + self.assertIsNotNone(metadef_property) + self.assertIsInstance( + metadef_property, + _metadef_property.MetadefProperty, + ) + metadef_property = self.admin_image_client.get_metadef_property( + self.metadef_property.name, self.metadef_namespace + ) + self.assertEqual( + self.attrs['title'], + metadef_property.title, + ) + self.assertEqual( + self.attrs['description'], + metadef_property.description, + ) diff --git a/openstack/tests/functional/image/v2/test_metadef_resource_type.py b/openstack/tests/functional/image/v2/test_metadef_resource_type.py new file mode 100644 index 0000000000..ada01115b0 --- /dev/null +++ 
b/openstack/tests/functional/image/v2/test_metadef_resource_type.py @@ -0,0 +1,83 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_namespace as _metadef_namespace +from openstack.image.v2 import metadef_resource_type as _metadef_resource_type +from openstack.tests.functional.image.v2 import base + + +class TestMetadefResourceType(base.BaseImageTest): + def setUp(self): + super().setUp() + + # there's a limit on namespace length + namespace = self.getUniqueString().split('.')[-1] + self.metadef_namespace = ( + self.admin_image_client.create_metadef_namespace( + namespace=namespace, + ) + ) + self.assertIsInstance( + self.metadef_namespace, + _metadef_namespace.MetadefNamespace, + ) + self.assertEqual(namespace, self.metadef_namespace.namespace) + + resource_type_name = 'test-resource-type' + resource_type = {'name': resource_type_name} + self.metadef_resource_type = ( + self.admin_image_client.create_metadef_resource_type_association( + metadef_namespace=namespace, **resource_type + ) + ) + self.assertIsInstance( + self.metadef_resource_type, + _metadef_resource_type.MetadefResourceTypeAssociation, + ) + self.assertEqual(resource_type_name, self.metadef_resource_type.name) + + def tearDown(self): + # we do this in tearDown rather than via 'addCleanup' since we want to + # wait for the deletion of the resource to ensure it completes + self.admin_image_client.delete_metadef_namespace( + self.metadef_namespace + ) + 
self.admin_image_client.wait_for_delete(self.metadef_namespace) + + super().tearDown() + + def test_metadef_resource_types(self): + # list resource type associations + associations = list( + self.admin_image_client.metadef_resource_type_associations( + metadef_namespace=self.metadef_namespace + ) + ) + + self.assertIn( + self.metadef_resource_type.name, {a.name for a in associations} + ) + + # (no find_metadef_resource_type_association method) + + # list resource types + resource_types = list(self.admin_image_client.metadef_resource_types()) + + self.assertIn( + self.metadef_resource_type.name, {t.name for t in resource_types} + ) + + # delete + self.admin_image_client.delete_metadef_resource_type_association( + self.metadef_resource_type, + metadef_namespace=self.metadef_namespace, + ) diff --git a/openstack/tests/functional/image/v2/test_metadef_schema.py b/openstack/tests/functional/image/v2/test_metadef_schema.py new file mode 100644 index 0000000000..083559f14e --- /dev/null +++ b/openstack/tests/functional/image/v2/test_metadef_schema.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.image.v2 import metadef_schema as _metadef_schema +from openstack.tests.functional.image.v2 import base + + +class TestMetadefSchema(base.BaseImageTest): + def test_get_metadef_namespace_schema(self): + metadef_schema = self.admin_image_client.get_metadef_namespace_schema() + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_namespaces_schema(self): + metadef_schema = ( + self.admin_image_client.get_metadef_namespaces_schema() + ) + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_resource_type_schema(self): + metadef_schema = ( + self.admin_image_client.get_metadef_resource_type_schema() + ) + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_resource_types_schema(self): + metadef_schema = ( + self.admin_image_client.get_metadef_resource_types_schema() + ) + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_object_schema(self): + metadef_schema = self.admin_image_client.get_metadef_object_schema() + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_objects_schema(self): + metadef_schema = self.admin_image_client.get_metadef_objects_schema() + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_property_schema(self): + metadef_schema = self.admin_image_client.get_metadef_property_schema() + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_properties_schema(self): + metadef_schema = ( + self.admin_image_client.get_metadef_properties_schema() + ) + 
self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_tag_schema(self): + metadef_schema = self.admin_image_client.get_metadef_tag_schema() + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) + + def test_get_metadef_tags_schema(self): + metadef_schema = self.admin_image_client.get_metadef_tags_schema() + self.assertIsNotNone(metadef_schema) + self.assertIsInstance(metadef_schema, _metadef_schema.MetadefSchema) diff --git a/openstack/tests/functional/image/v2/test_schema.py b/openstack/tests/functional/image/v2/test_schema.py new file mode 100644 index 0000000000..7a732585fe --- /dev/null +++ b/openstack/tests/functional/image/v2/test_schema.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.image.v2 import schema as _schema +from openstack.tests.functional.image.v2 import base + + +class TestSchema(base.BaseImageTest): + def test_get_images_schema(self): + schema = self.admin_image_client.get_images_schema() + self.assertIsNotNone(schema) + self.assertIsInstance(schema, _schema.Schema) + + def test_get_image_schema(self): + schema = self.admin_image_client.get_image_schema() + self.assertIsNotNone(schema) + self.assertIsInstance(schema, _schema.Schema) + + def test_get_members_schema(self): + schema = self.admin_image_client.get_members_schema() + self.assertIsNotNone(schema) + self.assertIsInstance(schema, _schema.Schema) + + def test_get_member_schema(self): + schema = self.admin_image_client.get_member_schema() + self.assertIsNotNone(schema) + self.assertIsInstance(schema, _schema.Schema) diff --git a/openstack/tests/functional/image/v2/test_task.py b/openstack/tests/functional/image/v2/test_task.py new file mode 100644 index 0000000000..31bcf700d2 --- /dev/null +++ b/openstack/tests/functional/image/v2/test_task.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.functional.image.v2 import base + + +class TestTask(base.BaseImageTest): + def test_tasks(self): + tasks = list(self.admin_image_client.tasks()) + # NOTE(stephenfin): Yes, this is a dumb test. Basically all that we're + # checking is that the API endpoint is correct. 
It would be nice to + # have a proper check here that includes creation of tasks but we don't + # currently have the ability to do this and I'm not even sure if tasks + # are still really a supported thing. A potential future work item, + # perhaps. + self.assertIsInstance(tasks, list) diff --git a/openstack/tests/functional/instance_ha/__init__.py b/openstack/tests/functional/instance_ha/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/instance_ha/v1/__init__.py b/openstack/tests/functional/instance_ha/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/instance_ha/v1/test_host.py b/openstack/tests/functional/instance_ha/v1/test_host.py new file mode 100644 index 0000000000..2058b7bed5 --- /dev/null +++ b/openstack/tests/functional/instance_ha/v1/test_host.py @@ -0,0 +1,89 @@ +# Copyright (C) 2018 NTT DATA +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.compute.v2 import hypervisor +from openstack import connection +from openstack.tests.functional import base + +HYPERVISORS: list[hypervisor.Hypervisor] = [] + + +def hypervisors(): + global HYPERVISORS + if HYPERVISORS: + return True + HYPERVISORS = connection.Connection.list_hypervisors( + connection.from_config(cloud_name=base.TEST_CLOUD_NAME) + ) + return bool(HYPERVISORS) + + +class TestHost(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('instance-ha') + self.NAME = self.getUniqueString() + + if not hypervisors(): + self.skipTest( + "Skip TestHost as there are no hypervisors configured in nova" + ) + + # Create segment + self.segment = self.operator_cloud.ha.create_segment( + name=self.NAME, recovery_method='auto', service_type='COMPUTE' + ) + + # Create valid host + self.NAME = HYPERVISORS[0].name + self.host = self.operator_cloud.ha.create_host( + segment_id=self.segment.uuid, + name=self.NAME, + type='COMPUTE', + control_attributes='SSH', + ) + + # Delete host + self.addCleanup( + self.operator_cloud.ha.delete_host, + self.segment.uuid, + self.host.uuid, + ) + # Delete segment + self.addCleanup( + self.operator_cloud.ha.delete_segment, self.segment.uuid + ) + + def test_list(self): + names = [ + o.name + for o in self.operator_cloud.ha.hosts( + self.segment.uuid, + failover_segment_id=self.segment.uuid, + type='COMPUTE', + ) + ] + self.assertIn(self.NAME, names) + + def test_update(self): + updated_host = self.operator_cloud.ha.update_host( + self.host['uuid'], + segment_id=self.segment.uuid, + on_maintenance='True', + ) + get_host = self.operator_cloud.ha.get_host( + updated_host.uuid, updated_host.segment_id + ) + self.assertEqual(True, get_host.on_maintenance) diff --git a/openstack/tests/functional/instance_ha/v1/test_segment.py b/openstack/tests/functional/instance_ha/v1/test_segment.py new file mode 100644 index 0000000000..0d147bc5e2 --- /dev/null +++ 
b/openstack/tests/functional/instance_ha/v1/test_segment.py @@ -0,0 +1,49 @@ +# Copyright (C) 2018 NTT DATA +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.functional import base + + +class TestSegment(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('instance-ha') + self.NAME = self.getUniqueString() + + # Create segment + self.segment = self.operator_cloud.ha.create_segment( + name=self.NAME, recovery_method='auto', service_type='COMPUTE' + ) + + # Delete segment + self.addCleanup( + self.operator_cloud.ha.delete_segment, self.segment['uuid'] + ) + + def test_list(self): + names = [ + o.name + for o in self.operator_cloud.ha.segments(recovery_method='auto') + ] + self.assertIn(self.NAME, names) + + def test_update(self): + updated_segment = self.operator_cloud.ha.update_segment( + self.segment['uuid'], name='UPDATED-NAME' + ) + get_updated_segment = self.operator_cloud.ha.get_segment( + updated_segment.uuid + ) + self.assertEqual('UPDATED-NAME', get_updated_segment.name) diff --git a/openstack/tests/functional/key_manager/__init__.py b/openstack/tests/functional/key_manager/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/key_manager/v1/__init__.py b/openstack/tests/functional/key_manager/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/key_manager/v1/test_project_quota.py 
b/openstack/tests/functional/key_manager/v1/test_project_quota.py new file mode 100644 index 0000000000..9ed9748896 --- /dev/null +++ b/openstack/tests/functional/key_manager/v1/test_project_quota.py @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions as sdk_exc +from openstack.identity.v3 import _proxy as _identity_v3 +from openstack.key_manager.v1 import project_quota as _project_quota +from openstack.tests.functional import base + +# NOTE(jbeen): Barbican policy may require 'key-manager:service-admin' for +# project quotas. Create and assign it per test project to avoid 403 errors. 
+ADMIN_ROLE_NAME = 'key-manager:service-admin' + + +class TestProjectQuota(base.BaseFunctionalTest): + _identity: _identity_v3.Proxy + + def setUp(self): + super().setUp() + self.require_service('key-manager') + + identity = self.system_admin_cloud.identity + assert identity.api_version == '3' + self._identity = identity + + self.project_name = self.getUniqueString('project') + self.project = self._identity.create_project( + name=self.project_name, + ) + self.addCleanup(self._identity.delete_project, self.project) + + self.role = self._identity.create_role(name=ADMIN_ROLE_NAME) + self.addCleanup(self._identity.delete_role, self.role.id) + + self.user_id = self.system_admin_cloud.current_user_id + self._identity.assign_project_role_to_user( + project=self.project, user=self.user_id, role=self.role + ) + self.addCleanup( + self._identity.unassign_project_role_from_user, + project=self.project, + user=self.user_id, + role=self.role, + ) + + self._set_operator_cloud(project_id=self.project.id) + + def test_project_quotas(self): + # update project quota + project_quota = self.operator_cloud.key_manager.update_project_quota( + self.project.id, + secrets=1, + orders=2, + containers=3, + consumers=4, + cas=5, + ) + + self.assertIsInstance(project_quota, _project_quota.ProjectQuota) + self.assertIsNotNone(project_quota.id) + self.assertEqual(1, project_quota.secrets) + self.assertEqual(2, project_quota.orders) + self.assertEqual(3, project_quota.containers) + self.assertEqual(4, project_quota.consumers) + self.assertEqual(5, project_quota.cas) + + # get project quota + project_id = self.project.id + project_quota = self.operator_cloud.key_manager.get_project_quota( + project_id + ) + self.assertIsInstance(project_quota, _project_quota.ProjectQuota) + + # delete project quota + self.operator_cloud.key_manager.delete_project_quota(project_quota) + self.assertRaises( + sdk_exc.NotFoundException, + self.operator_cloud.key_manager.get_project_quota, + project_quota, + ) diff 
--git a/openstack/tests/functional/key_manager/v1/test_secret_store.py b/openstack/tests/functional/key_manager/v1/test_secret_store.py new file mode 100644 index 0000000000..8c1b5b0f8c --- /dev/null +++ b/openstack/tests/functional/key_manager/v1/test_secret_store.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.key_manager.v1 import secret_store as _secret_store +from openstack.tests.functional import base + + +class TestSecretStore(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('key-manager') + + def test_secret_store(self): + """Test Secret Store operations""" + key_manager = self.operator_cloud.key_manager + + # Test list secret stores + secret_stores = list(key_manager.secret_stores()) + self.assertIsInstance(secret_stores, list) + + for store in secret_stores: + self.assertIsInstance(store, _secret_store.SecretStore) + self.assertIsNotNone(store.name) + self.assertIsNotNone(store.status) + + # Test list secret stores with filters + global_default_stores = list( + key_manager.secret_stores(global_default=True) + ) + self.assertIsInstance(global_default_stores, list) + + active_stores = list(key_manager.secret_stores(status="ACTIVE")) + self.assertIsInstance(active_stores, list) + + # Test get global default secret store + if global_default_stores: + default_store = key_manager.get_global_default_secret_store() + self.assertIsInstance(default_store, _secret_store.SecretStore) + 
self.assertIsNotNone(default_store.name) + self.assertTrue(default_store.global_default) + + # Test get preferred secret store + if secret_stores: + preferred_store = key_manager.get_preferred_secret_store() + self.assertIsInstance(preferred_store, _secret_store.SecretStore) + self.assertIsNotNone(preferred_store.name) diff --git a/openstack/tests/functional/load_balancer/__init__.py b/openstack/tests/functional/load_balancer/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/load_balancer/v2/__init__.py b/openstack/tests/functional/load_balancer/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/load_balancer/v2/test_load_balancer.py b/openstack/tests/functional/load_balancer/v2/test_load_balancer.py new file mode 100644 index 0000000000..9837e66595 --- /dev/null +++ b/openstack/tests/functional/load_balancer/v2/test_load_balancer.py @@ -0,0 +1,946 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.load_balancer.v2 import availability_zone +from openstack.load_balancer.v2 import availability_zone_profile +from openstack.load_balancer.v2 import flavor +from openstack.load_balancer.v2 import flavor_profile +from openstack.load_balancer.v2 import health_monitor +from openstack.load_balancer.v2 import l7_policy +from openstack.load_balancer.v2 import l7_rule +from openstack.load_balancer.v2 import listener +from openstack.load_balancer.v2 import load_balancer +from openstack.load_balancer.v2 import member +from openstack.load_balancer.v2 import pool +from openstack.load_balancer.v2 import quota +from openstack.tests.functional import base + + +class TestLoadBalancer(base.BaseFunctionalTest): + HM_ID = None + L7POLICY_ID = None + LB_ID = None + LISTENER_ID = None + MEMBER_ID = None + POOL_ID = None + VIP_SUBNET_ID = None + PROJECT_ID = None + FLAVOR_PROFILE_ID = None + FLAVOR_ID = None + AVAILABILITY_ZONE_PROFILE_ID = None + AMPHORA_ID = None + PROTOCOL = 'HTTP' + PROTOCOL_PORT = 80 + LB_ALGORITHM = 'ROUND_ROBIN' + MEMBER_ADDRESS = '192.0.2.16' + WEIGHT = 10 + DELAY = 2 + TIMEOUT = 1 + MAX_RETRY = 3 + HM_TYPE = 'HTTP' + ACTION = 'REDIRECT_TO_URL' + REDIRECT_URL = 'http://www.example.com' + COMPARE_TYPE = 'CONTAINS' + L7RULE_TYPE = 'HOST_NAME' + L7RULE_VALUE = 'example' + AMPHORA = 'amphora' + FLAVOR_DATA = '{"loadbalancer_topology": "SINGLE"}' + AVAILABILITY_ZONE_DATA = '{"compute_zone": "nova"}' + DESCRIPTION = 'Test description' + + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_LOAD_BALANCER' + + # TODO(shade): Creating load balancers can be slow on some hosts due to + # nova instance boot times (up to ten minutes). This used to + # use setUpClass, but that's a whole other pile of bad, so + # we may need to engineer something pleasing here. 
+ def setUp(self): + super().setUp() + self.require_service('load-balancer') + + self.HM_NAME = self.getUniqueString() + self.L7POLICY_NAME = self.getUniqueString() + self.LB_NAME = self.getUniqueString() + self.LISTENER_NAME = self.getUniqueString() + self.MEMBER_NAME = self.getUniqueString() + self.POOL_NAME = self.getUniqueString() + self.UPDATE_NAME = self.getUniqueString() + self.UPDATE_DESCRIPTION = self.getUniqueString() + self.FLAVOR_PROFILE_NAME = self.getUniqueString() + self.FLAVOR_NAME = self.getUniqueString() + self.AVAILABILITY_ZONE_PROFILE_NAME = self.getUniqueString() + self.AVAILABILITY_ZONE_NAME = self.getUniqueString() + subnets = list(self.operator_cloud.network.subnets()) + self.VIP_SUBNET_ID = subnets[0].id + self.PROJECT_ID = self.operator_cloud.session.get_project_id() + test_quota = self.operator_cloud.load_balancer.update_quota( + self.PROJECT_ID, + **{ + 'load_balancer': 100, + 'pool': 100, + 'listener': 100, + 'health_monitor': 100, + 'member': 100, + }, + ) + assert isinstance(test_quota, quota.Quota) + self.assertEqual(self.PROJECT_ID, test_quota.id) + + test_flavor_profile = ( + self.operator_cloud.load_balancer.create_flavor_profile( + name=self.FLAVOR_PROFILE_NAME, + provider_name=self.AMPHORA, + flavor_data=self.FLAVOR_DATA, + ) + ) + assert isinstance(test_flavor_profile, flavor_profile.FlavorProfile) + self.assertEqual(self.FLAVOR_PROFILE_NAME, test_flavor_profile.name) + self.FLAVOR_PROFILE_ID = test_flavor_profile.id + + test_flavor = self.operator_cloud.load_balancer.create_flavor( + name=self.FLAVOR_NAME, + flavor_profile_id=self.FLAVOR_PROFILE_ID, + is_enabled=True, + description=self.DESCRIPTION, + ) + assert isinstance(test_flavor, flavor.Flavor) + self.assertEqual(self.FLAVOR_NAME, test_flavor.name) + self.FLAVOR_ID = test_flavor.id + + test_az_profile = ( + self.operator_cloud.load_balancer.create_availability_zone_profile( + name=self.AVAILABILITY_ZONE_PROFILE_NAME, + provider_name=self.AMPHORA, + 
availability_zone_data=self.AVAILABILITY_ZONE_DATA, + ) + ) + assert isinstance( + test_az_profile, availability_zone_profile.AvailabilityZoneProfile + ) + self.assertEqual( + self.AVAILABILITY_ZONE_PROFILE_NAME, test_az_profile.name + ) + self.AVAILABILITY_ZONE_PROFILE_ID = test_az_profile.id + + test_az = self.operator_cloud.load_balancer.create_availability_zone( + name=self.AVAILABILITY_ZONE_NAME, + availability_zone_profile_id=self.AVAILABILITY_ZONE_PROFILE_ID, + is_enabled=True, + description=self.DESCRIPTION, + ) + assert isinstance(test_az, availability_zone.AvailabilityZone) + self.assertEqual(self.AVAILABILITY_ZONE_NAME, test_az.name) + + test_lb = self.operator_cloud.load_balancer.create_load_balancer( + name=self.LB_NAME, + vip_subnet_id=self.VIP_SUBNET_ID, + project_id=self.PROJECT_ID, + ) + assert isinstance(test_lb, load_balancer.LoadBalancer) + self.assertEqual(self.LB_NAME, test_lb.name) + # Wait for the LB to go ACTIVE. On non-virtualization enabled hosts + # it can take nova up to ten minutes to boot a VM. 
+ self.operator_cloud.load_balancer.wait_for_load_balancer( + test_lb.id, interval=1, wait=self._wait_for_timeout + ) + self.LB_ID = test_lb.id + + amphorae = self.operator_cloud.load_balancer.amphorae( + loadbalancer_id=self.LB_ID + ) + for amp in amphorae: + self.AMPHORA_ID = amp.id + + test_listener = self.operator_cloud.load_balancer.create_listener( + name=self.LISTENER_NAME, + protocol=self.PROTOCOL, + protocol_port=self.PROTOCOL_PORT, + loadbalancer_id=self.LB_ID, + ) + assert isinstance(test_listener, listener.Listener) + self.assertEqual(self.LISTENER_NAME, test_listener.name) + self.LISTENER_ID = test_listener.id + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + test_pool = self.operator_cloud.load_balancer.create_pool( + name=self.POOL_NAME, + protocol=self.PROTOCOL, + lb_algorithm=self.LB_ALGORITHM, + listener_id=self.LISTENER_ID, + ) + assert isinstance(test_pool, pool.Pool) + self.assertEqual(self.POOL_NAME, test_pool.name) + self.POOL_ID = test_pool.id + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + test_member = self.operator_cloud.load_balancer.create_member( + pool=self.POOL_ID, + name=self.MEMBER_NAME, + address=self.MEMBER_ADDRESS, + protocol_port=self.PROTOCOL_PORT, + weight=self.WEIGHT, + ) + assert isinstance(test_member, member.Member) + self.assertEqual(self.MEMBER_NAME, test_member.name) + self.MEMBER_ID = test_member.id + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + test_hm = self.operator_cloud.load_balancer.create_health_monitor( + pool_id=self.POOL_ID, + name=self.HM_NAME, + delay=self.DELAY, + timeout=self.TIMEOUT, + max_retries=self.MAX_RETRY, + type=self.HM_TYPE, + ) + assert isinstance(test_hm, health_monitor.HealthMonitor) + self.assertEqual(self.HM_NAME, test_hm.name) + self.HM_ID = test_hm.id + 
self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + test_l7policy = self.operator_cloud.load_balancer.create_l7_policy( + listener_id=self.LISTENER_ID, + name=self.L7POLICY_NAME, + action=self.ACTION, + redirect_url=self.REDIRECT_URL, + ) + assert isinstance(test_l7policy, l7_policy.L7Policy) + self.assertEqual(self.L7POLICY_NAME, test_l7policy.name) + self.L7POLICY_ID = test_l7policy.id + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + test_l7rule = self.operator_cloud.load_balancer.create_l7_rule( + l7_policy=self.L7POLICY_ID, + compare_type=self.COMPARE_TYPE, + type=self.L7RULE_TYPE, + value=self.L7RULE_VALUE, + ) + assert isinstance(test_l7rule, l7_rule.L7Rule) + self.assertEqual(self.COMPARE_TYPE, test_l7rule.compare_type) + self.L7RULE_ID = test_l7rule.id + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + def tearDown(self): + self.operator_cloud.load_balancer.get_load_balancer(self.LB_ID) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + self.operator_cloud.load_balancer.delete_quota( + self.PROJECT_ID, ignore_missing=False + ) + + self.operator_cloud.load_balancer.delete_l7_rule( + self.L7RULE_ID, l7_policy=self.L7POLICY_ID, ignore_missing=False + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + self.operator_cloud.load_balancer.delete_l7_policy( + self.L7POLICY_ID, ignore_missing=False + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + self.operator_cloud.load_balancer.delete_health_monitor( + self.HM_ID, ignore_missing=False + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + self.operator_cloud.load_balancer.delete_member( + 
self.MEMBER_ID, self.POOL_ID, ignore_missing=False + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + self.operator_cloud.load_balancer.delete_pool( + self.POOL_ID, ignore_missing=False + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + self.operator_cloud.load_balancer.delete_listener( + self.LISTENER_ID, ignore_missing=False + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + + self.operator_cloud.load_balancer.delete_load_balancer( + self.LB_ID, ignore_missing=False + ) + super().tearDown() + + self.operator_cloud.load_balancer.delete_flavor( + self.FLAVOR_ID, ignore_missing=False + ) + + self.operator_cloud.load_balancer.delete_flavor_profile( + self.FLAVOR_PROFILE_ID, ignore_missing=False + ) + + self.operator_cloud.load_balancer.delete_availability_zone( + self.AVAILABILITY_ZONE_NAME, ignore_missing=False + ) + + self.operator_cloud.load_balancer.delete_availability_zone_profile( + self.AVAILABILITY_ZONE_PROFILE_ID, ignore_missing=False + ) + + def test_lb_find(self): + test_lb = self.operator_cloud.load_balancer.find_load_balancer( + self.LB_NAME + ) + self.assertEqual(self.LB_ID, test_lb.id) + + def test_lb_get(self): + test_lb = self.operator_cloud.load_balancer.get_load_balancer( + self.LB_ID + ) + self.assertEqual(self.LB_NAME, test_lb.name) + self.assertEqual(self.LB_ID, test_lb.id) + self.assertEqual(self.VIP_SUBNET_ID, test_lb.vip_subnet_id) + + def test_lb_get_stats(self): + test_lb_stats = ( + self.operator_cloud.load_balancer.get_load_balancer_statistics( + self.LB_ID + ) + ) + self.assertEqual(0, test_lb_stats.active_connections) + self.assertEqual(0, test_lb_stats.bytes_in) + self.assertEqual(0, test_lb_stats.bytes_out) + self.assertEqual(0, test_lb_stats.request_errors) + self.assertEqual(0, test_lb_stats.total_connections) + + def test_lb_list(self): + names = [ 
+ lb.name + for lb in self.operator_cloud.load_balancer.load_balancers() + ] + self.assertIn(self.LB_NAME, names) + + def test_lb_update(self): + self.operator_cloud.load_balancer.update_load_balancer( + self.LB_ID, name=self.UPDATE_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_lb = self.operator_cloud.load_balancer.get_load_balancer( + self.LB_ID + ) + self.assertEqual(self.UPDATE_NAME, test_lb.name) + + self.operator_cloud.load_balancer.update_load_balancer( + self.LB_ID, name=self.LB_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_lb = self.operator_cloud.load_balancer.get_load_balancer( + self.LB_ID + ) + self.assertEqual(self.LB_NAME, test_lb.name) + + def test_lb_failover(self): + self.operator_cloud.load_balancer.failover_load_balancer(self.LB_ID) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_lb = self.operator_cloud.load_balancer.get_load_balancer( + self.LB_ID + ) + self.assertEqual(self.LB_NAME, test_lb.name) + + def test_listener_find(self): + test_listener = self.operator_cloud.load_balancer.find_listener( + self.LISTENER_NAME + ) + self.assertEqual(self.LISTENER_ID, test_listener.id) + + def test_listener_get(self): + test_listener = self.operator_cloud.load_balancer.get_listener( + self.LISTENER_ID + ) + self.assertEqual(self.LISTENER_NAME, test_listener.name) + self.assertEqual(self.LISTENER_ID, test_listener.id) + self.assertEqual(self.PROTOCOL, test_listener.protocol) + self.assertEqual(self.PROTOCOL_PORT, test_listener.protocol_port) + + def test_listener_get_stats(self): + test_listener_stats = ( + self.operator_cloud.load_balancer.get_listener_statistics( + self.LISTENER_ID + ) + ) + self.assertEqual(0, test_listener_stats.active_connections) + self.assertEqual(0, test_listener_stats.bytes_in) + self.assertEqual(0, 
test_listener_stats.bytes_out) + self.assertEqual(0, test_listener_stats.request_errors) + self.assertEqual(0, test_listener_stats.total_connections) + + def test_listener_list(self): + names = [ + ls.name for ls in self.operator_cloud.load_balancer.listeners() + ] + self.assertIn(self.LISTENER_NAME, names) + + def test_listener_update(self): + self.operator_cloud.load_balancer.get_load_balancer(self.LB_ID) + + self.operator_cloud.load_balancer.update_listener( + self.LISTENER_ID, name=self.UPDATE_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_listener = self.operator_cloud.load_balancer.get_listener( + self.LISTENER_ID + ) + self.assertEqual(self.UPDATE_NAME, test_listener.name) + + self.operator_cloud.load_balancer.update_listener( + self.LISTENER_ID, name=self.LISTENER_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_listener = self.operator_cloud.load_balancer.get_listener( + self.LISTENER_ID + ) + self.assertEqual(self.LISTENER_NAME, test_listener.name) + + def test_pool_find(self): + test_pool = self.operator_cloud.load_balancer.find_pool(self.POOL_NAME) + self.assertEqual(self.POOL_ID, test_pool.id) + + def test_pool_get(self): + test_pool = self.operator_cloud.load_balancer.get_pool(self.POOL_ID) + self.assertEqual(self.POOL_NAME, test_pool.name) + self.assertEqual(self.POOL_ID, test_pool.id) + self.assertEqual(self.PROTOCOL, test_pool.protocol) + + def test_pool_list(self): + names = [ + pool.name for pool in self.operator_cloud.load_balancer.pools() + ] + self.assertIn(self.POOL_NAME, names) + + def test_pool_update(self): + self.operator_cloud.load_balancer.get_load_balancer(self.LB_ID) + + self.operator_cloud.load_balancer.update_pool( + self.POOL_ID, name=self.UPDATE_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_pool = 
self.operator_cloud.load_balancer.get_pool(self.POOL_ID) + self.assertEqual(self.UPDATE_NAME, test_pool.name) + + self.operator_cloud.load_balancer.update_pool( + self.POOL_ID, name=self.POOL_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_pool = self.operator_cloud.load_balancer.get_pool(self.POOL_ID) + self.assertEqual(self.POOL_NAME, test_pool.name) + + def test_member_find(self): + test_member = self.operator_cloud.load_balancer.find_member( + self.MEMBER_NAME, self.POOL_ID + ) + self.assertEqual(self.MEMBER_ID, test_member.id) + + def test_member_get(self): + test_member = self.operator_cloud.load_balancer.get_member( + self.MEMBER_ID, self.POOL_ID + ) + self.assertEqual(self.MEMBER_NAME, test_member.name) + self.assertEqual(self.MEMBER_ID, test_member.id) + self.assertEqual(self.MEMBER_ADDRESS, test_member.address) + self.assertEqual(self.PROTOCOL_PORT, test_member.protocol_port) + self.assertEqual(self.WEIGHT, test_member.weight) + + def test_member_list(self): + names = [ + mb.name + for mb in self.operator_cloud.load_balancer.members(self.POOL_ID) + ] + self.assertIn(self.MEMBER_NAME, names) + + def test_member_update(self): + self.operator_cloud.load_balancer.get_load_balancer(self.LB_ID) + + self.operator_cloud.load_balancer.update_member( + self.MEMBER_ID, self.POOL_ID, name=self.UPDATE_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_member = self.operator_cloud.load_balancer.get_member( + self.MEMBER_ID, self.POOL_ID + ) + self.assertEqual(self.UPDATE_NAME, test_member.name) + + self.operator_cloud.load_balancer.update_member( + self.MEMBER_ID, self.POOL_ID, name=self.MEMBER_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_member = self.operator_cloud.load_balancer.get_member( + self.MEMBER_ID, self.POOL_ID + ) + 
self.assertEqual(self.MEMBER_NAME, test_member.name) + + def test_health_monitor_find(self): + test_hm = self.operator_cloud.load_balancer.find_health_monitor( + self.HM_NAME + ) + self.assertEqual(self.HM_ID, test_hm.id) + + def test_health_monitor_get(self): + test_hm = self.operator_cloud.load_balancer.get_health_monitor( + self.HM_ID + ) + self.assertEqual(self.HM_NAME, test_hm.name) + self.assertEqual(self.HM_ID, test_hm.id) + self.assertEqual(self.DELAY, test_hm.delay) + self.assertEqual(self.TIMEOUT, test_hm.timeout) + self.assertEqual(self.MAX_RETRY, test_hm.max_retries) + self.assertEqual(self.HM_TYPE, test_hm.type) + + def test_health_monitor_list(self): + names = [ + hm.name + for hm in self.operator_cloud.load_balancer.health_monitors() + ] + self.assertIn(self.HM_NAME, names) + + def test_health_monitor_update(self): + self.operator_cloud.load_balancer.get_load_balancer(self.LB_ID) + + self.operator_cloud.load_balancer.update_health_monitor( + self.HM_ID, name=self.UPDATE_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_hm = self.operator_cloud.load_balancer.get_health_monitor( + self.HM_ID + ) + self.assertEqual(self.UPDATE_NAME, test_hm.name) + + self.operator_cloud.load_balancer.update_health_monitor( + self.HM_ID, name=self.HM_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_hm = self.operator_cloud.load_balancer.get_health_monitor( + self.HM_ID + ) + self.assertEqual(self.HM_NAME, test_hm.name) + + def test_l7_policy_find(self): + test_l7_policy = self.operator_cloud.load_balancer.find_l7_policy( + self.L7POLICY_NAME + ) + self.assertEqual(self.L7POLICY_ID, test_l7_policy.id) + + def test_l7_policy_get(self): + test_l7_policy = self.operator_cloud.load_balancer.get_l7_policy( + self.L7POLICY_ID + ) + self.assertEqual(self.L7POLICY_NAME, test_l7_policy.name) + self.assertEqual(self.L7POLICY_ID, 
test_l7_policy.id) + self.assertEqual(self.ACTION, test_l7_policy.action) + + def test_l7_policy_list(self): + names = [ + l7.name for l7 in self.operator_cloud.load_balancer.l7_policies() + ] + self.assertIn(self.L7POLICY_NAME, names) + + def test_l7_policy_update(self): + self.operator_cloud.load_balancer.get_load_balancer(self.LB_ID) + + self.operator_cloud.load_balancer.update_l7_policy( + self.L7POLICY_ID, name=self.UPDATE_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_l7_policy = self.operator_cloud.load_balancer.get_l7_policy( + self.L7POLICY_ID + ) + self.assertEqual(self.UPDATE_NAME, test_l7_policy.name) + + self.operator_cloud.load_balancer.update_l7_policy( + self.L7POLICY_ID, name=self.L7POLICY_NAME + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_l7_policy = self.operator_cloud.load_balancer.get_l7_policy( + self.L7POLICY_ID + ) + self.assertEqual(self.L7POLICY_NAME, test_l7_policy.name) + + def test_l7_rule_find(self): + test_l7_rule = self.operator_cloud.load_balancer.find_l7_rule( + self.L7RULE_ID, self.L7POLICY_ID + ) + self.assertEqual(self.L7RULE_ID, test_l7_rule.id) + self.assertEqual(self.L7RULE_TYPE, test_l7_rule.type) + + def test_l7_rule_get(self): + test_l7_rule = self.operator_cloud.load_balancer.get_l7_rule( + self.L7RULE_ID, l7_policy=self.L7POLICY_ID + ) + self.assertEqual(self.L7RULE_ID, test_l7_rule.id) + self.assertEqual(self.COMPARE_TYPE, test_l7_rule.compare_type) + self.assertEqual(self.L7RULE_TYPE, test_l7_rule.type) + self.assertEqual(self.L7RULE_VALUE, test_l7_rule.rule_value) + + def test_l7_rule_list(self): + ids = [ + l7.id + for l7 in self.operator_cloud.load_balancer.l7_rules( + l7_policy=self.L7POLICY_ID + ) + ] + self.assertIn(self.L7RULE_ID, ids) + + def test_l7_rule_update(self): + self.operator_cloud.load_balancer.get_load_balancer(self.LB_ID) + + 
self.operator_cloud.load_balancer.update_l7_rule( + self.L7RULE_ID, + l7_policy=self.L7POLICY_ID, + rule_value=self.UPDATE_NAME, + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_l7_rule = self.operator_cloud.load_balancer.get_l7_rule( + self.L7RULE_ID, l7_policy=self.L7POLICY_ID + ) + self.assertEqual(self.UPDATE_NAME, test_l7_rule.rule_value) + + self.operator_cloud.load_balancer.update_l7_rule( + self.L7RULE_ID, + l7_policy=self.L7POLICY_ID, + rule_value=self.L7RULE_VALUE, + ) + self.operator_cloud.load_balancer.wait_for_load_balancer( + self.LB_ID, wait=self._wait_for_timeout + ) + test_l7_rule = self.operator_cloud.load_balancer.get_l7_rule( + self.L7RULE_ID, + l7_policy=self.L7POLICY_ID, + ) + self.assertEqual(self.L7RULE_VALUE, test_l7_rule.rule_value) + + def test_quota_list(self): + for qot in self.operator_cloud.load_balancer.quotas(): + self.assertIsNotNone(qot.project_id) + + def test_quota_get(self): + test_quota = self.operator_cloud.load_balancer.get_quota( + self.PROJECT_ID + ) + self.assertEqual(self.PROJECT_ID, test_quota.id) + + def test_quota_update(self): + attrs = {'load_balancer': 12345, 'pool': 67890} + for project_quota in self.operator_cloud.load_balancer.quotas(): + self.operator_cloud.load_balancer.update_quota( + project_quota, **attrs + ) + new_quota = self.operator_cloud.load_balancer.get_quota( + project_quota.project_id + ) + self.assertEqual(12345, new_quota.load_balancers) + self.assertEqual(67890, new_quota.pools) + + def test_default_quota(self): + self.operator_cloud.load_balancer.get_quota_default() + + def test_providers(self): + providers = self.operator_cloud.load_balancer.providers() + # Make sure our default provider is in the list + self.assertTrue( + any(prov['name'] == self.AMPHORA for prov in providers) + ) + + def test_provider_flavor_capabilities(self): + capabilities = ( + self.operator_cloud.load_balancer.provider_flavor_capabilities( + 
self.AMPHORA + ) + ) + # Make sure a known capability is in the default provider + self.assertTrue( + any(cap['name'] == 'loadbalancer_topology' for cap in capabilities) + ) + + def test_flavor_profile_find(self): + test_profile = self.operator_cloud.load_balancer.find_flavor_profile( + self.FLAVOR_PROFILE_NAME + ) + self.assertEqual(self.FLAVOR_PROFILE_ID, test_profile.id) + + def test_flavor_profile_get(self): + test_flavor_profile = ( + self.operator_cloud.load_balancer.get_flavor_profile( + self.FLAVOR_PROFILE_ID + ) + ) + self.assertEqual(self.FLAVOR_PROFILE_NAME, test_flavor_profile.name) + self.assertEqual(self.FLAVOR_PROFILE_ID, test_flavor_profile.id) + self.assertEqual(self.AMPHORA, test_flavor_profile.provider_name) + self.assertEqual(self.FLAVOR_DATA, test_flavor_profile.flavor_data) + + def test_flavor_profile_list(self): + names = [ + fv.name + for fv in self.operator_cloud.load_balancer.flavor_profiles() + ] + self.assertIn(self.FLAVOR_PROFILE_NAME, names) + + def test_flavor_profile_update(self): + self.operator_cloud.load_balancer.update_flavor_profile( + self.FLAVOR_PROFILE_ID, name=self.UPDATE_NAME + ) + test_flavor_profile = ( + self.operator_cloud.load_balancer.get_flavor_profile( + self.FLAVOR_PROFILE_ID + ) + ) + self.assertEqual(self.UPDATE_NAME, test_flavor_profile.name) + + self.operator_cloud.load_balancer.update_flavor_profile( + self.FLAVOR_PROFILE_ID, name=self.FLAVOR_PROFILE_NAME + ) + test_flavor_profile = ( + self.operator_cloud.load_balancer.get_flavor_profile( + self.FLAVOR_PROFILE_ID + ) + ) + self.assertEqual(self.FLAVOR_PROFILE_NAME, test_flavor_profile.name) + + def test_flavor_find(self): + test_flavor = self.operator_cloud.load_balancer.find_flavor( + self.FLAVOR_NAME + ) + self.assertEqual(self.FLAVOR_ID, test_flavor.id) + + def test_flavor_get(self): + test_flavor = self.operator_cloud.load_balancer.get_flavor( + self.FLAVOR_ID + ) + self.assertEqual(self.FLAVOR_NAME, test_flavor.name) + self.assertEqual(self.FLAVOR_ID, 
test_flavor.id) + self.assertEqual(self.DESCRIPTION, test_flavor.description) + self.assertEqual(self.FLAVOR_PROFILE_ID, test_flavor.flavor_profile_id) + + def test_flavor_list(self): + names = [fv.name for fv in self.operator_cloud.load_balancer.flavors()] + self.assertIn(self.FLAVOR_NAME, names) + + def test_flavor_update(self): + self.operator_cloud.load_balancer.update_flavor( + self.FLAVOR_ID, name=self.UPDATE_NAME + ) + test_flavor = self.operator_cloud.load_balancer.get_flavor( + self.FLAVOR_ID + ) + self.assertEqual(self.UPDATE_NAME, test_flavor.name) + + self.operator_cloud.load_balancer.update_flavor( + self.FLAVOR_ID, name=self.FLAVOR_NAME + ) + test_flavor = self.operator_cloud.load_balancer.get_flavor( + self.FLAVOR_ID + ) + self.assertEqual(self.FLAVOR_NAME, test_flavor.name) + + def test_amphora_list(self): + amp_ids = [ + amp.id for amp in self.operator_cloud.load_balancer.amphorae() + ] + self.assertIn(self.AMPHORA_ID, amp_ids) + + def test_amphora_find(self): + test_amphora = self.operator_cloud.load_balancer.find_amphora( + self.AMPHORA_ID + ) + self.assertEqual(self.AMPHORA_ID, test_amphora.id) + + def test_amphora_get(self): + test_amphora = self.operator_cloud.load_balancer.get_amphora( + self.AMPHORA_ID + ) + self.assertEqual(self.AMPHORA_ID, test_amphora.id) + + def test_amphora_configure(self): + self.operator_cloud.load_balancer.configure_amphora(self.AMPHORA_ID) + test_amp = self.operator_cloud.load_balancer.get_amphora( + self.AMPHORA_ID + ) + self.assertEqual(self.AMPHORA_ID, test_amp.id) + + def test_amphora_failover(self): + self.operator_cloud.load_balancer.failover_amphora(self.AMPHORA_ID) + test_amp = self.operator_cloud.load_balancer.get_amphora( + self.AMPHORA_ID + ) + self.assertEqual(self.AMPHORA_ID, test_amp.id) + + def test_availability_zone_profile_find(self): + test_profile = ( + self.operator_cloud.load_balancer.find_availability_zone_profile( + self.AVAILABILITY_ZONE_PROFILE_NAME + ) + ) + 
self.assertEqual(self.AVAILABILITY_ZONE_PROFILE_ID, test_profile.id) + + def test_availability_zone_profile_get(self): + test_availability_zone_profile = ( + self.operator_cloud.load_balancer.get_availability_zone_profile( + self.AVAILABILITY_ZONE_PROFILE_ID + ) + ) + self.assertEqual( + self.AVAILABILITY_ZONE_PROFILE_NAME, + test_availability_zone_profile.name, + ) + self.assertEqual( + self.AVAILABILITY_ZONE_PROFILE_ID, + test_availability_zone_profile.id, + ) + self.assertEqual( + self.AMPHORA, test_availability_zone_profile.provider_name + ) + self.assertEqual( + self.AVAILABILITY_ZONE_DATA, + test_availability_zone_profile.availability_zone_data, + ) + + def test_availability_zone_profile_list(self): + names = [ + az.name + for az in self.operator_cloud.load_balancer.availability_zone_profiles() # noqa: E501 + ] + self.assertIn(self.AVAILABILITY_ZONE_PROFILE_NAME, names) + + def test_availability_zone_profile_update(self): + self.operator_cloud.load_balancer.update_availability_zone_profile( + self.AVAILABILITY_ZONE_PROFILE_ID, name=self.UPDATE_NAME + ) + test_availability_zone_profile = ( + self.operator_cloud.load_balancer.get_availability_zone_profile( + self.AVAILABILITY_ZONE_PROFILE_ID + ) + ) + self.assertEqual(self.UPDATE_NAME, test_availability_zone_profile.name) + + self.operator_cloud.load_balancer.update_availability_zone_profile( + self.AVAILABILITY_ZONE_PROFILE_ID, + name=self.AVAILABILITY_ZONE_PROFILE_NAME, + ) + test_availability_zone_profile = ( + self.operator_cloud.load_balancer.get_availability_zone_profile( + self.AVAILABILITY_ZONE_PROFILE_ID + ) + ) + self.assertEqual( + self.AVAILABILITY_ZONE_PROFILE_NAME, + test_availability_zone_profile.name, + ) + + def test_availability_zone_find(self): + test_availability_zone = ( + self.operator_cloud.load_balancer.find_availability_zone( + self.AVAILABILITY_ZONE_NAME + ) + ) + self.assertEqual( + self.AVAILABILITY_ZONE_NAME, test_availability_zone.name + ) + + def test_availability_zone_get(self): 
+ test_availability_zone = ( + self.operator_cloud.load_balancer.get_availability_zone( + self.AVAILABILITY_ZONE_NAME + ) + ) + self.assertEqual( + self.AVAILABILITY_ZONE_NAME, test_availability_zone.name + ) + self.assertEqual(self.DESCRIPTION, test_availability_zone.description) + self.assertEqual( + self.AVAILABILITY_ZONE_PROFILE_ID, + test_availability_zone.availability_zone_profile_id, + ) + + def test_availability_zone_list(self): + names = [ + az.name + for az in self.operator_cloud.load_balancer.availability_zones() + ] + self.assertIn(self.AVAILABILITY_ZONE_NAME, names) + + def test_availability_zone_update(self): + self.operator_cloud.load_balancer.update_availability_zone( + self.AVAILABILITY_ZONE_NAME, description=self.UPDATE_DESCRIPTION + ) + test_availability_zone = ( + self.operator_cloud.load_balancer.get_availability_zone( + self.AVAILABILITY_ZONE_NAME + ) + ) + self.assertEqual( + self.UPDATE_DESCRIPTION, test_availability_zone.description + ) + + self.operator_cloud.load_balancer.update_availability_zone( + self.AVAILABILITY_ZONE_NAME, description=self.DESCRIPTION + ) + test_availability_zone = ( + self.operator_cloud.load_balancer.get_availability_zone( + self.AVAILABILITY_ZONE_NAME + ) + ) + self.assertEqual(self.DESCRIPTION, test_availability_zone.description) diff --git a/openstack/tests/functional/network/v2/common.py b/openstack/tests/functional/network/v2/common.py new file mode 100644 index 0000000000..cbabe01063 --- /dev/null +++ b/openstack/tests/functional/network/v2/common.py @@ -0,0 +1,112 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack import exceptions +from openstack.tests.functional import base + + +# NOTE: method to make mypy happy. +def _get_command(*args): + return mock.Mock() + + +class TestTagNeutron(base.BaseFunctionalTest): + get_command = _get_command + + def test_set_tags(self): + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + self.user_cloud.network.set_tags(sot, ["blue"]) + sot = self.get_command(self.ID) + self.assertEqual(["blue"], sot.tags) + + self.user_cloud.network.set_tags(sot, []) + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + def test_get_tags(self): + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + self.user_cloud.network.set_tags(sot, ["blue", "red"]) + tags = self.user_cloud.network.get_tags(sot) + self.assertEqual(["blue", "red"], tags) + + def test_add_tag(self): + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + self.user_cloud.network.add_tag(sot, "blue") + tags = self.user_cloud.network.get_tags(sot) + self.assertEqual(["blue"], tags) + + # The operation is idempotent. + self.user_cloud.network.add_tag(sot, "blue") + tags = self.user_cloud.network.get_tags(sot) + self.assertEqual(["blue"], tags) + + def test_remove_tag(self): + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + self.user_cloud.network.set_tags(sot, ["blue"]) + tags = self.user_cloud.network.get_tags(sot) + self.assertEqual(["blue"], tags) + + self.user_cloud.network.remove_tag(sot, "blue") + tags = self.user_cloud.network.get_tags(sot) + self.assertEqual([], tags) + + # The operation is not idempotent. 
+ self.assertRaises( + exceptions.NotFoundException, + self.user_cloud.network.remove_tag, + sot, + "blue", + ) + + def test_remove_all_tags(self): + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + self.user_cloud.network.set_tags(sot, ["blue", "red"]) + sot = self.get_command(self.ID) + self.assertEqual(["blue", "red"], sot.tags) + + self.user_cloud.network.remove_all_tags(sot) + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + def test_add_tags(self): + # Skip the test if tag-creation extension is not enabled. + if not self.user_cloud.network.find_extension("tag-creation"): + self.skipTest("Network tag-creation extension disabled") + + sot = self.get_command(self.ID) + self.assertEqual([], sot.tags) + + self.user_cloud.network.add_tags(sot, ["red", "green"]) + self.user_cloud.network.add_tags(sot, ["blue", "yellow"]) + sot = self.get_command(self.ID) + self.assertEqual(["blue", "green", "red", "yellow"], sot.tags) + + # The operation is idempotent. + self.user_cloud.network.add_tags(sot, ["blue", "yellow"]) + sot = self.get_command(self.ID) + self.assertEqual(["blue", "green", "red", "yellow"], sot.tags) + + self.user_cloud.network.add_tags(sot, []) + sot = self.get_command(self.ID) + self.assertEqual(["blue", "green", "red", "yellow"], sot.tags) diff --git a/openstack/tests/functional/network/v2/test_address_group.py b/openstack/tests/functional/network/v2/test_address_group.py new file mode 100644 index 0000000000..f507c957bf --- /dev/null +++ b/openstack/tests/functional/network/v2/test_address_group.py @@ -0,0 +1,90 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import address_group as _address_group +from openstack.tests.functional import base + + +class TestAddressGroup(base.BaseFunctionalTest): + ADDRESS_GROUP_ID = None + ADDRESSES = ["10.0.0.1/32", "2001:db8::/32"] + + def setUp(self): + super().setUp() + + # Skip the tests if address group extension is not enabled. + if not self.user_cloud.network.find_extension("address-group"): + self.skipTest("Network Address Group extension disabled") + + self.ADDRESS_GROUP_NAME = self.getUniqueString() + self.ADDRESS_GROUP_DESCRIPTION = self.getUniqueString() + self.ADDRESS_GROUP_NAME_UPDATED = self.getUniqueString() + self.ADDRESS_GROUP_DESCRIPTION_UPDATED = self.getUniqueString() + address_group = self.user_cloud.network.create_address_group( + name=self.ADDRESS_GROUP_NAME, + description=self.ADDRESS_GROUP_DESCRIPTION, + addresses=self.ADDRESSES, + ) + assert isinstance(address_group, _address_group.AddressGroup) + self.assertEqual(self.ADDRESS_GROUP_NAME, address_group.name) + self.assertEqual( + self.ADDRESS_GROUP_DESCRIPTION, address_group.description + ) + self.assertCountEqual(self.ADDRESSES, address_group.addresses) + self.ADDRESS_GROUP_ID = address_group.id + + def tearDown(self): + sot = self.user_cloud.network.delete_address_group( + self.ADDRESS_GROUP_ID + ) + self.assertIsNone(sot) + super().tearDown() + + def test_find(self): + sot = self.user_cloud.network.find_address_group( + self.ADDRESS_GROUP_NAME + ) + self.assertEqual(self.ADDRESS_GROUP_ID, sot.id) + + def test_get(self): + sot = 
self.user_cloud.network.get_address_group(self.ADDRESS_GROUP_ID) + self.assertEqual(self.ADDRESS_GROUP_NAME, sot.name) + + def test_list(self): + names = [ag.name for ag in self.user_cloud.network.address_groups()] + self.assertIn(self.ADDRESS_GROUP_NAME, names) + + def test_update(self): + assert self.ADDRESS_GROUP_ID is not None + sot = self.user_cloud.network.update_address_group( + self.ADDRESS_GROUP_ID, + name=self.ADDRESS_GROUP_NAME_UPDATED, + description=self.ADDRESS_GROUP_DESCRIPTION_UPDATED, + ) + self.assertEqual(self.ADDRESS_GROUP_NAME_UPDATED, sot.name) + self.assertEqual( + self.ADDRESS_GROUP_DESCRIPTION_UPDATED, sot.description + ) + + def test_add_remove_addresses(self): + addrs = ["127.0.0.1/32", "fe80::/10"] + sot = self.user_cloud.network.add_addresses_to_address_group( + self.ADDRESS_GROUP_ID, addrs + ) + updated_addrs = self.ADDRESSES.copy() + updated_addrs.extend(addrs) + self.assertCountEqual(updated_addrs, sot.addresses) + sot = self.user_cloud.network.remove_addresses_from_address_group( + self.ADDRESS_GROUP_ID, addrs + ) + self.assertCountEqual(self.ADDRESSES, sot.addresses) diff --git a/openstack/tests/functional/network/v2/test_address_scope.py b/openstack/tests/functional/network/v2/test_address_scope.py index 223f157326..766a5fa97d 100644 --- a/openstack/tests/functional/network/v2/test_address_scope.py +++ b/openstack/tests/functional/network/v2/test_address_scope.py @@ -10,53 +10,54 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid from openstack.network.v2 import address_scope as _address_scope from openstack.tests.functional import base class TestAddressScope(base.BaseFunctionalTest): - ADDRESS_SCOPE_ID = None - ADDRESS_SCOPE_NAME = uuid.uuid4().hex - ADDRESS_SCOPE_NAME_UPDATED = uuid.uuid4().hex IS_SHARED = False IP_VERSION = 4 - @classmethod - def setUpClass(cls): - super(TestAddressScope, cls).setUpClass() - address_scope = cls.conn.network.create_address_scope( - ip_version=cls.IP_VERSION, - name=cls.ADDRESS_SCOPE_NAME, - shared=cls.IS_SHARED, + def setUp(self): + super().setUp() + self.ADDRESS_SCOPE_NAME = self.getUniqueString() + self.ADDRESS_SCOPE_NAME_UPDATED = self.getUniqueString() + address_scope = self.user_cloud.network.create_address_scope( + ip_version=self.IP_VERSION, + name=self.ADDRESS_SCOPE_NAME, + shared=self.IS_SHARED, ) assert isinstance(address_scope, _address_scope.AddressScope) - cls.assertIs(cls.ADDRESS_SCOPE_NAME, address_scope.name) - cls.ADDRESS_SCOPE_ID = address_scope.id + self.assertEqual(self.ADDRESS_SCOPE_NAME, address_scope.name) + self.ADDRESS_SCOPE_ID = address_scope.id - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_address_scope(cls.ADDRESS_SCOPE_ID) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_address_scope( + self.ADDRESS_SCOPE_ID + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_address_scope(self.ADDRESS_SCOPE_NAME) + sot = self.user_cloud.network.find_address_scope( + self.ADDRESS_SCOPE_NAME + ) self.assertEqual(self.ADDRESS_SCOPE_ID, sot.id) def test_get(self): - sot = self.conn.network.get_address_scope(self.ADDRESS_SCOPE_ID) + sot = self.user_cloud.network.get_address_scope(self.ADDRESS_SCOPE_ID) self.assertEqual(self.ADDRESS_SCOPE_NAME, sot.name) self.assertEqual(self.IS_SHARED, sot.is_shared) self.assertEqual(self.IP_VERSION, sot.ip_version) def test_list(self): - names = [o.name for o in 
self.conn.network.address_scopes()] + names = [o.name for o in self.user_cloud.network.address_scopes()] self.assertIn(self.ADDRESS_SCOPE_NAME, names) def test_update(self): - sot = self.conn.network.update_address_scope( - self.ADDRESS_SCOPE_ID, - name=self.ADDRESS_SCOPE_NAME_UPDATED) + sot = self.user_cloud.network.update_address_scope( + self.ADDRESS_SCOPE_ID, name=self.ADDRESS_SCOPE_NAME_UPDATED + ) self.assertEqual(self.ADDRESS_SCOPE_NAME_UPDATED, sot.name) diff --git a/openstack/tests/functional/network/v2/test_agent.py b/openstack/tests/functional/network/v2/test_agent.py index 2cc96525bb..21763dc620 100644 --- a/openstack/tests/functional/network/v2/test_agent.py +++ b/openstack/tests/functional/network/v2/test_agent.py @@ -17,9 +17,8 @@ class TestAgent(base.BaseFunctionalTest): - - AGENT = None - DESC = 'test descrition' + AGENT: agent.Agent + DESC = "test description" def validate_uuid(self, s): try: @@ -28,24 +27,29 @@ def validate_uuid(self, s): return False return True - @classmethod - def setUpClass(cls): - super(TestAgent, cls).setUpClass() - agent_list = list(cls.conn.network.agents()) - cls.AGENT = agent_list[0] - assert isinstance(cls.AGENT, agent.Agent) + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("agent"): + self.skipTest("Neutron agent extension is required for this test") + + agent_list = list(self.user_cloud.network.agents()) + if len(agent_list) == 0: + self.skipTest("No agents available") + self.AGENT = agent_list[0] + assert isinstance(self.AGENT, agent.Agent) def test_list(self): - agent_list = list(self.conn.network.agents()) + agent_list = list(self.user_cloud.network.agents()) self.AGENT = agent_list[0] assert isinstance(self.AGENT, agent.Agent) self.assertTrue(self.validate_uuid(self.AGENT.id)) def test_get(self): - sot = self.conn.network.get_agent(self.AGENT.id) + sot = self.user_cloud.network.get_agent(self.AGENT.id) self.assertEqual(self.AGENT.id, sot.id) def test_update(self): - sot = 
self.conn.network.update_agent(self.AGENT.id, - description=self.DESC) + sot = self.user_cloud.network.update_agent( + self.AGENT.id, description=self.DESC + ) self.assertEqual(self.DESC, sot.description) diff --git a/openstack/tests/functional/network/v2/test_agent_add_remove_network.py b/openstack/tests/functional/network/v2/test_agent_add_remove_network.py index 4aed6e6dd0..a00f3fba2b 100644 --- a/openstack/tests/functional/network/v2/test_agent_add_remove_network.py +++ b/openstack/tests/functional/network/v2/test_agent_add_remove_network.py @@ -10,54 +10,57 @@ # License for the specific language governing permissions and limitations # under the License. -import uuid - +from openstack.network.v2 import agent from openstack.network.v2 import network from openstack.tests.functional import base class TestAgentNetworks(base.BaseFunctionalTest): + NETWORK_ID: str + AGENT: agent.Agent + AGENT_ID: str - NETWORK_NAME = 'network-name'.join(uuid.uuid4().hex) - NETWORK_ID = None - AGENT = None - AGENT_ID = None - - @classmethod - def setUpClass(cls): - super(TestAgentNetworks, cls).setUpClass() + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("agent"): + self.skipTest("Neutron agent extension is required for this test") - net = cls.conn.network.create_network(name=cls.NETWORK_NAME) + self.NETWORK_NAME = self.getUniqueString("network") + net = self.user_cloud.network.create_network(name=self.NETWORK_NAME) + self.addCleanup(self.user_cloud.network.delete_network, net.id) assert isinstance(net, network.Network) - cls.NETWORK_ID = net.id - agent_list = list(cls.conn.network.agents()) - agents = [agent for agent in agent_list - if agent.agent_type == 'DHCP agent'] - cls.AGENT = agents[0] - cls.AGENT_ID = cls.AGENT.id - - @classmethod - def tearDownClass(cls): - net = cls.conn.network.delete_router(cls.NETWORK_ID, - ignore_missing=False) - cls.assertIs(None, net) - - def test_add_agent_to_network(self): - net = 
self.AGENT.add_agent_to_network(self.conn.session, - network_id=self.NETWORK_ID) + self.NETWORK_ID = net.id + agent_list = list(self.user_cloud.network.agents()) + agents = [ + agent for agent in agent_list if agent.agent_type == "DHCP agent" + ] + if len(agent_list) == 0: + self.skipTest("No agents available") + + self.AGENT = agents[0] + self.AGENT_ID = self.AGENT.id + + def test_add_remove_agent(self): + net = self.AGENT.add_agent_to_network( + self.user_cloud.network, network_id=self.NETWORK_ID + ) self._verify_add(net) - def test_remove_agent_from_network(self): - net = self.AGENT.remove_agent_from_network(self.conn.session, - network_id=self.NETWORK_ID) + net = self.AGENT.remove_agent_from_network( + self.user_cloud.network, network_id=self.NETWORK_ID + ) self._verify_remove(net) def _verify_add(self, network): - net = self.conn.network.dhcp_agent_hosting_networks(self.AGENT_ID) + net = self.user_cloud.network.dhcp_agent_hosting_networks( + self.AGENT_ID + ) net_ids = [n.id for n in net] - self.asserIn(self.NETWORK_ID, net_ids) + self.assertIn(self.NETWORK_ID, net_ids) - def _verify_network(self, network): - net = self.conn.network.dhcp_agent_hosting_networks(self.AGENT_ID) + def _verify_remove(self, network): + net = self.user_cloud.network.dhcp_agent_hosting_networks( + self.AGENT_ID + ) net_ids = [n.id for n in net] self.assertNotIn(self.NETWORK_ID, net_ids) diff --git a/openstack/tests/functional/network/v2/test_agent_add_remove_router.py b/openstack/tests/functional/network/v2/test_agent_add_remove_router.py new file mode 100644 index 0000000000..550ddada4a --- /dev/null +++ b/openstack/tests/functional/network/v2/test_agent_add_remove_router.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import agent +from openstack.network.v2 import router +from openstack.tests.functional import base + + +class TestAgentRouters(base.BaseFunctionalTest): + ROUTER: router.Router + AGENT: agent.Agent + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("agent"): + self.skipTest("Neutron agent extension is required for this test") + + self.ROUTER_NAME = "router-name-" + self.getUniqueString("router-name") + self.ROUTER = self.user_cloud.network.create_router( + name=self.ROUTER_NAME + ) + self.addCleanup(self.user_cloud.network.delete_router, self.ROUTER) + assert isinstance(self.ROUTER, router.Router) + agent_list = list(self.user_cloud.network.agents()) + agents = [ + agent for agent in agent_list if agent.agent_type == "L3 agent" + ] + if len(agent_list) == 0: + self.skipTest("No agents available") + + self.AGENT = agents[0] + + def test_add_router_to_agent(self): + self.user_cloud.network.add_router_to_agent(self.AGENT, self.ROUTER) + rots = self.user_cloud.network.agent_hosted_routers(self.AGENT) + routers = [router.id for router in rots] + self.assertIn(self.ROUTER.id, routers) + + def test_remove_router_from_agent(self): + self.user_cloud.network.remove_router_from_agent( + self.AGENT, self.ROUTER + ) + rots = self.user_cloud.network.agent_hosted_routers(self.AGENT) + routers = [router.id for router in rots] + self.assertNotIn(self.ROUTER.id, routers) diff --git a/openstack/tests/functional/network/v2/test_auto_allocated_topology.py 
b/openstack/tests/functional/network/v2/test_auto_allocated_topology.py index 629957b808..ff1bdd636a 100644 --- a/openstack/tests/functional/network/v2/test_auto_allocated_topology.py +++ b/openstack/tests/functional/network/v2/test_auto_allocated_topology.py @@ -14,48 +14,102 @@ class TestAutoAllocatedTopology(base.BaseFunctionalTest): - - NETWORK_NAME = 'auto_allocated_network' + NETWORK_NAME = "auto_allocated_network" NETWORK_ID = None PROJECT_ID = None - @classmethod - def setUpClass(cls): - super(TestAutoAllocatedTopology, cls).setUpClass() - projects = [o.project_id for o in cls.conn.network.networks()] - cls.PROJECT_ID = projects[0] + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") - @classmethod - def tearDownClass(cls): - res = cls.conn.network.delete_auto_allocated_topology(cls.PROJECT_ID) - cls.assertIs(None, res) + if not self.operator_cloud._has_neutron_extension( + "auto-allocated-topology" + ): + self.skipTest( + "Neutron auto-allocated-topology extension is " + "required for this test" + ) + + project = self._create_project() + self.PROJECT_ID = project['id'] + self.test_cloud = self.operator_cloud.connect_as_project(project) - def test_dry_run_option_pass(self): # Dry run will only pass if there is a public network - networks = self.conn.network.networks() - self._set_network_external(networks) + self._set_network_external() + + def tearDown(self): + res = self.test_cloud.network.delete_auto_allocated_topology( + self.PROJECT_ID + ) + self.assertIsNone(res) + self._destroy_project() + super().tearDown() + + def _create_project(self): + project_name = 'auto_allocated_topology_test_project' + project = self.operator_cloud.get_project(project_name) + if not project: + params = { + 'name': project_name, + 'description': ( + 'test project used only for the ' + 'TestAutoAllocatedTopology tests class' + ), + 'domain_id': self.operator_cloud.get_domain('default')['id'], + } + + 
project = self.operator_cloud.create_project(**params) + + user_id = self.operator_cloud.current_user_id + # Grant the current user access to the project + role_assignment = self.operator_cloud.list_role_assignments( + {'user': user_id, 'project': project['id']} + ) + if not role_assignment: + self.operator_cloud.grant_role( + 'member', user=user_id, project=project['id'], wait=True + ) + return project + + def _destroy_project(self): + self.operator_cloud.revoke_role( + 'member', + user=self.operator_cloud.current_user_id, + project=self.PROJECT_ID, + ) + self.operator_cloud.delete_project(self.PROJECT_ID) + def test_auto_allocated_topology(self): + # First test validation with the 'dry-run' call # Dry run option will return "dry-run=pass" in the 'id' resource - top = self.conn.network.validate_auto_allocated_topology( - self.PROJECT_ID) + top = self.test_cloud.network.validate_auto_allocated_topology( + self.PROJECT_ID + ) self.assertEqual(self.PROJECT_ID, top.project) - self.assertEqual('dry-run=pass', top.id) + self.assertEqual("dry-run=pass", top.id) - def test_show_no_project_option(self): - top = self.conn.network.get_auto_allocated_topology() - project = self.conn.session.get_project_id() - network = self.conn.network.get_network(top.id) + # test show auto_allocated_network without project id in the request + top = self.test_cloud.network.get_auto_allocated_topology() + project = self.test_cloud.session.get_project_id() + network = self.test_cloud.network.get_network(top.id) self.assertEqual(top.project_id, project) self.assertEqual(top.id, network.id) - def test_show_project_option(self): - top = self.conn.network.get_auto_allocated_topology(self.PROJECT_ID) - network = self.conn.network.get_network(top.id) + # test show auto_allocated_network with project id in the request + top = self.test_cloud.network.get_auto_allocated_topology( + self.PROJECT_ID + ) + network = self.test_cloud.network.get_network(top.id) self.assertEqual(top.project_id, 
network.project_id) self.assertEqual(top.id, network.id) - self.assertEqual(network.name, 'auto_allocated_network') + self.assertEqual(network.name, "auto_allocated_network") - def _set_network_external(self, networks): + def _set_network_external(self): + networks = self.test_cloud.network.networks() for network in networks: - if network.name == 'public': - self.conn.network.update_network(network, is_default=True) + if network.name == "public": + self.test_cloud.network.update_network( + network, is_default=True + ) diff --git a/openstack/tests/functional/network/v2/test_availability_zone.py b/openstack/tests/functional/network/v2/test_availability_zone.py index 64fc1910b9..6828c7c0d4 100644 --- a/openstack/tests/functional/network/v2/test_availability_zone.py +++ b/openstack/tests/functional/network/v2/test_availability_zone.py @@ -10,18 +10,15 @@ # License for the specific language governing permissions and limitations # under the License. -import six from openstack.tests.functional import base class TestAvailabilityZone(base.BaseFunctionalTest): - def test_list(self): - availability_zones = list(self.conn.network.availability_zones()) - self.assertGreater(len(availability_zones), 0) - - for az in availability_zones: - self.assertIsInstance(az.name, six.string_types) - self.assertIsInstance(az.resource, six.string_types) - self.assertIsInstance(az.state, six.string_types) + availability_zones = list(self.user_cloud.network.availability_zones()) + if len(availability_zones) > 0: + for az in availability_zones: + self.assertIsInstance(az.name, str) + self.assertIsInstance(az.resource, str) + self.assertIsInstance(az.state, str) diff --git a/openstack/tests/functional/network/v2/test_bgp.py b/openstack/tests/functional/network/v2/test_bgp.py new file mode 100644 index 0000000000..5c16986202 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_bgp.py @@ -0,0 +1,135 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use 
this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import bgp_peer as _bgp_peer +from openstack.network.v2 import bgp_speaker as _bgp_speaker +from openstack.tests.functional import base + + +class TestBGPSpeaker(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.LOCAL_AS = 101 + self.IP_VERSION = 4 + self.REMOTE_AS = 42 + self.PEER_IP = '172.200.12.3' + self.SPEAKER_NAME = 'my_speaker' + self.getUniqueString() + self.PEER_NAME = 'my_peer' + self.getUniqueString() + + if not self.user_cloud.network.find_extension("bgp"): + self.skipTest("Neutron BGP Dynamic Routing Extension disabled") + + bgp_speaker = self.operator_cloud.network.create_bgp_speaker( + ip_version=self.IP_VERSION, + local_as=self.LOCAL_AS, + name=self.SPEAKER_NAME, + ) + assert isinstance(bgp_speaker, _bgp_speaker.BgpSpeaker) + self.SPEAKER = bgp_speaker + + bgp_peer = self.operator_cloud.network.create_bgp_peer( + name=self.PEER_NAME, + auth_type='none', + remote_as=self.REMOTE_AS, + peer_ip=self.PEER_IP, + ) + assert isinstance(bgp_peer, _bgp_peer.BgpPeer) + self.PEER = bgp_peer + + def tearDown(self): + sot = self.operator_cloud.network.delete_bgp_peer(self.PEER.id) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_bgp_speaker(self.SPEAKER.id) + self.assertIsNone(sot) + super().tearDown() + + def test_find_bgp_speaker(self): + sot = self.operator_cloud.network.find_bgp_speaker(self.SPEAKER.name) + self.assertEqual(self.IP_VERSION, sot.ip_version) + self.assertEqual(self.LOCAL_AS, sot.local_as) + # Check defaults + 
self.assertTrue(sot.advertise_floating_ip_host_routes) + self.assertTrue(sot.advertise_tenant_networks) + + def test_get_bgp_speaker(self): + sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) + self.assertEqual(self.IP_VERSION, sot.ip_version) + self.assertEqual(self.LOCAL_AS, sot.local_as) + + def test_list_bgp_speakers(self): + speaker_ids = [ + sp.id for sp in self.operator_cloud.network.bgp_speakers() + ] + self.assertIn(self.SPEAKER.id, speaker_ids) + + def test_update_bgp_speaker(self): + sot = self.operator_cloud.network.update_bgp_speaker( + self.SPEAKER.id, advertise_floating_ip_host_routes=False + ) + self.assertFalse(sot.advertise_floating_ip_host_routes) + + def test_find_bgp_peer(self): + sot = self.operator_cloud.network.find_bgp_peer(self.PEER.name) + self.assertEqual(self.PEER_IP, sot.peer_ip) + self.assertEqual(self.REMOTE_AS, sot.remote_as) + + def test_get_bgp_peer(self): + sot = self.operator_cloud.network.get_bgp_peer(self.PEER.id) + self.assertEqual(self.PEER_IP, sot.peer_ip) + self.assertEqual(self.REMOTE_AS, sot.remote_as) + + def test_list_bgp_peers(self): + peer_ids = [pe.id for pe in self.operator_cloud.network.bgp_peers()] + self.assertIn(self.PEER.id, peer_ids) + + def test_update_bgp_peer(self): + name = 'new_peer_name' + self.getUniqueString() + sot = self.operator_cloud.network.update_bgp_peer( + self.PEER.id, name=name + ) + self.assertEqual(name, sot.name) + + def test_add_remove_peer_to_speaker(self): + self.operator_cloud.network.add_bgp_peer_to_speaker( + self.SPEAKER.id, self.PEER.id + ) + sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) + self.assertEqual([self.PEER.id], sot.peers) + + # Remove the peer + self.operator_cloud.network.remove_bgp_peer_from_speaker( + self.SPEAKER.id, self.PEER.id + ) + sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) + self.assertEqual([], sot.peers) + + def test_add_remove_gw_network_to_speaker(self): + net_name = 'my_network' + 
self.getUniqueString() + net = self.user_cloud.create_network(name=net_name) + self.operator_cloud.network.add_gateway_network_to_speaker( + self.SPEAKER.id, net.id + ) + sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) + self.assertEqual([net.id], sot.networks) + + # Remove the network + self.operator_cloud.network.remove_gateway_network_from_speaker( + self.SPEAKER.id, net.id + ) + sot = self.operator_cloud.network.get_bgp_speaker(self.SPEAKER.id) + self.assertEqual([], sot.networks) + + def test_get_advertised_routes_of_speaker(self): + sot = self.operator_cloud.network.get_advertised_routes_of_speaker( + self.SPEAKER.id + ) + self.assertEqual({'advertised_routes': []}, sot) diff --git a/openstack/tests/functional/network/v2/test_bgpvpn.py b/openstack/tests/functional/network/v2/test_bgpvpn.py new file mode 100644 index 0000000000..1766481823 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_bgpvpn.py @@ -0,0 +1,211 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import bgpvpn as _bgpvpn +from openstack.network.v2 import ( + bgpvpn_network_association as _bgpvpn_net_assoc, +) +from openstack.network.v2 import bgpvpn_port_association as _bgpvpn_port_assoc +from openstack.network.v2 import ( + bgpvpn_router_association as _bgpvpn_router_assoc, +) +from openstack.network.v2 import network as _network +from openstack.network.v2 import port as _port +from openstack.network.v2 import router as _router +from openstack.network.v2 import subnet as _subnet +from openstack.tests.functional import base + + +class TestBGPVPN(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + self.BGPVPN_NAME = 'my_bgpvpn' + self.getUniqueString() + self.NET_NAME = 'my_net' + self.getUniqueString() + self.SUBNET_NAME = 'my_subnet' + self.getUniqueString() + self.PORT_NAME = 'my_port' + self.getUniqueString() + self.ROUTER_NAME = 'my_router' + self.getUniqueString() + self.CIDR = "10.101.0.0/24" + self.ROUTE_DISTINGUISHERS = ['64512:1777', '64512:1888', '64512:1999'] + self.VNI = 1000 + self.ROUTE_TARGETS = ('64512:1444',) + self.IMPORT_TARGETS = ('64512:1555',) + self.EXPORT_TARGETS = '64512:1666' + self.TYPE = 'l3' + + if not self.user_cloud.network.find_extension("bgpvpn"): + self.skipTest("Neutron BGPVPN Extension disabled") + bgpvpn = self.operator_cloud.network.create_bgpvpn( + name=self.BGPVPN_NAME, + route_distinguishers=self.ROUTE_DISTINGUISHERS, + route_targets=self.ROUTE_TARGETS, + import_targets=self.IMPORT_TARGETS, + export_targets=self.EXPORT_TARGETS, + ) + assert isinstance(bgpvpn, _bgpvpn.BgpVpn) + self.BGPVPN = bgpvpn + + net = self.operator_cloud.network.create_network(name=self.NET_NAME) + assert isinstance(net, _network.Network) + self.NETWORK = net + subnet = self.operator_cloud.network.create_subnet( + name=self.SUBNET_NAME, + ip_version=4, + network_id=self.NETWORK.id, + cidr=self.CIDR, + ) + assert isinstance(subnet, _subnet.Subnet) + self.SUBNET = subnet + + port = 
self.operator_cloud.network.create_port( + name=self.PORT_NAME, network_id=self.NETWORK.id + ) + assert isinstance(port, _port.Port) + self.PORT = port + + router = self.operator_cloud.network.create_router( + name=self.ROUTER_NAME + ) + assert isinstance(router, _router.Router) + self.ROUTER = router + + net_assoc = ( + self.operator_cloud.network.create_bgpvpn_network_association( + self.BGPVPN, network_id=self.NETWORK.id + ) + ) + assert isinstance( + net_assoc, _bgpvpn_net_assoc.BgpVpnNetworkAssociation + ) + self.NET_ASSOC = net_assoc + + port_assoc = ( + self.operator_cloud.network.create_bgpvpn_port_association( + self.BGPVPN, port_id=self.PORT.id + ) + ) + assert isinstance(port_assoc, _bgpvpn_port_assoc.BgpVpnPortAssociation) + self.PORT_ASSOC = port_assoc + + router_assoc = ( + self.operator_cloud.network.create_bgpvpn_router_association( + self.BGPVPN, router_id=self.ROUTER.id + ) + ) + assert isinstance( + router_assoc, _bgpvpn_router_assoc.BgpVpnRouterAssociation + ) + self.ROUTER_ASSOC = router_assoc + + def tearDown(self): + sot = self.operator_cloud.network.delete_bgpvpn(self.BGPVPN.id) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_bgpvpn_network_association( + self.BGPVPN.id, self.NET_ASSOC.id + ) + self.assertIsNone(sot) + + sot = self.operator_cloud.network.delete_bgpvpn_port_association( + self.BGPVPN.id, self.PORT_ASSOC.id + ) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_bgpvpn_router_association( + self.BGPVPN.id, self.ROUTER_ASSOC.id + ) + self.assertIsNone(sot) + + sot = self.operator_cloud.network.delete_router(self.ROUTER) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_port(self.PORT) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_subnet(self.SUBNET) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_network(self.NETWORK) + self.assertIsNone(sot) + + super().tearDown() + + def test_find_bgpvpn(self): + sot = 
self.operator_cloud.network.find_bgpvpn(self.BGPVPN.name) + self.assertEqual(list(self.ROUTE_TARGETS), sot.route_targets) + self.assertEqual(list(self.IMPORT_TARGETS), sot.import_targets) + # Check defaults + self.assertEqual(self.TYPE, sot.type) + + def test_get_bgpvpn(self): + sot = self.operator_cloud.network.get_bgpvpn(self.BGPVPN.id) + self.assertEqual(list(self.ROUTE_TARGETS), sot.route_targets) + self.assertEqual([self.EXPORT_TARGETS], sot.export_targets) + self.assertEqual(list(self.IMPORT_TARGETS), sot.import_targets) + + def test_list_bgpvpns(self): + bgpvpn_ids = [ + bgpvpn.id for bgpvpn in self.operator_cloud.network.bgpvpns() + ] + self.assertIn(self.BGPVPN.id, bgpvpn_ids) + + def test_update_bgpvpn(self): + sot = self.operator_cloud.network.update_bgpvpn( + self.BGPVPN.id, import_targets='64512:1333' + ) + self.assertEqual(['64512:1333'], sot.import_targets) + + def test_get_bgpvpnnetwork_association(self): + sot = self.operator_cloud.network.get_bgpvpn_network_association( + self.BGPVPN.id, self.NET_ASSOC.id + ) + self.assertEqual(self.NETWORK.id, sot.network_id) + + def test_list_bgpvpn_network_associations(self): + net_assoc_ids = [ + net_assoc.id + for net_assoc in ( + self.operator_cloud.network.bgpvpn_network_associations( + self.BGPVPN.id + ) + ) + ] + self.assertIn(self.NET_ASSOC.id, net_assoc_ids) + + def test_get_bgpvpn_port_association(self): + sot = self.operator_cloud.network.get_bgpvpn_port_association( + self.BGPVPN.id, self.PORT_ASSOC.id + ) + self.assertEqual(self.PORT.id, sot.port_id) + + def test_list_bgpvpn_port_associations(self): + port_assoc_ids = [ + port_assoc.id + for port_assoc in ( + self.operator_cloud.network.bgpvpn_port_associations( + self.BGPVPN.id + ) + ) + ] + self.assertIn(self.PORT_ASSOC.id, port_assoc_ids) + + def test_get_bgpvpn_router_association(self): + sot = self.operator_cloud.network.get_bgpvpn_router_association( + self.BGPVPN.id, self.ROUTER_ASSOC.id + ) + self.assertEqual(self.ROUTER.id, sot.router_id) + 
+ def test_list_bgpvpn_router_associations(self): + router_assoc_ids = [ + router_assoc.id + for router_assoc in ( + self.operator_cloud.network.bgpvpn_router_associations( + self.BGPVPN.id + ) + ) + ] + self.assertIn(self.ROUTER_ASSOC.id, router_assoc_ids) diff --git a/openstack/tests/functional/network/v2/test_default_security_group_rule.py b/openstack/tests/functional/network/v2/test_default_security_group_rule.py new file mode 100644 index 0000000000..3391fa77a8 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_default_security_group_rule.py @@ -0,0 +1,83 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import random + +from openstack.network.v2 import default_security_group_rule +from openstack.tests.functional import base + + +class TestDefaultSecurityGroupRule(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension( + "security-groups-default-rules" + ): + self.skipTest( + "Neutron security-groups-default-rules extension " + "is required for this test" + ) + + self.IPV4 = random.choice(["IPv4", "IPv6"]) + self.PROTO = random.choice(["tcp", "udp"]) + self.PORT = random.randint(1, 65535) + self.DIR = random.choice(["ingress", "egress"]) + self.USED_IN_DEFAULT_SG = random.choice([True, False]) + self.USED_IN_NON_DEFAULT_SG = random.choice([True, False]) + + rul = self.operator_cloud.network.create_default_security_group_rule( + direction=self.DIR, + ethertype=self.IPV4, + port_range_max=self.PORT, + port_range_min=self.PORT, + protocol=self.PROTO, + used_in_default_sg=self.USED_IN_DEFAULT_SG, + used_in_non_default_sg=self.USED_IN_NON_DEFAULT_SG, + ) + assert isinstance( + rul, default_security_group_rule.DefaultSecurityGroupRule + ) + self.RULE_ID = rul.id + + def tearDown(self): + sot = self.operator_cloud.network.delete_default_security_group_rule( + self.RULE_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_find(self): + sot = self.operator_cloud.network.find_default_security_group_rule( + self.RULE_ID + ) + self.assertEqual(self.RULE_ID, sot.id) + + def test_get(self): + sot = self.operator_cloud.network.get_default_security_group_rule( + self.RULE_ID + ) + self.assertEqual(self.RULE_ID, sot.id) + self.assertEqual(self.DIR, sot.direction) + self.assertEqual(self.PROTO, sot.protocol) + self.assertEqual(self.PORT, sot.port_range_min) + self.assertEqual(self.PORT, sot.port_range_max) + self.assertEqual(self.USED_IN_DEFAULT_SG, sot.used_in_default_sg) + self.assertEqual( + self.USED_IN_NON_DEFAULT_SG, sot.used_in_non_default_sg + ) + + def test_list(self): + ids = [ + 
o.id + for o in self.operator_cloud.network.default_security_group_rules() + ] + self.assertIn(self.RULE_ID, ids) diff --git a/openstack/tests/functional/network/v2/test_dvr_router.py b/openstack/tests/functional/network/v2/test_dvr_router.py index e93231f101..899b939677 100644 --- a/openstack/tests/functional/network/v2/test_dvr_router.py +++ b/openstack/tests/functional/network/v2/test_dvr_router.py @@ -10,47 +10,57 @@ # License for the specific language governing permissions and limitations # under the License. -import uuid from openstack.network.v2 import router from openstack.tests.functional import base class TestDVRRouter(base.BaseFunctionalTest): - - NAME = uuid.uuid4().hex - UPDATE_NAME = uuid.uuid4().hex ID = None - @classmethod - def setUpClass(cls): - super(TestDVRRouter, cls).setUpClass() - sot = cls.conn.network.create_router(name=cls.NAME, distributed=True) + def setUp(self): + super().setUp() + if not self.operator_cloud: + # Current policies forbid regular user use it + self.skipTest("Operator cloud is required for this test") + + if not self.operator_cloud._has_neutron_extension("dvr"): + self.skipTest("dvr service not supported by cloud") + + self.NAME = self.getUniqueString() + self.UPDATE_NAME = self.getUniqueString() + sot = self.operator_cloud.network.create_router( + name=self.NAME, distributed=True + ) assert isinstance(sot, router.Router) - cls.assertIs(cls.NAME, sot.name) - cls.ID = sot.id + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_router(cls.ID, ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.operator_cloud.network.delete_router( + self.ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_router(self.NAME) + sot = self.operator_cloud.network.find_router(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): - sot = 
self.conn.network.get_router(self.ID) + sot = self.operator_cloud.network.get_router(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) self.assertTrue(sot.is_distributed) def test_list(self): - names = [o.name for o in self.conn.network.routers()] + names = [o.name for o in self.operator_cloud.network.routers()] self.assertIn(self.NAME, names) - dvr = [o.is_distributed for o in self.conn.network.routers()] + dvr = [o.is_distributed for o in self.operator_cloud.network.routers()] self.assertTrue(dvr) def test_update(self): - sot = self.conn.network.update_router(self.ID, name=self.UPDATE_NAME) + sot = self.operator_cloud.network.update_router( + self.ID, name=self.UPDATE_NAME + ) self.assertEqual(self.UPDATE_NAME, sot.name) diff --git a/openstack/tests/functional/network/v2/test_extension.py b/openstack/tests/functional/network/v2/test_extension.py index 98134cb6e0..16bbcf43d0 100644 --- a/openstack/tests/functional/network/v2/test_extension.py +++ b/openstack/tests/functional/network/v2/test_extension.py @@ -10,21 +10,19 @@ # License for the specific language governing permissions and limitations # under the License. 
-import six from openstack.tests.functional import base class TestExtension(base.BaseFunctionalTest): - def test_list(self): - extensions = list(self.conn.network.extensions()) + extensions = list(self.user_cloud.network.extensions()) self.assertGreater(len(extensions), 0) for ext in extensions: - self.assertIsInstance(ext.name, six.string_types) - self.assertIsInstance(ext.alias, six.string_types) + self.assertIsInstance(ext.name, str) + self.assertIsInstance(ext.alias, str) def test_find(self): - extension = self.conn.network.find_extension('external-net') - self.assertEqual('Neutron external network', extension.name) + extension = self.user_cloud.network.find_extension("external-net") + self.assertEqual("Neutron external network", extension.name) diff --git a/openstack/tests/functional/network/v2/test_firewall_group.py b/openstack/tests/functional/network/v2/test_firewall_group.py new file mode 100644 index 0000000000..8592149d1c --- /dev/null +++ b/openstack/tests/functional/network/v2/test_firewall_group.py @@ -0,0 +1,52 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.network.v2 import firewall_group +from openstack.tests.functional import base + + +class TestFirewallGroup(base.BaseFunctionalTest): + ID = None + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("fwaas_v2"): + self.skipTest("fwaas_v2 service not supported by cloud") + self.NAME = self.getUniqueString() + sot = self.user_cloud.network.create_firewall_group(name=self.NAME) + assert isinstance(sot, firewall_group.FirewallGroup) + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id + + def tearDown(self): + sot = self.user_cloud.network.delete_firewall_group( + self.ID, ignore_missing=False + ) + self.assertIs(None, sot) + super().tearDown() + + def test_find(self): + sot = self.user_cloud.network.find_firewall_group(self.NAME) + self.assertEqual(self.ID, sot.id) + + def test_get(self): + sot = self.user_cloud.network.get_firewall_group(self.ID) + self.assertEqual(self.NAME, sot.name) + self.assertEqual(self.ID, sot.id) + + def test_list(self): + names = [o.name for o in self.user_cloud.network.firewall_groups()] + self.assertIn(self.NAME, names) diff --git a/openstack/tests/functional/network/v2/test_firewall_policy.py b/openstack/tests/functional/network/v2/test_firewall_policy.py new file mode 100644 index 0000000000..c1520d162f --- /dev/null +++ b/openstack/tests/functional/network/v2/test_firewall_policy.py @@ -0,0 +1,52 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import firewall_policy +from openstack.tests.functional import base + + +class TestFirewallPolicy(base.BaseFunctionalTest): + ID = None + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("fwaas_v2"): + self.skipTest("fwaas_v2 service not supported by cloud") + self.NAME = self.getUniqueString() + sot = self.user_cloud.network.create_firewall_policy(name=self.NAME) + assert isinstance(sot, firewall_policy.FirewallPolicy) + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id + + def tearDown(self): + sot = self.user_cloud.network.delete_firewall_policy( + self.ID, ignore_missing=False + ) + self.assertIs(None, sot) + super().tearDown() + + def test_find(self): + sot = self.user_cloud.network.find_firewall_policy(self.NAME) + self.assertEqual(self.ID, sot.id) + + def test_get(self): + sot = self.user_cloud.network.get_firewall_policy(self.ID) + self.assertEqual(self.NAME, sot.name) + self.assertEqual(self.ID, sot.id) + + def test_list(self): + names = [o.name for o in self.user_cloud.network.firewall_policies()] + self.assertIn(self.NAME, names) diff --git a/openstack/tests/functional/network/v2/test_firewall_rule.py b/openstack/tests/functional/network/v2/test_firewall_rule.py new file mode 100644 index 0000000000..8c06d17262 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_firewall_rule.py @@ -0,0 +1,74 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import firewall_rule +from openstack.tests.functional import base + + +class TestFirewallRule(base.BaseFunctionalTest): + ACTION = "allow" + DEST_IP = "10.0.0.0/24" + DEST_PORT = "80" + IP_VERSION = 4 + PROTOCOL = "tcp" + SOUR_IP = "10.0.1.0/24" + SOUR_PORT = "8000" + ID = None + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("fwaas_v2"): + self.skipTest("fwaas_v2 service not supported by cloud") + self.NAME = self.getUniqueString() + sot = self.user_cloud.network.create_firewall_rule( + name=self.NAME, + action=self.ACTION, + source_port=self.SOUR_PORT, + destination_port=self.DEST_PORT, + source_ip_address=self.SOUR_IP, + destination_ip_address=self.DEST_IP, + ip_version=self.IP_VERSION, + protocol=self.PROTOCOL, + ) + assert isinstance(sot, firewall_rule.FirewallRule) + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id + + def tearDown(self): + sot = self.user_cloud.network.delete_firewall_rule( + self.ID, ignore_missing=False + ) + self.assertIs(None, sot) + super().tearDown() + + def test_find(self): + sot = self.user_cloud.network.find_firewall_rule(self.NAME) + self.assertEqual(self.ID, sot.id) + + def test_get(self): + sot = self.user_cloud.network.get_firewall_rule(self.ID) + self.assertEqual(self.ID, sot.id) + self.assertEqual(self.NAME, sot.name) + self.assertEqual(self.ACTION, sot.action) + self.assertEqual(self.DEST_IP, sot.destination_ip_address) + self.assertEqual(self.DEST_PORT, sot.destination_port) + self.assertEqual(self.IP_VERSION, sot.ip_version) + 
self.assertEqual(self.SOUR_IP, sot.source_ip_address) + self.assertEqual(self.SOUR_PORT, sot.source_port) + + def test_list(self): + ids = [o.id for o in self.user_cloud.network.firewall_rules()] + self.assertIn(self.ID, ids) diff --git a/openstack/tests/functional/network/v2/test_firewall_rule_insert_remove_policy.py b/openstack/tests/functional/network/v2/test_firewall_rule_insert_remove_policy.py new file mode 100644 index 0000000000..72bef3b3c3 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_firewall_rule_insert_remove_policy.py @@ -0,0 +1,102 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.network.v2 import firewall_policy +from openstack.network.v2 import firewall_rule +from openstack.tests.functional import base + + +class TestFirewallPolicyRuleAssociations(base.BaseFunctionalTest): + POLICY_NAME = uuid.uuid4().hex + RULE1_NAME = uuid.uuid4().hex + RULE2_NAME = uuid.uuid4().hex + POLICY_ID = None + RULE1_ID = None + RULE2_ID = None + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("fwaas_v2"): + self.skipTest("fwaas_v2 service not supported by cloud") + rul1 = self.user_cloud.network.create_firewall_rule( + name=self.RULE1_NAME + ) + assert isinstance(rul1, firewall_rule.FirewallRule) + self.assertEqual(self.RULE1_NAME, rul1.name) + rul2 = self.user_cloud.network.create_firewall_rule( + name=self.RULE2_NAME + ) + assert isinstance(rul2, firewall_rule.FirewallRule) + self.assertEqual(self.RULE2_NAME, rul2.name) + pol = self.user_cloud.network.create_firewall_policy( + name=self.POLICY_NAME + ) + assert isinstance(pol, firewall_policy.FirewallPolicy) + self.assertEqual(self.POLICY_NAME, pol.name) + self.RULE1_ID = rul1.id + self.RULE2_ID = rul2.id + self.POLICY_ID = pol.id + + def tearDown(self): + sot = self.user_cloud.network.delete_firewall_policy( + self.POLICY_ID, ignore_missing=False + ) + self.assertIs(None, sot) + sot = self.user_cloud.network.delete_firewall_rule( + self.RULE1_ID, ignore_missing=False + ) + self.assertIs(None, sot) + sot = self.user_cloud.network.delete_firewall_rule( + self.RULE2_ID, ignore_missing=False + ) + self.assertIs(None, sot) + super().tearDown() + + def test_insert_rule_into_policy(self): + policy = self.user_cloud.network.insert_rule_into_policy( + self.POLICY_ID, firewall_rule_id=self.RULE1_ID + ) + self.assertIn(self.RULE1_ID, policy["firewall_rules"]) + policy = self.user_cloud.network.insert_rule_into_policy( + self.POLICY_ID, + firewall_rule_id=self.RULE2_ID, + insert_before=self.RULE1_ID, + ) + self.assertEqual(self.RULE1_ID, 
policy["firewall_rules"][1]) + self.assertEqual(self.RULE2_ID, policy["firewall_rules"][0]) + + def test_remove_rule_from_policy(self): + # insert rules into policy before we remove it again + policy = self.user_cloud.network.insert_rule_into_policy( + self.POLICY_ID, firewall_rule_id=self.RULE1_ID + ) + self.assertIn(self.RULE1_ID, policy["firewall_rules"]) + + policy = self.user_cloud.network.insert_rule_into_policy( + self.POLICY_ID, firewall_rule_id=self.RULE2_ID + ) + self.assertIn(self.RULE2_ID, policy["firewall_rules"]) + + policy = self.user_cloud.network.remove_rule_from_policy( + self.POLICY_ID, firewall_rule_id=self.RULE1_ID + ) + self.assertNotIn(self.RULE1_ID, policy["firewall_rules"]) + + policy = self.user_cloud.network.remove_rule_from_policy( + self.POLICY_ID, firewall_rule_id=self.RULE2_ID + ) + self.assertNotIn(self.RULE2_ID, policy["firewall_rules"]) diff --git a/openstack/tests/functional/network/v2/test_flavor.py b/openstack/tests/functional/network/v2/test_flavor.py index 2d04bd05ce..f366c85dbb 100644 --- a/openstack/tests/functional/network/v2/test_flavor.py +++ b/openstack/tests/functional/network/v2/test_flavor.py @@ -10,49 +10,93 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid - from openstack.network.v2 import flavor from openstack.tests.functional import base class TestFlavor(base.BaseFunctionalTest): - - FLAVOR_NAME = uuid.uuid4().hex UPDATE_NAME = "UPDATED-NAME" SERVICE_TYPE = "FLAVORS" ID = None - @classmethod - def setUpClass(cls): - super(TestFlavor, cls).setUpClass() - flavors = cls.conn.network.create_flavor(name=cls.FLAVOR_NAME, - service_type=cls.SERVICE_TYPE) - assert isinstance(flavors, flavor.Flavor) - cls.assertIs(cls.FLAVOR_NAME, flavors.name) - cls.assertIs(cls.SERVICE_TYPE, flavors.service_type) + SERVICE_PROFILE_DESCRIPTION = "DESCRIPTION" + METAINFO = "FlAVOR_PROFILE_METAINFO" + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("flavors"): + self.skipTest("Neutron flavor extension is required for this test") + + self.FLAVOR_NAME = self.getUniqueString("flavor") + if self.operator_cloud: + flavors = self.operator_cloud.network.create_flavor( + name=self.FLAVOR_NAME, service_type=self.SERVICE_TYPE + ) + assert isinstance(flavors, flavor.Flavor) + self.assertEqual(self.FLAVOR_NAME, flavors.name) + self.assertEqual(self.SERVICE_TYPE, flavors.service_type) + + self.ID = flavors.id - cls.ID = flavors.id + self.service_profiles = ( + self.operator_cloud.network.create_service_profile( + description=self.SERVICE_PROFILE_DESCRIPTION, + metainfo=self.METAINFO, + ) + ) - @classmethod - def tearDownClass(cls): - flavors = cls.conn.network.delete_flavor(cls.ID, ignore_missing=True) - cls.assertIs(None, flavors) + def tearDown(self): + if self.operator_cloud and self.ID: + flavors = self.operator_cloud.network.delete_flavor( + self.ID, ignore_missing=True + ) + self.assertIsNone(flavors) + + service_profiles = self.user_cloud.network.delete_service_profile( + self.ID, ignore_missing=True + ) + self.assertIsNone(service_profiles) + super().tearDown() def test_find(self): - flavors = self.conn.network.find_flavor(self.FLAVOR_NAME) - self.assertEqual(self.ID, flavors.id) + if self.ID: + 
flavors = self.user_cloud.network.find_flavor(self.FLAVOR_NAME) + self.assertEqual(self.ID, flavors.id) + else: + self.user_cloud.network.find_flavor("definitely_missing") def test_get(self): - flavors = self.conn.network.get_flavor(self.ID) + if not self.ID: + self.skipTest("Operator cloud required for this test") + + flavors = self.user_cloud.network.get_flavor(self.ID) self.assertEqual(self.FLAVOR_NAME, flavors.name) self.assertEqual(self.ID, flavors.id) def test_list(self): - names = [f.name for f in self.conn.network.flavors()] - self.assertIn(self.FLAVOR_NAME, names) + names = [f.name for f in self.user_cloud.network.flavors()] + if self.ID: + self.assertIn(self.FLAVOR_NAME, names) def test_update(self): - flavor = self.conn.network.update_flavor(self.ID, - name=self.UPDATE_NAME) + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + flavor = self.operator_cloud.network.update_flavor( + self.ID, name=self.UPDATE_NAME + ) self.assertEqual(self.UPDATE_NAME, flavor.name) + + def test_associate_disassociate_flavor_with_service_profile(self): + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + response = ( + self.operator_cloud.network.associate_flavor_with_service_profile( + self.ID, self.service_profiles.id + ) + ) + self.assertIsNotNone(response) + + response = self.operator_cloud.network.disassociate_flavor_from_service_profile( # noqa: E501 + self.ID, self.service_profiles.id + ) + self.assertIsNone(response) diff --git a/openstack/tests/functional/network/v2/test_floating_ip.py b/openstack/tests/functional/network/v2/test_floating_ip.py index de6b6ed576..aa489519a3 100644 --- a/openstack/tests/functional/network/v2/test_floating_ip.py +++ b/openstack/tests/functional/network/v2/test_floating_ip.py @@ -9,138 +9,200 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- -import uuid +# mypy: disable-error-code="method-assign" from openstack.network.v2 import floating_ip from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import router from openstack.network.v2 import subnet -from openstack.tests.functional import base - +from openstack.tests.functional.network.v2 import common -class TestFloatingIP(base.BaseFunctionalTest): - ROT_NAME = uuid.uuid4().hex - EXT_NET_NAME = uuid.uuid4().hex - EXT_SUB_NAME = uuid.uuid4().hex - INT_NET_NAME = uuid.uuid4().hex - INT_SUB_NAME = uuid.uuid4().hex +class TestFloatingIP(common.TestTagNeutron): IPV4 = 4 EXT_CIDR = "10.100.0.0/24" INT_CIDR = "10.101.0.0/24" - EXT_NET_ID = None - INT_NET_ID = None - EXT_SUB_ID = None - INT_SUB_ID = None - ROT_ID = None - PORT_ID = None - FIP_ID = None - - @classmethod - def setUpClass(cls): - super(TestFloatingIP, cls).setUpClass() - # Create Exeternal Network - args = {'router:external': True} - net = cls._create_network(cls.EXT_NET_NAME, **args) - cls.EXT_NET_ID = net.id - sub = cls._create_subnet(cls.EXT_SUB_NAME, cls.EXT_NET_ID, - cls.EXT_CIDR) - cls.EXT_SUB_ID = sub.id + EXT_NET_ID: str + INT_NET_ID: str + EXT_SUB_ID: str + INT_SUB_ID: str + ROT_ID: str + PORT_ID: str + FIP: floating_ip.FloatingIP + DNS_DOMAIN = "example.org." 
+ DNS_NAME = "fip1" + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("external-net"): + self.skipTest( + "Neutron external-net extension is required for this test" + ) + self.TIMEOUT_SCALING_FACTOR = 1.5 + self.ROT_NAME = self.getUniqueString() + self.INT_NET_NAME = self.getUniqueString() + self.INT_SUB_NAME = self.getUniqueString() + self.is_dns_supported = False + + # Find External Network + for net in self.user_cloud.network.networks(is_router_external=True): + self.EXT_NET_ID = net.id + # Find subnet of the chosen external net + for sub in self.user_cloud.network.subnets(network_id=self.EXT_NET_ID): + self.EXT_SUB_ID = sub.id + if not self.EXT_NET_ID and self.operator_cloud: + # There is no existing external net, but operator + # credentials available + # WARNING: this external net is not dropped + # Create External Network + net = self._create_network( + self.EXT_NET_NAME, **{"router:external": True} + ) + self.EXT_NET_ID = net.id + sub = self._create_subnet( + self.EXT_SUB_NAME, self.EXT_NET_ID, self.EXT_CIDR + ) + self.EXT_SUB_ID = sub.id + # Create Internal Network - net = cls._create_network(cls.INT_NET_NAME) - cls.INT_NET_ID = net.id - sub = cls._create_subnet(cls.INT_SUB_NAME, cls.INT_NET_ID, - cls.INT_CIDR) - cls.INT_SUB_ID = sub.id + net = self._create_network(self.INT_NET_NAME) + self.INT_NET_ID = net.id + sub = self._create_subnet( + self.INT_SUB_NAME, self.INT_NET_ID, self.INT_CIDR + ) + self.INT_SUB_ID = sub.id # Create Router - args = {'external_gateway_info': {'network_id': cls.EXT_NET_ID}} - sot = cls.conn.network.create_router(name=cls.ROT_NAME, **args) + sot = self.user_cloud.network.create_router( + name=self.ROT_NAME, + **{"external_gateway_info": {"network_id": self.EXT_NET_ID}}, + ) assert isinstance(sot, router.Router) - cls.assertIs(cls.ROT_NAME, sot.name) - cls.ROT_ID = sot.id - cls.ROT = sot + self.assertEqual(self.ROT_NAME, sot.name) + self.ROT_ID = sot.id + self.ROT = sot # Add Router's Interface 
to Internal Network - sot = cls.ROT.add_interface(cls.conn.session, subnet_id=cls.INT_SUB_ID) - cls.assertIs(sot['subnet_id'], cls.INT_SUB_ID) + sot = self.ROT.add_interface( + self.user_cloud.network, subnet_id=self.INT_SUB_ID + ) + self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) # Create Port in Internal Network - prt = cls.conn.network.create_port(network_id=cls.INT_NET_ID) + prt = self.user_cloud.network.create_port(network_id=self.INT_NET_ID) assert isinstance(prt, port.Port) - cls.PORT_ID = prt.id + self.PORT_ID = prt.id + self.PORT = prt # Create Floating IP. - fip = cls.conn.network.create_ip(floating_network_id=cls.EXT_NET_ID) + fip_args = dict( + floating_network_id=self.EXT_NET_ID, + ) + if self.user_cloud._has_neutron_extension( + "dns-integration" + ) and self.user_cloud.has_service("dns"): + self.is_dns_supported = True + fip_args.update( + dict(dns_domain=self.DNS_DOMAIN, dns_name=self.DNS_NAME) + ) + fip = self.user_cloud.network.create_ip(**fip_args) assert isinstance(fip, floating_ip.FloatingIP) - cls.FIP_ID = fip.id - - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_ip(cls.FIP_ID, ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_port(cls.PORT_ID, ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.ROT.remove_interface(cls.conn.session, - subnet_id=cls.INT_SUB_ID) - cls.assertIs(sot['subnet_id'], cls.INT_SUB_ID) - sot = cls.conn.network.delete_router(cls.ROT_ID, ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_subnet(cls.EXT_SUB_ID, - ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_network(cls.EXT_NET_ID, - ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_subnet(cls.INT_SUB_ID, - ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_network(cls.INT_NET_ID, - ignore_missing=False) - cls.assertIs(None, sot) - - @classmethod - def _create_network(cls, name, **args): 
- cls.name = name - net = cls.conn.network.create_network(name=name, **args) + self.FIP = fip + self.ID = self.FIP.id + self.get_command = self.user_cloud.network.get_ip + + def tearDown(self): + sot = self.user_cloud.network.delete_ip( + self.FIP.id, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_port( + self.PORT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.ROT.remove_interface( + self.user_cloud.network, subnet_id=self.INT_SUB_ID + ) + self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) + sot = self.user_cloud.network.delete_router( + self.ROT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_subnet( + self.INT_SUB_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.INT_NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def _create_network(self, name, **args): + self.name = name + net = self.user_cloud.network.create_network(name=name, **args) assert isinstance(net, network.Network) - cls.assertIs(cls.name, net.name) + self.assertEqual(self.name, net.name) return net - @classmethod - def _create_subnet(cls, name, net_id, cidr): - cls.name = name - cls.net_id = net_id - cls.cidr = cidr - sub = cls.conn.network.create_subnet(name=cls.name, - ip_version=cls.IPV4, - network_id=cls.net_id, - cidr=cls.cidr) + def _create_subnet(self, name, net_id, cidr): + self.name = name + self.net_id = net_id + self.cidr = cidr + sub = self.user_cloud.network.create_subnet( + name=self.name, + ip_version=self.IPV4, + network_id=self.net_id, + cidr=self.cidr, + ) assert isinstance(sub, subnet.Subnet) - cls.assertIs(cls.name, sub.name) + self.assertEqual(self.name, sub.name) return sub - def test_find(self): - sot = self.conn.network.find_ip(self.FIP_ID) - self.assertEqual(self.FIP_ID, sot.id) + def test_find_by_id(self): + sot = self.user_cloud.network.find_ip(self.FIP.id) + 
self.assertEqual(self.FIP.id, sot.id) + + def test_find_by_ip_address(self): + sot = self.user_cloud.network.find_ip(self.FIP.floating_ip_address) + self.assertEqual(self.FIP.floating_ip_address, sot.floating_ip_address) + self.assertEqual(self.FIP.floating_ip_address, sot.name) def test_find_available_ip(self): - sot = self.conn.network.find_available_ip() + sot = self.user_cloud.network.find_available_ip() self.assertIsNotNone(sot.id) self.assertIsNone(sot.port_id) + self.assertIsNone(sot.port_details) def test_get(self): - sot = self.conn.network.get_ip(self.FIP_ID) + sot = self.user_cloud.network.get_ip(self.FIP.id) self.assertEqual(self.EXT_NET_ID, sot.floating_network_id) - self.assertEqual(self.FIP_ID, sot.id) - self.assertIn('floating_ip_address', sot) - self.assertIn('fixed_ip_address', sot) - self.assertIn('port_id', sot) - self.assertIn('router_id', sot) + self.assertEqual(self.FIP.id, sot.id) + self.assertEqual(self.FIP.floating_ip_address, sot.floating_ip_address) + self.assertEqual(self.FIP.fixed_ip_address, sot.fixed_ip_address) + self.assertEqual(self.FIP.port_id, sot.port_id) + self.assertEqual(self.FIP.port_details, sot.port_details) + self.assertEqual(self.FIP.router_id, sot.router_id) + if self.is_dns_supported: + self.assertEqual(self.DNS_DOMAIN, sot.dns_domain) + self.assertEqual(self.DNS_NAME, sot.dns_name) def test_list(self): - ids = [o.id for o in self.conn.network.ips()] - self.assertIn(self.FIP_ID, ids) + ids = [o.id for o in self.user_cloud.network.ips()] + self.assertIn(self.FIP.id, ids) def test_update(self): - sot = self.conn.network.update_ip(self.FIP_ID, port_id=self.PORT_ID) + sot = self.user_cloud.network.update_ip( + self.FIP.id, port_id=self.PORT_ID + ) self.assertEqual(self.PORT_ID, sot.port_id) - self.assertEqual(self.FIP_ID, sot.id) + self._assert_port_details(self.PORT, sot.port_details) + self.assertEqual(self.FIP.id, sot.id) + + def _assert_port_details(self, port, port_details): + self.assertEqual(port.name, 
port_details["name"]) + self.assertEqual(port.network_id, port_details["network_id"]) + self.assertEqual(port.mac_address, port_details["mac_address"]) + self.assertEqual( + port.is_admin_state_up, port_details["admin_state_up"] + ) + self.assertEqual(port.status, port_details["status"]) + self.assertEqual(port.device_id, port_details["device_id"]) + self.assertEqual(port.device_owner, port_details["device_owner"]) diff --git a/openstack/tests/functional/network/v2/test_l3_conntrack_helper.py b/openstack/tests/functional/network/v2/test_l3_conntrack_helper.py new file mode 100644 index 0000000000..bf8022c040 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_l3_conntrack_helper.py @@ -0,0 +1,77 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.network.v2 import l3_conntrack_helper as _l3_conntrack_helper +from openstack.network.v2 import router +from openstack.tests.functional import base + + +class TestL3ConntrackHelper(base.BaseFunctionalTest): + PROTOCOL = "udp" + HELPER = "tftp" + PORT = 69 + + ROT_ID = None + + def setUp(self): + super().setUp() + + if not self.user_cloud.network.find_extension("l3-conntrack-helper"): + self.skipTest("L3 conntrack helper extension disabled") + + self.ROT_NAME = self.getUniqueString() + # Create Router + sot = self.user_cloud.network.create_router(name=self.ROT_NAME) + self.assertIsInstance(sot, router.Router) + self.assertEqual(self.ROT_NAME, sot.name) + self.ROT_ID = sot.id + self.ROT = sot + + # Create conntrack helper + ct_helper = self.user_cloud.network.create_conntrack_helper( + router=self.ROT, + protocol=self.PROTOCOL, + helper=self.HELPER, + port=self.PORT, + ) + self.assertIsInstance(ct_helper, _l3_conntrack_helper.ConntrackHelper) + self.CT_HELPER = ct_helper + + def tearDown(self): + sot = self.user_cloud.network.delete_router( + self.ROT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.network.get_conntrack_helper( + self.CT_HELPER, self.ROT_ID + ) + self.assertEqual(self.PROTOCOL, sot.protocol) + self.assertEqual(self.HELPER, sot.helper) + self.assertEqual(self.PORT, sot.port) + + def test_list(self): + helper_ids = [ + o.id + for o in self.user_cloud.network.conntrack_helpers(self.ROT_ID) + ] + self.assertIn(self.CT_HELPER.id, helper_ids) + + def test_update(self): + NEW_PORT = 90 + sot = self.user_cloud.network.update_conntrack_helper( + self.CT_HELPER.id, self.ROT_ID, port=NEW_PORT + ) + self.assertEqual(NEW_PORT, sot.port) diff --git a/openstack/tests/functional/network/v2/test_local_ip.py b/openstack/tests/functional/network/v2/test_local_ip.py new file mode 100644 index 0000000000..47ac357ab6 --- /dev/null +++ 
b/openstack/tests/functional/network/v2/test_local_ip.py @@ -0,0 +1,68 @@ +# Copyright 2021 Huawei, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from openstack.network.v2 import local_ip as _local_ip +from openstack.tests.functional import base + + +class TestLocalIP(base.BaseFunctionalTest): + LOCAL_IP_ID = None + + def setUp(self): + super().setUp() + + if not self.user_cloud.network.find_extension("local_ip"): + self.skipTest("Local IP extension disabled") + + self.LOCAL_IP_NAME = self.getUniqueString() + self.LOCAL_IP_DESCRIPTION = self.getUniqueString() + self.LOCAL_IP_NAME_UPDATED = self.getUniqueString() + self.LOCAL_IP_DESCRIPTION_UPDATED = self.getUniqueString() + local_ip = self.user_cloud.network.create_local_ip( + name=self.LOCAL_IP_NAME, + description=self.LOCAL_IP_DESCRIPTION, + ) + assert isinstance(local_ip, _local_ip.LocalIP) + self.assertEqual(self.LOCAL_IP_NAME, local_ip.name) + self.assertEqual(self.LOCAL_IP_DESCRIPTION, local_ip.description) + self.LOCAL_IP_ID = local_ip.id + + def tearDown(self): + sot = self.user_cloud.network.delete_local_ip(self.LOCAL_IP_ID) + self.assertIsNone(sot) + super().tearDown() + + def test_find(self): + sot = self.user_cloud.network.find_local_ip(self.LOCAL_IP_NAME) + self.assertEqual(self.LOCAL_IP_ID, sot.id) + + def test_get(self): + sot = self.user_cloud.network.get_local_ip(self.LOCAL_IP_ID) + self.assertEqual(self.LOCAL_IP_NAME, sot.name) + + def test_list(self): + names = [ + 
local_ip.name for local_ip in self.user_cloud.network.local_ips() + ] + self.assertIn(self.LOCAL_IP_NAME, names) + + def test_update(self): + sot = self.user_cloud.network.update_local_ip( + self.LOCAL_IP_ID, + name=self.LOCAL_IP_NAME_UPDATED, + description=self.LOCAL_IP_DESCRIPTION_UPDATED, + ) + self.assertEqual(self.LOCAL_IP_NAME_UPDATED, sot.name) + self.assertEqual(self.LOCAL_IP_DESCRIPTION_UPDATED, sot.description) diff --git a/openstack/tests/functional/network/v2/test_local_ip_association.py b/openstack/tests/functional/network/v2/test_local_ip_association.py new file mode 100644 index 0000000000..9911ee7674 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_local_ip_association.py @@ -0,0 +1,76 @@ +# Copyright 2021 Huawei, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from openstack.network.v2 import local_ip_association as _local_ip_association +from openstack.tests.functional import base + + +class TestLocalIPAssociation(base.BaseFunctionalTest): + LOCAL_IP_ID = None + FIXED_PORT_ID = None + FIXED_IP = None + + def setUp(self): + super().setUp() + + if not self.user_cloud.network.find_extension("local_ip"): + self.skipTest("Local IP extension disabled") + + self.LOCAL_IP_ID = self.getUniqueString() + self.FIXED_PORT_ID = self.getUniqueString() + self.FIXED_IP = self.getUniqueString() + local_ip_association = ( + self.user_cloud.network.create_local_ip_association( + local_ip=self.LOCAL_IP_ID, + fixed_port_id=self.FIXED_PORT_ID, + fixed_ip=self.FIXED_IP, + ) + ) + assert isinstance( + local_ip_association, _local_ip_association.LocalIPAssociation + ) + self.assertEqual(self.LOCAL_IP_ID, local_ip_association.local_ip_id) + self.assertEqual( + self.FIXED_PORT_ID, local_ip_association.fixed_port_id + ) + self.assertEqual(self.FIXED_IP, local_ip_association.fixed_ip) + + def tearDown(self): + sot = self.user_cloud.network.delete_local_ip_association( + self.LOCAL_IP_ID, self.FIXED_PORT_ID + ) + self.assertIsNone(sot) + super().tearDown() + + def test_find(self): + sot = self.user_cloud.network.find_local_ip_association( + self.FIXED_PORT_ID, self.LOCAL_IP_ID + ) + self.assertEqual(self.FIXED_PORT_ID, sot.fixed_port_id) + + def test_get(self): + sot = self.user_cloud.network.get_local_ip_association( + self.FIXED_PORT_ID, self.LOCAL_IP_ID + ) + self.assertEqual(self.FIXED_PORT_ID, sot.fixed_port_id) + + def test_list(self): + fixed_port_id = [ + obj.fixed_port_id + for obj in self.user_cloud.network.local_ip_associations( + self.LOCAL_IP_ID + ) + ] + self.assertIn(self.FIXED_PORT_ID, fixed_port_id) diff --git a/openstack/tests/functional/network/v2/test_ndp_proxy.py b/openstack/tests/functional/network/v2/test_ndp_proxy.py new file mode 100644 index 0000000000..ae24c0d0fa --- /dev/null +++ 
b/openstack/tests/functional/network/v2/test_ndp_proxy.py @@ -0,0 +1,162 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import ndp_proxy as _ndp_proxy +from openstack.network.v2 import network +from openstack.network.v2 import port +from openstack.network.v2 import router +from openstack.network.v2 import subnet +from openstack.tests.functional import base + + +class TestNDPProxy(base.BaseFunctionalTest): + IPV6 = 6 + EXT_CIDR = "2002::1:0/112" + INT_CIDR = "2002::2:0/112" + EXT_NET_ID = None + INT_NET_ID = None + EXT_SUB_ID = None + INT_SUB_ID = None + ROT_ID = None + INTERNAL_PORT_ID = None + + def setUp(self): + super().setUp() + + if not self.user_cloud.network.find_extension("l3-ndp-proxy"): + self.skipTest("L3 ndp proxy extension disabled") + + self.ROT_NAME = self.getUniqueString() + self.EXT_NET_NAME = self.getUniqueString() + self.EXT_SUB_NAME = self.getUniqueString() + self.INT_NET_NAME = self.getUniqueString() + self.INT_SUB_NAME = self.getUniqueString() + + # Find External Network + for net in self.user_cloud.network.networks(is_router_external=True): + self.EXT_NET_ID = net.id + # Find subnet of the chosen external net + for sub in self.user_cloud.network.subnets(network_id=self.EXT_NET_ID): + self.EXT_SUB_ID = sub.id + if not self.EXT_NET_ID and self.operator_cloud: + # There is no existing external net, but operator + # credentials available + # WARNING: this external net is not dropped + # Create External Network + net = 
self._create_network( + self.EXT_NET_NAME, **{"router:external": True} + ) + self.EXT_NET_ID = net.id + sub = self._create_subnet( + self.EXT_SUB_NAME, self.EXT_NET_ID, self.EXT_CIDR + ) + self.EXT_SUB_ID = sub.id + + # Create Router + sot = self.user_cloud.network.create_router( + name=self.ROT_NAME, + **{ + "external_gateway_info": {"network_id": self.EXT_NET_ID}, + "enable_ndp_proxy": True, + }, + ) + assert isinstance(sot, router.Router) + self.assertEqual(self.ROT_NAME, sot.name) + self.ROT_ID = sot.id + self.ROT = sot + # Add Router's Interface to Internal Network + sot = self.ROT.add_interface( + self.user_cloud.network, subnet_id=self.INT_SUB_ID + ) + self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) + # Create Port in Internal Network + prt = self.user_cloud.network.create_port(network_id=self.INT_NET_ID) + assert isinstance(prt, port.Port) + self.INTERNAL_PORT_ID = prt.id + self.INTERNAL_IP_ADDRESS = prt.fixed_ips[0]["ip_address"] + # Create ndp proxy + np = self.user_cloud.network.create_ndp_proxy( + router_id=self.ROT_ID, port_id=self.INTERNAL_PORT_ID + ) + assert isinstance(np, _ndp_proxy.NDPProxy) + self.NP = np + + def tearDown(self): + sot = self.user_cloud.network.delete_ndp_proxy( + self.NP.id, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_port( + self.INTERNAL_PORT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.ROT.remove_interface( + self.user_cloud.network, subnet_id=self.INT_SUB_ID + ) + self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) + sot = self.user_cloud.network.delete_router( + self.ROT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_subnet( + self.INT_SUB_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.INT_NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def _create_network(self, name, **args): + self.name = name + net = 
self.user_cloud.network.create_network(name=name, **args) + assert isinstance(net, network.Network) + self.assertEqual(self.name, net.name) + return net + + def _create_subnet(self, name, net_id, cidr): + self.name = name + self.net_id = net_id + self.cidr = cidr + sub = self.user_cloud.network.create_subnet( + name=self.name, + ip_version=self.IPV6, + network_id=self.net_id, + cidr=self.cidr, + ) + assert isinstance(sub, subnet.Subnet) + self.assertEqual(self.name, sub.name) + return sub + + def test_find(self): + sot = self.user_cloud.network.find_ndp_proxy(self.NP.id) + self.assertEqual(self.ROT_ID, sot.router_id) + self.assertEqual(self.INTERNAL_PORT_ID, sot.port_id) + self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.ip_address) + + def test_get(self): + sot = self.user_cloud.network.get_ndp_proxy(self.NP.id) + self.assertEqual(self.ROT_ID, sot.router_id) + self.assertEqual(self.INTERNAL_PORT_ID, sot.port_id) + self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.ip_address) + + def test_list(self): + np_ids = [o.id for o in self.user_cloud.network.ndp_proxies()] + self.assertIn(self.NP.id, np_ids) + + def test_update(self): + description = "balabalbala" + sot = self.user_cloud.network.update_ndp_proxy( + self.NP.id, description=description + ) + self.assertEqual(description, sot.description) diff --git a/openstack/tests/functional/network/v2/test_network.py b/openstack/tests/functional/network/v2/test_network.py index 6a959d71f3..f2a116e00e 100644 --- a/openstack/tests/functional/network/v2/test_network.py +++ b/openstack/tests/functional/network/v2/test_network.py @@ -9,21 +9,18 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- -import uuid +# mypy: disable-error-code="method-assign" from openstack.network.v2 import network -from openstack.tests.functional import base +from openstack.tests.functional.network.v2 import common def create_network(conn, name, cidr): try: network = conn.network.create_network(name=name) subnet = conn.network.create_subnet( - name=name, - ip_version=4, - network_id=network.id, - cidr=cidr) + name=name, ip_version=4, network_id=network.id, cidr=cidr + ) return (network, subnet) except Exception as e: print(str(e)) @@ -38,33 +35,52 @@ def delete_network(conn, network, subnet): conn.network.delete_network(network) -class TestNetwork(base.BaseFunctionalTest): - - NAME = uuid.uuid4().hex +class TestNetwork(common.TestTagNeutron): ID = None - @classmethod - def setUpClass(cls): - super(TestNetwork, cls).setUpClass() - sot = cls.conn.network.create_network(name=cls.NAME) + def setUp(self): + super().setUp() + self.NAME = self.getUniqueString() + sot = self.user_cloud.network.create_network(name=self.NAME) assert isinstance(sot, network.Network) - cls.assertIs(cls.NAME, sot.name) - cls.ID = sot.id + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id + self.get_command = self.user_cloud.network.get_network - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_network(cls.ID, ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_network( + self.ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_network(self.NAME) + sot = self.user_cloud.network.find_network(self.NAME) self.assertEqual(self.ID, sot.id) + def test_find_with_filter(self): + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + project_id_1 = "1" + project_id_2 = "2" + sot1 = self.operator_cloud.network.create_network( + name=self.NAME, project_id=project_id_1 + ) + sot2 = self.operator_cloud.network.create_network( + 
name=self.NAME, project_id=project_id_2 + ) + sot = self.operator_cloud.network.find_network( + self.NAME, project_id=project_id_1 + ) + self.assertEqual(project_id_1, sot.project_id) + self.operator_cloud.network.delete_network(sot1.id) + self.operator_cloud.network.delete_network(sot2.id) + def test_get(self): - sot = self.conn.network.get_network(self.ID) + sot = self.user_cloud.network.get_network(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) def test_list(self): - names = [o.name for o in self.conn.network.networks()] + names = [o.name for o in self.user_cloud.network.networks()] self.assertIn(self.NAME, names) diff --git a/openstack/tests/functional/network/v2/test_network_ip_availability.py b/openstack/tests/functional/network/v2/test_network_ip_availability.py index cfd814b64f..7ae4b02267 100644 --- a/openstack/tests/functional/network/v2/test_network_ip_availability.py +++ b/openstack/tests/functional/network/v2/test_network_ip_availability.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid from openstack.network.v2 import network from openstack.network.v2 import port @@ -19,56 +18,73 @@ class TestNetworkIPAvailability(base.BaseFunctionalTest): - - NET_NAME = uuid.uuid4().hex - SUB_NAME = uuid.uuid4().hex - PORT_NAME = uuid.uuid4().hex - UPDATE_NAME = uuid.uuid4().hex IPV4 = 4 CIDR = "10.100.0.0/24" NET_ID = None SUB_ID = None PORT_ID = None - @classmethod - def setUpClass(cls): - super(TestNetworkIPAvailability, cls).setUpClass() - net = cls.conn.network.create_network(name=cls.NET_NAME) + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + if not self.operator_cloud._has_neutron_extension( + "network-ip-availability" + ): + self.skipTest( + "Neutron network-ip-availability extension is required " + "for this test" + ) + + self.NET_NAME = self.getUniqueString() + self.SUB_NAME = self.getUniqueString() + self.PORT_NAME = self.getUniqueString() + self.UPDATE_NAME = self.getUniqueString() + net = self.operator_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) - cls.assertIs(cls.NET_NAME, net.name) - cls.NET_ID = net.id - sub = cls.conn.network.create_subnet(name=cls.SUB_NAME, - ip_version=cls.IPV4, - network_id=cls.NET_ID, - cidr=cls.CIDR) + self.assertEqual(self.NET_NAME, net.name) + self.NET_ID = net.id + sub = self.operator_cloud.network.create_subnet( + name=self.SUB_NAME, + ip_version=self.IPV4, + network_id=self.NET_ID, + cidr=self.CIDR, + ) assert isinstance(sub, subnet.Subnet) - cls.assertIs(cls.SUB_NAME, sub.name) - cls.SUB_ID = sub.id - prt = cls.conn.network.create_port(name=cls.PORT_NAME, - network_id=cls.NET_ID) + self.assertEqual(self.SUB_NAME, sub.name) + self.SUB_ID = sub.id + prt = self.operator_cloud.network.create_port( + name=self.PORT_NAME, network_id=self.NET_ID + ) assert isinstance(prt, port.Port) - cls.assertIs(cls.PORT_NAME, prt.name) - cls.PORT_ID = prt.id + self.assertEqual(self.PORT_NAME, prt.name) + 
self.PORT_ID = prt.id - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_port(cls.PORT_ID) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_subnet(cls.SUB_ID) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_network(cls.NET_ID) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.operator_cloud.network.delete_port(self.PORT_ID) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_subnet(self.SUB_ID) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_network(self.NET_ID) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_network_ip_availability(self.NET_ID) + sot = self.operator_cloud.network.find_network_ip_availability( + self.NET_ID + ) self.assertEqual(self.NET_ID, sot.network_id) def test_get(self): - sot = self.conn.network.get_network_ip_availability(self.NET_ID) + sot = self.operator_cloud.network.get_network_ip_availability( + self.NET_ID + ) self.assertEqual(self.NET_ID, sot.network_id) self.assertEqual(self.NET_NAME, sot.network_name) def test_list(self): - ids = [o.network_id for o in - self.conn.network.network_ip_availabilities()] + ids = [ + o.network_id + for o in self.operator_cloud.network.network_ip_availabilities() + ] self.assertIn(self.NET_ID, ids) diff --git a/openstack/tests/functional/network/v2/test_network_segment_range.py b/openstack/tests/functional/network/v2/test_network_segment_range.py new file mode 100644 index 0000000000..c7c4791bf2 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_network_segment_range.py @@ -0,0 +1,119 @@ +# Copyright (c) 2018, Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import network_segment_range +from openstack.tests.functional import base + + +class TestNetworkSegmentRange(base.BaseFunctionalTest): + NETWORK_SEGMENT_RANGE_ID = None + NAME = "test_name" + DEFAULT = False + SHARED = False + PROJECT_ID = "2018" + NETWORK_TYPE = "vlan" + PHYSICAL_NETWORK = "phys_net" + MINIMUM = 100 + MAXIMUM = 200 + + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + + # NOTE(kailun): The network segment range extension is not yet enabled + # by default. + # Skip the tests if not enabled. 
+ if not self.operator_cloud.network.find_extension( + "network-segment-range" + ): + self.skipTest("Network Segment Range extension disabled") + + test_seg_range = ( + self.operator_cloud.network.create_network_segment_range( + name=self.NAME, + default=self.DEFAULT, + shared=self.SHARED, + project_id=self.PROJECT_ID, + network_type=self.NETWORK_TYPE, + physical_network=self.PHYSICAL_NETWORK, + minimum=self.MINIMUM, + maximum=self.MAXIMUM, + ) + ) + self.assertIsInstance( + test_seg_range, network_segment_range.NetworkSegmentRange + ) + self.NETWORK_SEGMENT_RANGE_ID = test_seg_range.id + self.assertEqual(self.NAME, test_seg_range.name) + self.assertEqual(self.DEFAULT, test_seg_range.default) + self.assertEqual(self.SHARED, test_seg_range.shared) + self.assertEqual(self.PROJECT_ID, test_seg_range.project_id) + self.assertEqual(self.NETWORK_TYPE, test_seg_range.network_type) + self.assertEqual( + self.PHYSICAL_NETWORK, test_seg_range.physical_network + ) + self.assertEqual(self.MINIMUM, test_seg_range.minimum) + self.assertEqual(self.MAXIMUM, test_seg_range.maximum) + + def tearDown(self): + super().tearDown() + + def test_create_delete(self): + del_test_seg_range = ( + self.operator_cloud.network.delete_network_segment_range( + self.NETWORK_SEGMENT_RANGE_ID + ) + ) + self.assertIsNone(del_test_seg_range) + + def test_find(self): + test_seg_range = ( + self.operator_cloud.network.find_network_segment_range( + self.NETWORK_SEGMENT_RANGE_ID + ) + ) + self.assertEqual(self.NETWORK_SEGMENT_RANGE_ID, test_seg_range.id) + + def test_get(self): + test_seg_range = self.operator_cloud.network.get_network_segment_range( + self.NETWORK_SEGMENT_RANGE_ID + ) + self.assertEqual(self.NETWORK_SEGMENT_RANGE_ID, test_seg_range.id) + self.assertEqual(self.NAME, test_seg_range.name) + self.assertEqual(self.DEFAULT, test_seg_range.default) + self.assertEqual(self.SHARED, test_seg_range.shared) + self.assertEqual(self.PROJECT_ID, test_seg_range.project_id) + 
self.assertEqual(self.NETWORK_TYPE, test_seg_range.network_type) + self.assertEqual( + self.PHYSICAL_NETWORK, test_seg_range.physical_network + ) + self.assertEqual(self.MINIMUM, test_seg_range.minimum) + self.assertEqual(self.MAXIMUM, test_seg_range.maximum) + + def test_list(self): + ids = [ + o.id + for o in self.operator_cloud.network.network_segment_ranges( + name=None + ) + ] + self.assertIn(self.NETWORK_SEGMENT_RANGE_ID, ids) + + def test_update(self): + update_seg_range = self.operator_cloud.network.update_segment( + self.NETWORK_SEGMENT_RANGE_ID, name="update_test_name" + ) + self.assertEqual("update_test_name", update_seg_range.name) diff --git a/openstack/tests/functional/network/v2/test_port.py b/openstack/tests/functional/network/v2/test_port.py index 7409c79407..9017c7bc46 100644 --- a/openstack/tests/functional/network/v2/test_port.py +++ b/openstack/tests/functional/network/v2/test_port.py @@ -9,71 +9,80 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- -import uuid +# mypy: disable-error-code="method-assign" from openstack.network.v2 import network from openstack.network.v2 import port from openstack.network.v2 import subnet -from openstack.tests.functional import base - +from openstack.tests.functional.network.v2 import common -class TestPort(base.BaseFunctionalTest): - NET_NAME = uuid.uuid4().hex - SUB_NAME = uuid.uuid4().hex - PORT_NAME = uuid.uuid4().hex - UPDATE_NAME = uuid.uuid4().hex +class TestPort(common.TestTagNeutron): IPV4 = 4 CIDR = "10.100.0.0/24" NET_ID = None SUB_ID = None PORT_ID = None - @classmethod - def setUpClass(cls): - super(TestPort, cls).setUpClass() - net = cls.conn.network.create_network(name=cls.NET_NAME) + def setUp(self): + super().setUp() + self.NET_NAME = self.getUniqueString() + self.SUB_NAME = self.getUniqueString() + self.PORT_NAME = self.getUniqueString() + self.UPDATE_NAME = self.getUniqueString() + net = self.user_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) - cls.assertIs(cls.NET_NAME, net.name) - cls.NET_ID = net.id - sub = cls.conn.network.create_subnet(name=cls.SUB_NAME, - ip_version=cls.IPV4, - network_id=cls.NET_ID, - cidr=cls.CIDR) + self.assertEqual(self.NET_NAME, net.name) + self.NET_ID = net.id + sub = self.user_cloud.network.create_subnet( + name=self.SUB_NAME, + ip_version=self.IPV4, + network_id=self.NET_ID, + cidr=self.CIDR, + ) assert isinstance(sub, subnet.Subnet) - cls.assertIs(cls.SUB_NAME, sub.name) - cls.SUB_ID = sub.id - prt = cls.conn.network.create_port(name=cls.PORT_NAME, - network_id=cls.NET_ID) + self.assertEqual(self.SUB_NAME, sub.name) + self.SUB_ID = sub.id + prt = self.user_cloud.network.create_port( + name=self.PORT_NAME, network_id=self.NET_ID + ) assert isinstance(prt, port.Port) - cls.assertIs(cls.PORT_NAME, prt.name) - cls.PORT_ID = prt.id + self.assertEqual(self.PORT_NAME, prt.name) + self.PORT_ID = self.ID = prt.id + self.get_command = self.user_cloud.network.get_port - @classmethod - def 
tearDownClass(cls): - sot = cls.conn.network.delete_port(cls.PORT_ID, ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_subnet(cls.SUB_ID, ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_network(cls.NET_ID, ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_port( + self.PORT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_subnet( + self.SUB_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_port(self.PORT_NAME) + sot = self.user_cloud.network.find_port(self.PORT_NAME) self.assertEqual(self.PORT_ID, sot.id) def test_get(self): - sot = self.conn.network.get_port(self.PORT_ID) + sot = self.user_cloud.network.get_port(self.PORT_ID) self.assertEqual(self.PORT_ID, sot.id) self.assertEqual(self.PORT_NAME, sot.name) self.assertEqual(self.NET_ID, sot.network_id) def test_list(self): - ids = [o.id for o in self.conn.network.ports()] + ids = [o.id for o in self.user_cloud.network.ports()] self.assertIn(self.PORT_ID, ids) def test_update(self): - sot = self.conn.network.update_port(self.PORT_ID, - name=self.UPDATE_NAME) + assert self.PORT_ID is not None + sot = self.user_cloud.network.update_port( + self.PORT_ID, name=self.UPDATE_NAME + ) self.assertEqual(self.UPDATE_NAME, sot.name) diff --git a/openstack/tests/functional/network/v2/test_port_forwarding.py b/openstack/tests/functional/network/v2/test_port_forwarding.py new file mode 100644 index 0000000000..0d28a8efcf --- /dev/null +++ b/openstack/tests/functional/network/v2/test_port_forwarding.py @@ -0,0 +1,207 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import floating_ip +from openstack.network.v2 import network +from openstack.network.v2 import port +from openstack.network.v2 import port_forwarding as _port_forwarding +from openstack.network.v2 import router +from openstack.network.v2 import subnet +from openstack.tests.functional import base + + +class TestPortForwarding(base.BaseFunctionalTest): + IPV4 = 4 + FIP_ID = None + EXT_CIDR = "10.100.0.0/24" + INT_CIDR = "10.101.0.0/24" + EXT_NET_ID = None + INT_NET_ID = None + EXT_SUB_ID = None + INT_SUB_ID = None + ROT_ID = None + + INTERNAL_PORT_ID = None + INTERNAL_IP_ADDRESS = None + INTERNAL_PORT = 8080 + EXTERNAL_PORT = 80 + PROTOCOL = "tcp" + DESCRIPTION = "description" + + def setUp(self): + super().setUp() + + if not self.user_cloud._has_neutron_extension("external-net"): + self.skipTest( + "Neutron external-net extension is required for this test" + ) + if not self.user_cloud.network.find_extension( + "floating-ip-port-forwarding" + ): + self.skipTest("Floating IP Port Forwarding extension disabled") + + self.ROT_NAME = self.getUniqueString() + self.INT_NET_NAME = self.getUniqueString() + self.INT_SUB_NAME = self.getUniqueString() + self.EXT_NET_ID = None + self.EXT_SUB_ID = None + + # Find External Network + for net in self.user_cloud.network.networks(is_router_external=True): + self.EXT_NET_ID = net.id + # Find subnet of the chosen external net + for sub in self.user_cloud.network.subnets(network_id=self.EXT_NET_ID): + self.EXT_SUB_ID = sub.id + if not self.EXT_NET_ID and self.operator_cloud: + # There is no existing 
external net, but operator + # credentials available + # WARNING: this external net is not dropped + # Create External Network + net = self._create_network( + self.EXT_NET_NAME, **{"router:external": True} + ) + self.EXT_NET_ID = net.id + sub = self._create_subnet( + self.EXT_SUB_NAME, self.EXT_NET_ID, self.EXT_CIDR + ) + self.EXT_SUB_ID = sub.id + + # Create Internal Network + net = self._create_network(self.INT_NET_NAME) + self.INT_NET_ID = net.id + sub = self._create_subnet( + self.INT_SUB_NAME, self.INT_NET_ID, self.INT_CIDR + ) + self.INT_SUB_ID = sub.id + # Create Router + sot = self.user_cloud.network.create_router( + name=self.ROT_NAME, + **{"external_gateway_info": {"network_id": self.EXT_NET_ID}}, + ) + assert isinstance(sot, router.Router) + self.assertEqual(self.ROT_NAME, sot.name) + self.ROT_ID = sot.id + self.ROT = sot + # Add Router's Interface to Internal Network + sot = self.ROT.add_interface( + self.user_cloud.network, subnet_id=self.INT_SUB_ID + ) + self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) + # Create Port in Internal Network + prt = self.user_cloud.network.create_port(network_id=self.INT_NET_ID) + assert isinstance(prt, port.Port) + self.INTERNAL_PORT_ID = prt.id + self.INTERNAL_IP_ADDRESS = prt.fixed_ips[0]["ip_address"] + # Create Floating IP. 
+ fip = self.user_cloud.network.create_ip( + floating_network_id=self.EXT_NET_ID + ) + assert isinstance(fip, floating_ip.FloatingIP) + self.FIP_ID = fip.id + # Create Port Forwarding + pf = self.user_cloud.network.create_port_forwarding( + floatingip_id=self.FIP_ID, + internal_port_id=self.INTERNAL_PORT_ID, + internal_ip_address=self.INTERNAL_IP_ADDRESS, + internal_port=self.INTERNAL_PORT, + external_port=self.EXTERNAL_PORT, + protocol=self.PROTOCOL, + description=self.DESCRIPTION, + ) + assert isinstance(pf, _port_forwarding.PortForwarding) + self.PF = pf + + def tearDown(self): + sot = self.user_cloud.network.delete_port_forwarding( + self.PF, self.FIP_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_ip( + self.FIP_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_port( + self.INTERNAL_PORT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.ROT.remove_interface( + self.user_cloud.network, subnet_id=self.INT_SUB_ID + ) + self.assertEqual(sot["subnet_id"], self.INT_SUB_ID) + sot = self.user_cloud.network.delete_router( + self.ROT_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_subnet( + self.INT_SUB_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.INT_NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def _create_network(self, name, **args): + self.name = name + net = self.user_cloud.network.create_network(name=name, **args) + assert isinstance(net, network.Network) + self.assertEqual(self.name, net.name) + return net + + def _create_subnet(self, name, net_id, cidr): + self.name = name + self.net_id = net_id + self.cidr = cidr + sub = self.user_cloud.network.create_subnet( + name=self.name, + ip_version=self.IPV4, + network_id=self.net_id, + cidr=self.cidr, + ) + assert isinstance(sub, subnet.Subnet) + 
self.assertEqual(self.name, sub.name) + return sub + + def test_find(self): + sot = self.user_cloud.network.find_port_forwarding( + self.PF.id, self.FIP_ID + ) + self.assertEqual(self.INTERNAL_PORT_ID, sot.internal_port_id) + self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.internal_ip_address) + self.assertEqual(self.INTERNAL_PORT, sot.internal_port) + self.assertEqual(self.EXTERNAL_PORT, sot.external_port) + self.assertEqual(self.PROTOCOL, sot.protocol) + self.assertEqual(self.DESCRIPTION, sot.description) + + def test_get(self): + sot = self.user_cloud.network.get_port_forwarding(self.PF, self.FIP_ID) + self.assertEqual(self.INTERNAL_PORT_ID, sot.internal_port_id) + self.assertEqual(self.INTERNAL_IP_ADDRESS, sot.internal_ip_address) + self.assertEqual(self.INTERNAL_PORT, sot.internal_port) + self.assertEqual(self.EXTERNAL_PORT, sot.external_port) + self.assertEqual(self.PROTOCOL, sot.protocol) + self.assertEqual(self.DESCRIPTION, sot.description) + + def test_list(self): + pf_ids = [ + o.id for o in self.user_cloud.network.port_forwardings(self.FIP_ID) + ] + self.assertIn(self.PF.id, pf_ids) + + def test_update(self): + NEW_EXTERNAL_PORT = 90 + sot = self.user_cloud.network.update_port_forwarding( + self.PF.id, self.FIP_ID, external_port=NEW_EXTERNAL_PORT + ) + self.assertEqual(NEW_EXTERNAL_PORT, sot.external_port) diff --git a/openstack/tests/functional/network/v2/test_qos_bandwidth_limit_rule.py b/openstack/tests/functional/network/v2/test_qos_bandwidth_limit_rule.py index 84e7aee861..58a258fb39 100644 --- a/openstack/tests/functional/network/v2/test_qos_bandwidth_limit_rule.py +++ b/openstack/tests/functional/network/v2/test_qos_bandwidth_limit_rule.py @@ -10,85 +10,105 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid -from openstack.network.v2 import (qos_bandwidth_limit_rule as - _qos_bandwidth_limit_rule) +from openstack.network.v2 import ( + qos_bandwidth_limit_rule as _qos_bandwidth_limit_rule, +) from openstack.tests.functional import base class TestQoSBandwidthLimitRule(base.BaseFunctionalTest): - QOS_POLICY_ID = None - QOS_POLICY_NAME = uuid.uuid4().hex QOS_IS_SHARED = False QOS_POLICY_DESCRIPTION = "QoS policy description" - RULE_ID = uuid.uuid4().hex RULE_MAX_KBPS = 1500 RULE_MAX_KBPS_NEW = 1800 RULE_MAX_BURST_KBPS = 1100 RULE_MAX_BURST_KBPS_NEW = 1300 - # NOTE(ralonsoh): to be implemented in bug 1560961. - # New checks must be added. - # RULE_DIRECTION = 'egress' - # RULE_DIRECTION_NEW = 'ingress' + RULE_DIRECTION = "egress" + RULE_DIRECTION_NEW = "ingress" + + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + + # Skip the tests if qos-bw-limit-direction extension is not enabled. + if not self.operator_cloud.network.find_extension( + "qos-bw-limit-direction" + ): + self.skipTest("Network qos-bw-limit-direction extension disabled") - @classmethod - def setUpClass(cls): - super(TestQoSBandwidthLimitRule, cls).setUpClass() - qos_policy = cls.conn.network.create_qos_policy( - description=cls.QOS_POLICY_DESCRIPTION, - name=cls.QOS_POLICY_NAME, - shared=cls.QOS_IS_SHARED, + self.QOS_POLICY_NAME = self.getUniqueString() + self.RULE_ID = self.getUniqueString() + qos_policy = self.operator_cloud.network.create_qos_policy( + description=self.QOS_POLICY_DESCRIPTION, + name=self.QOS_POLICY_NAME, + shared=self.QOS_IS_SHARED, ) - cls.QOS_POLICY_ID = qos_policy.id - qos_rule = cls.conn.network.create_qos_bandwidth_limit_rule( - cls.QOS_POLICY_ID, max_kbps=cls.RULE_MAX_KBPS, - max_burst_kbps=cls.RULE_MAX_BURST_KBPS, + self.QOS_POLICY_ID = qos_policy.id + qos_rule = self.operator_cloud.network.create_qos_bandwidth_limit_rule( + self.QOS_POLICY_ID, + max_kbps=self.RULE_MAX_KBPS, + 
max_burst_kbps=self.RULE_MAX_BURST_KBPS, + direction=self.RULE_DIRECTION, + ) + assert isinstance( + qos_rule, _qos_bandwidth_limit_rule.QoSBandwidthLimitRule ) - assert isinstance(qos_rule, - _qos_bandwidth_limit_rule.QoSBandwidthLimitRule) - cls.assertIs(cls.RULE_MAX_KBPS, qos_rule.max_kbps) - cls.assertIs(cls.RULE_MAX_BURST_KBPS, qos_rule.max_burst_kbps) - cls.RULE_ID = qos_rule.id + self.assertEqual(self.RULE_MAX_KBPS, qos_rule.max_kbps) + self.assertEqual(self.RULE_MAX_BURST_KBPS, qos_rule.max_burst_kbps) + self.assertEqual(self.RULE_DIRECTION, qos_rule.direction) + self.RULE_ID = qos_rule.id - @classmethod - def tearDownClass(cls): - rule = cls.conn.network.delete_qos_minimum_bandwidth_rule( - cls.RULE_ID, - cls.QOS_POLICY_ID) - qos_policy = cls.conn.network.delete_qos_policy(cls.QOS_POLICY_ID) - cls.assertIs(None, rule) - cls.assertIs(None, qos_policy) + def tearDown(self): + rule = self.operator_cloud.network.delete_qos_bandwidth_limit_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + qos_policy = self.operator_cloud.network.delete_qos_policy( + self.QOS_POLICY_ID + ) + self.assertIsNone(rule) + self.assertIsNone(qos_policy) + super().tearDown() def test_find(self): - sot = self.conn.network.find_qos_bandwidth_limit_rule( - self.RULE_ID, - self.QOS_POLICY_ID) + sot = self.operator_cloud.network.find_qos_bandwidth_limit_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.RULE_MAX_KBPS, sot.max_kbps) self.assertEqual(self.RULE_MAX_BURST_KBPS, sot.max_burst_kbps) + self.assertEqual(self.RULE_DIRECTION, sot.direction) def test_get(self): - sot = self.conn.network.get_qos_bandwidth_limit_rule( - self.RULE_ID, - self.QOS_POLICY_ID) + sot = self.operator_cloud.network.get_qos_bandwidth_limit_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) self.assertEqual(self.RULE_MAX_KBPS, sot.max_kbps) 
self.assertEqual(self.RULE_MAX_BURST_KBPS, sot.max_burst_kbps) + self.assertEqual(self.RULE_DIRECTION, sot.direction) def test_list(self): - rule_ids = [o.id for o in - self.conn.network.qos_bandwidth_limit_rules( - self.QOS_POLICY_ID)] + rule_ids = [ + o.id + for o in self.operator_cloud.network.qos_bandwidth_limit_rules( + self.QOS_POLICY_ID + ) + ] self.assertIn(self.RULE_ID, rule_ids) def test_update(self): - sot = self.conn.network.update_qos_bandwidth_limit_rule( + sot = self.operator_cloud.network.update_qos_bandwidth_limit_rule( self.RULE_ID, self.QOS_POLICY_ID, max_kbps=self.RULE_MAX_KBPS_NEW, - max_burst_kbps=self.RULE_MAX_BURST_KBPS_NEW) + max_burst_kbps=self.RULE_MAX_BURST_KBPS_NEW, + direction=self.RULE_DIRECTION_NEW, + ) self.assertEqual(self.RULE_MAX_KBPS_NEW, sot.max_kbps) self.assertEqual(self.RULE_MAX_BURST_KBPS_NEW, sot.max_burst_kbps) + self.assertEqual(self.RULE_DIRECTION_NEW, sot.direction) diff --git a/openstack/tests/functional/network/v2/test_qos_dscp_marking_rule.py b/openstack/tests/functional/network/v2/test_qos_dscp_marking_rule.py index 3930587c07..44f79ba256 100644 --- a/openstack/tests/functional/network/v2/test_qos_dscp_marking_rule.py +++ b/openstack/tests/functional/network/v2/test_qos_dscp_marking_rule.py @@ -10,72 +10,83 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid -from openstack.network.v2 import (qos_dscp_marking_rule as - _qos_dscp_marking_rule) +from openstack.network.v2 import ( + qos_dscp_marking_rule as _qos_dscp_marking_rule, +) from openstack.tests.functional import base class TestQoSDSCPMarkingRule(base.BaseFunctionalTest): - QOS_POLICY_ID = None - QOS_POLICY_NAME = uuid.uuid4().hex QOS_IS_SHARED = False QOS_POLICY_DESCRIPTION = "QoS policy description" - RULE_ID = uuid.uuid4().hex RULE_DSCP_MARK = 36 RULE_DSCP_MARK_NEW = 40 - @classmethod - def setUpClass(cls): - super(TestQoSDSCPMarkingRule, cls).setUpClass() - qos_policy = cls.conn.network.create_qos_policy( - description=cls.QOS_POLICY_DESCRIPTION, - name=cls.QOS_POLICY_NAME, - shared=cls.QOS_IS_SHARED, + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + # Skip the tests if qos extension is not enabled. + if not self.operator_cloud.network.find_extension("qos"): + self.skipTest("Network qos extension disabled") + + self.QOS_POLICY_NAME = self.getUniqueString() + self.RULE_ID = self.getUniqueString() + qos_policy = self.operator_cloud.network.create_qos_policy( + description=self.QOS_POLICY_DESCRIPTION, + name=self.QOS_POLICY_NAME, + shared=self.QOS_IS_SHARED, ) - cls.QOS_POLICY_ID = qos_policy.id - qos_rule = cls.conn.network.create_qos_dscp_marking_rule( - cls.QOS_POLICY_ID, dscp_mark=cls.RULE_DSCP_MARK, + self.QOS_POLICY_ID = qos_policy.id + qos_rule = self.operator_cloud.network.create_qos_dscp_marking_rule( + self.QOS_POLICY_ID, + dscp_mark=self.RULE_DSCP_MARK, ) assert isinstance(qos_rule, _qos_dscp_marking_rule.QoSDSCPMarkingRule) - cls.assertIs(cls.RULE_DSCP_MARK, qos_rule.dscp_mark) - cls.RULE_ID = qos_rule.id + self.assertEqual(self.RULE_DSCP_MARK, qos_rule.dscp_mark) + self.RULE_ID = qos_rule.id - @classmethod - def tearDownClass(cls): - rule = cls.conn.network.delete_qos_minimum_bandwidth_rule( - cls.RULE_ID, - cls.QOS_POLICY_ID) - qos_policy = 
cls.conn.network.delete_qos_policy(cls.QOS_POLICY_ID) - cls.assertIs(None, rule) - cls.assertIs(None, qos_policy) + def tearDown(self): + rule = self.operator_cloud.network.delete_qos_dscp_marking_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + qos_policy = self.operator_cloud.network.delete_qos_policy( + self.QOS_POLICY_ID + ) + self.assertIsNone(rule) + self.assertIsNone(qos_policy) + super().tearDown() def test_find(self): - sot = self.conn.network.find_qos_dscp_marking_rule( - self.RULE_ID, - self.QOS_POLICY_ID) + sot = self.operator_cloud.network.find_qos_dscp_marking_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.RULE_DSCP_MARK, sot.dscp_mark) def test_get(self): - sot = self.conn.network.get_qos_dscp_marking_rule( - self.RULE_ID, - self.QOS_POLICY_ID) + sot = self.operator_cloud.network.get_qos_dscp_marking_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) self.assertEqual(self.RULE_DSCP_MARK, sot.dscp_mark) def test_list(self): - rule_ids = [o.id for o in - self.conn.network.qos_dscp_marking_rules( - self.QOS_POLICY_ID)] + rule_ids = [ + o.id + for o in self.operator_cloud.network.qos_dscp_marking_rules( + self.QOS_POLICY_ID + ) + ] self.assertIn(self.RULE_ID, rule_ids) def test_update(self): - sot = self.conn.network.update_qos_dscp_marking_rule( - self.RULE_ID, - self.QOS_POLICY_ID, - dscp_mark=self.RULE_DSCP_MARK_NEW) + sot = self.operator_cloud.network.update_qos_dscp_marking_rule( + self.RULE_ID, self.QOS_POLICY_ID, dscp_mark=self.RULE_DSCP_MARK_NEW + ) self.assertEqual(self.RULE_DSCP_MARK_NEW, sot.dscp_mark) diff --git a/openstack/tests/functional/network/v2/test_qos_minimum_bandwidth_rule.py b/openstack/tests/functional/network/v2/test_qos_minimum_bandwidth_rule.py index 035121ca9d..eb0898cb98 100644 --- a/openstack/tests/functional/network/v2/test_qos_minimum_bandwidth_rule.py +++ 
b/openstack/tests/functional/network/v2/test_qos_minimum_bandwidth_rule.py @@ -10,78 +10,95 @@ # License for the specific language governing permissions and limitations # under the License. -import uuid -from openstack.network.v2 import (qos_minimum_bandwidth_rule as - _qos_minimum_bandwidth_rule) +from openstack.network.v2 import ( + qos_minimum_bandwidth_rule as _qos_minimum_bandwidth_rule, +) from openstack.tests.functional import base class TestQoSMinimumBandwidthRule(base.BaseFunctionalTest): - QOS_POLICY_ID = None - QOS_POLICY_NAME = uuid.uuid4().hex QOS_IS_SHARED = False QOS_POLICY_DESCRIPTION = "QoS policy description" RULE_ID = None RULE_MIN_KBPS = 1200 RULE_MIN_KBPS_NEW = 1800 - RULE_DIRECTION = 'egress' + RULE_DIRECTION = "egress" + + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") - @classmethod - def setUpClass(cls): - super(TestQoSMinimumBandwidthRule, cls).setUpClass() - qos_policy = cls.conn.network.create_qos_policy( - description=cls.QOS_POLICY_DESCRIPTION, - name=cls.QOS_POLICY_NAME, - shared=cls.QOS_IS_SHARED, + # Skip the tests if qos-bw-limit-direction extension is not enabled. 
+ if not self.operator_cloud.network.find_extension( + "qos-bw-limit-direction" + ): + self.skipTest("Network qos-bw-limit-direction extension disabled") + + self.QOS_POLICY_NAME = self.getUniqueString() + qos_policy = self.operator_cloud.network.create_qos_policy( + description=self.QOS_POLICY_DESCRIPTION, + name=self.QOS_POLICY_NAME, + shared=self.QOS_IS_SHARED, + ) + self.QOS_POLICY_ID = qos_policy.id + qos_min_bw_rule = ( + self.operator_cloud.network.create_qos_minimum_bandwidth_rule( + self.QOS_POLICY_ID, + direction=self.RULE_DIRECTION, + min_kbps=self.RULE_MIN_KBPS, + ) ) - cls.QOS_POLICY_ID = qos_policy.id - qos_min_bw_rule = cls.conn.network.create_qos_minimum_bandwidth_rule( - cls.QOS_POLICY_ID, direction=cls.RULE_DIRECTION, - min_kbps=cls.RULE_MIN_KBPS, + assert isinstance( + qos_min_bw_rule, + _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, ) - assert isinstance(qos_min_bw_rule, - _qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule) - cls.assertIs(cls.RULE_MIN_KBPS, qos_min_bw_rule.min_kbps) - cls.assertIs(cls.RULE_DIRECTION, qos_min_bw_rule.direction) - cls.RULE_ID = qos_min_bw_rule.id + self.assertEqual(self.RULE_MIN_KBPS, qos_min_bw_rule.min_kbps) + self.assertEqual(self.RULE_DIRECTION, qos_min_bw_rule.direction) + self.RULE_ID = qos_min_bw_rule.id - @classmethod - def tearDownClass(cls): - rule = cls.conn.network.delete_qos_minimum_bandwidth_rule( - cls.RULE_ID, - cls.QOS_POLICY_ID) - qos_policy = cls.conn.network.delete_qos_policy(cls.QOS_POLICY_ID) - cls.assertIs(None, rule) - cls.assertIs(None, qos_policy) + def tearDown(self): + rule = self.operator_cloud.network.delete_qos_minimum_bandwidth_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + qos_policy = self.operator_cloud.network.delete_qos_policy( + self.QOS_POLICY_ID + ) + self.assertIsNone(rule) + self.assertIsNone(qos_policy) + super().tearDown() def test_find(self): - sot = self.conn.network.find_qos_minimum_bandwidth_rule( - self.RULE_ID, - self.QOS_POLICY_ID) + sot = 
self.operator_cloud.network.find_qos_minimum_bandwidth_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.RULE_DIRECTION, sot.direction) self.assertEqual(self.RULE_MIN_KBPS, sot.min_kbps) def test_get(self): - sot = self.conn.network.get_qos_minimum_bandwidth_rule( - self.RULE_ID, - self.QOS_POLICY_ID) + sot = self.operator_cloud.network.get_qos_minimum_bandwidth_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) self.assertEqual(self.RULE_DIRECTION, sot.direction) self.assertEqual(self.RULE_MIN_KBPS, sot.min_kbps) def test_list(self): - rule_ids = [o.id for o in - self.conn.network.qos_minimum_bandwidth_rules( - self.QOS_POLICY_ID)] + rule_ids = [ + o.id + for o in self.operator_cloud.network.qos_minimum_bandwidth_rules( + self.QOS_POLICY_ID + ) + ] self.assertIn(self.RULE_ID, rule_ids) def test_update(self): - sot = self.conn.network.update_qos_minimum_bandwidth_rule( - self.RULE_ID, - self.QOS_POLICY_ID, - min_kbps=self.RULE_MIN_KBPS_NEW) + sot = self.operator_cloud.network.update_qos_minimum_bandwidth_rule( + self.RULE_ID, self.QOS_POLICY_ID, min_kbps=self.RULE_MIN_KBPS_NEW + ) self.assertEqual(self.RULE_MIN_KBPS_NEW, sot.min_kbps) diff --git a/openstack/tests/functional/network/v2/test_qos_minimum_packet_rate_rule.py b/openstack/tests/functional/network/v2/test_qos_minimum_packet_rate_rule.py new file mode 100644 index 0000000000..2329c77bcf --- /dev/null +++ b/openstack/tests/functional/network/v2/test_qos_minimum_packet_rate_rule.py @@ -0,0 +1,107 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import ( + qos_minimum_packet_rate_rule as _qos_minimum_packet_rate_rule, +) +from openstack.tests.functional import base + + +class TestQoSMinimumPacketRateRule(base.BaseFunctionalTest): + QOS_POLICY_ID = None + QOS_IS_SHARED = False + QOS_POLICY_DESCRIPTION = "QoS policy description" + RULE_ID = None + RULE_MIN_KPPS = 1200 + RULE_MIN_KPPS_NEW = 1800 + RULE_DIRECTION = "egress" + RULE_DIRECTION_NEW = "ingress" + + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + # Skip the tests if qos-pps-minimum extension is not enabled. 
+ if not self.operator_cloud.network.find_extension("qos-pps-minimum"): + self.skipTest("Network qos-pps-minimum extension disabled") + + self.QOS_POLICY_NAME = self.getUniqueString() + qos_policy = self.operator_cloud.network.create_qos_policy( + description=self.QOS_POLICY_DESCRIPTION, + name=self.QOS_POLICY_NAME, + shared=self.QOS_IS_SHARED, + ) + self.QOS_POLICY_ID = qos_policy.id + qos_min_pps_rule = ( + self.operator_cloud.network.create_qos_minimum_packet_rate_rule( + self.QOS_POLICY_ID, + direction=self.RULE_DIRECTION, + min_kpps=self.RULE_MIN_KPPS, + ) + ) + assert isinstance( + qos_min_pps_rule, + _qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + ) + self.assertEqual(self.RULE_MIN_KPPS, qos_min_pps_rule.min_kpps) + self.assertEqual(self.RULE_DIRECTION, qos_min_pps_rule.direction) + self.RULE_ID = qos_min_pps_rule.id + + def tearDown(self): + rule = self.operator_cloud.network.delete_qos_minimum_packet_rate_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + qos_policy = self.operator_cloud.network.delete_qos_policy( + self.QOS_POLICY_ID + ) + self.assertIsNone(rule) + self.assertIsNone(qos_policy) + super().tearDown() + + def test_find(self): + sot = self.operator_cloud.network.find_qos_minimum_packet_rate_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + self.assertEqual(self.RULE_ID, sot.id) + self.assertEqual(self.RULE_DIRECTION, sot.direction) + self.assertEqual(self.RULE_MIN_KPPS, sot.min_kpps) + + def test_get(self): + sot = self.operator_cloud.network.get_qos_minimum_packet_rate_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + self.assertEqual(self.RULE_ID, sot.id) + self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) + self.assertEqual(self.RULE_DIRECTION, sot.direction) + self.assertEqual(self.RULE_MIN_KPPS, sot.min_kpps) + + def test_list(self): + rule_ids = [ + o.id + for o in self.operator_cloud.network.qos_minimum_packet_rate_rules( + self.QOS_POLICY_ID + ) + ] + self.assertIn(self.RULE_ID, rule_ids) + + def test_update(self): + sot = 
self.operator_cloud.network.update_qos_minimum_packet_rate_rule( + self.RULE_ID, + self.QOS_POLICY_ID, + min_kpps=self.RULE_MIN_KPPS_NEW, + direction=self.RULE_DIRECTION_NEW, + ) + self.assertEqual(self.RULE_MIN_KPPS_NEW, sot.min_kpps) + self.assertEqual(self.RULE_DIRECTION_NEW, sot.direction) diff --git a/openstack/tests/functional/network/v2/test_qos_packet_rate_limit_rule.py b/openstack/tests/functional/network/v2/test_qos_packet_rate_limit_rule.py new file mode 100644 index 0000000000..9071d8b810 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_qos_packet_rate_limit_rule.py @@ -0,0 +1,115 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import ( + qos_packet_rate_limit_rule as _qos_packet_rate_limit_rule, +) +from openstack.tests.functional import base + + +class TestQoSPacketRateLimitRule(base.BaseFunctionalTest): + QOS_POLICY_ID = None + QOS_IS_SHARED = False + QOS_POLICY_DESCRIPTION = 'QoS policy description' + RULE_MAX_KPPS = 1500 + RULE_MAX_KPPS_NEW = 1800 + RULE_MAX_BURST_KPPS = 1100 + RULE_MAX_BURST_KPPS_NEW = 1300 + RULE_DIRECTION = 'egress' + RULE_DIRECTION_NEW = 'ingress' + RULE_DIRECTION_NEW_2 = 'any' + + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest('Operator cloud required for this test') + + # Skip the tests if qos-pps extension is not enabled. 
+ if not self.operator_cloud.network.find_extension('qos-pps'): + self.skipTest("Network qos-pps extension disabled") + + self.QOS_POLICY_NAME = self.getUniqueString() + self.RULE_ID = self.getUniqueString() + qos_policy = self.operator_cloud.network.create_qos_policy( + description=self.QOS_POLICY_DESCRIPTION, + name=self.QOS_POLICY_NAME, + shared=self.QOS_IS_SHARED, + ) + self.QOS_POLICY_ID = qos_policy.id + qos_rule = ( + self.operator_cloud.network.create_qos_packet_rate_limit_rule( + self.QOS_POLICY_ID, + max_kpps=self.RULE_MAX_KPPS, + max_burst_kpps=self.RULE_MAX_BURST_KPPS, + direction=self.RULE_DIRECTION, + ) + ) + assert isinstance( + qos_rule, _qos_packet_rate_limit_rule.QoSPacketRateLimitRule + ) + self.assertEqual(self.RULE_MAX_KPPS, qos_rule.max_kpps) + self.assertEqual(self.RULE_MAX_BURST_KPPS, qos_rule.max_burst_kpps) + self.assertEqual(self.RULE_DIRECTION, qos_rule.direction) + self.RULE_ID = qos_rule.id + + def tearDown(self): + rule = self.operator_cloud.network.delete_qos_packet_rate_limit_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + qos_policy = self.operator_cloud.network.delete_qos_policy( + self.QOS_POLICY_ID + ) + self.assertIsNone(rule) + self.assertIsNone(qos_policy) + super().tearDown() + + def test_find(self): + sot = self.operator_cloud.network.find_qos_packet_rate_limit_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + self.assertEqual(self.RULE_ID, sot.id) + self.assertEqual(self.RULE_MAX_KPPS, sot.max_kpps) + self.assertEqual(self.RULE_MAX_BURST_KPPS, sot.max_burst_kpps) + self.assertEqual(self.RULE_DIRECTION, sot.direction) + + def test_get(self): + sot = self.operator_cloud.network.get_qos_packet_rate_limit_rule( + self.RULE_ID, self.QOS_POLICY_ID + ) + self.assertEqual(self.RULE_ID, sot.id) + self.assertEqual(self.QOS_POLICY_ID, sot.qos_policy_id) + self.assertEqual(self.RULE_MAX_KPPS, sot.max_kpps) + self.assertEqual(self.RULE_MAX_BURST_KPPS, sot.max_burst_kpps) + self.assertEqual(self.RULE_DIRECTION, sot.direction) + + def 
test_list(self): + rule_ids = [ + o.id + for o in self.operator_cloud.network.qos_packet_rate_limit_rules( + self.QOS_POLICY_ID + ) + ] + self.assertIn(self.RULE_ID, rule_ids) + + def test_update(self): + sot = self.operator_cloud.network.update_qos_packet_rate_limit_rule( + self.RULE_ID, + self.QOS_POLICY_ID, + max_kpps=self.RULE_MAX_KPPS_NEW, + max_burst_kpps=self.RULE_MAX_BURST_KPPS_NEW, + direction=self.RULE_DIRECTION_NEW, + ) + self.assertEqual(self.RULE_MAX_KPPS_NEW, sot.max_kpps) + self.assertEqual(self.RULE_MAX_BURST_KPPS_NEW, sot.max_burst_kpps) + self.assertEqual(self.RULE_DIRECTION_NEW, sot.direction) diff --git a/openstack/tests/functional/network/v2/test_qos_policy.py b/openstack/tests/functional/network/v2/test_qos_policy.py index d9e164975d..8eccedf48b 100644 --- a/openstack/tests/functional/network/v2/test_qos_policy.py +++ b/openstack/tests/functional/network/v2/test_qos_policy.py @@ -10,55 +10,75 @@ # License for the specific language governing permissions and limitations # under the License. -import uuid from openstack.network.v2 import qos_policy as _qos_policy from openstack.tests.functional import base class TestQoSPolicy(base.BaseFunctionalTest): - QOS_POLICY_ID = None - QOS_POLICY_NAME = uuid.uuid4().hex - QOS_POLICY_NAME_UPDATED = uuid.uuid4().hex IS_SHARED = False - RULES = [] + IS_DEFAULT = False + RULES: list[str] = [] QOS_POLICY_DESCRIPTION = "QoS policy description" - @classmethod - def setUpClass(cls): - super(TestQoSPolicy, cls).setUpClass() - qos = cls.conn.network.create_qos_policy( - description=cls.QOS_POLICY_DESCRIPTION, - name=cls.QOS_POLICY_NAME, - shared=cls.IS_SHARED, + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + # Skip the tests if qos extension is not enabled. 
+ if not self.operator_cloud.network.find_extension("qos"): + self.skipTest("Network qos extension disabled") + + self.QOS_POLICY_NAME = self.getUniqueString() + self.QOS_POLICY_NAME_UPDATED = self.getUniqueString() + qos = self.operator_cloud.network.create_qos_policy( + description=self.QOS_POLICY_DESCRIPTION, + name=self.QOS_POLICY_NAME, + shared=self.IS_SHARED, + is_default=self.IS_DEFAULT, ) assert isinstance(qos, _qos_policy.QoSPolicy) - cls.assertIs(cls.QOS_POLICY_NAME, qos.name) - cls.QOS_POLICY_ID = qos.id + self.assertEqual(self.QOS_POLICY_NAME, qos.name) + self.QOS_POLICY_ID = qos.id - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_qos_policy(cls.QOS_POLICY_ID) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.operator_cloud.network.delete_qos_policy(self.QOS_POLICY_ID) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_qos_policy(self.QOS_POLICY_NAME) + sot = self.operator_cloud.network.find_qos_policy(self.QOS_POLICY_NAME) self.assertEqual(self.QOS_POLICY_ID, sot.id) def test_get(self): - sot = self.conn.network.get_qos_policy(self.QOS_POLICY_ID) + sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) self.assertEqual(self.QOS_POLICY_NAME, sot.name) self.assertEqual(self.IS_SHARED, sot.is_shared) self.assertEqual(self.RULES, sot.rules) self.assertEqual(self.QOS_POLICY_DESCRIPTION, sot.description) + self.assertEqual(self.IS_DEFAULT, sot.is_default) def test_list(self): - names = [o.name for o in self.conn.network.qos_policies()] + names = [o.name for o in self.operator_cloud.network.qos_policies()] self.assertIn(self.QOS_POLICY_NAME, names) def test_update(self): - sot = self.conn.network.update_qos_policy( - self.QOS_POLICY_ID, - name=self.QOS_POLICY_NAME_UPDATED) + sot = self.operator_cloud.network.update_qos_policy( + self.QOS_POLICY_ID, name=self.QOS_POLICY_NAME_UPDATED + ) self.assertEqual(self.QOS_POLICY_NAME_UPDATED, sot.name) + + def 
test_set_tags(self): + sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) + self.assertEqual([], sot.tags) + + self.operator_cloud.network.set_tags(sot, ["blue"]) + sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) + self.assertEqual(["blue"], sot.tags) + + self.operator_cloud.network.set_tags(sot, []) + sot = self.operator_cloud.network.get_qos_policy(self.QOS_POLICY_ID) + self.assertEqual([], sot.tags) diff --git a/openstack/tests/functional/network/v2/test_qos_rule_type.py b/openstack/tests/functional/network/v2/test_qos_rule_type.py index 96c545da3a..6491bc19e3 100644 --- a/openstack/tests/functional/network/v2/test_qos_rule_type.py +++ b/openstack/tests/functional/network/v2/test_qos_rule_type.py @@ -10,16 +10,39 @@ # License for the specific language governing permissions and limitations # under the License. -import six from openstack.tests.functional import base class TestQoSRuleType(base.BaseFunctionalTest): + QOS_RULE_TYPE = "bandwidth_limit" + + def setUp(self): + super().setUp() + if not self.operator_cloud: + self.skipTest("Operator cloud is required for this test") + + # Skip the tests if qos-rule-type-details extension is not enabled. 
+ if not self.operator_cloud.network.find_extension( + "qos-rule-type-details" + ): + self.skipTest("Network qos-rule-type-details extension disabled") + + def test_find(self): + sot = self.operator_cloud.network.find_qos_rule_type( + self.QOS_RULE_TYPE + ) + self.assertEqual(self.QOS_RULE_TYPE, sot.type) + self.assertIsInstance(sot.drivers, list) + + def test_get(self): + sot = self.operator_cloud.network.get_qos_rule_type(self.QOS_RULE_TYPE) + self.assertEqual(self.QOS_RULE_TYPE, sot.type) + self.assertIsInstance(sot.drivers, list) def test_list(self): - rule_types = list(self.conn.network.qos_rule_types()) + rule_types = list(self.operator_cloud.network.qos_rule_types()) self.assertGreater(len(rule_types), 0) for rule_type in rule_types: - self.assertIsInstance(rule_type.type, six.string_types) + self.assertIsInstance(rule_type.type, str) diff --git a/openstack/tests/functional/network/v2/test_quota.py b/openstack/tests/functional/network/v2/test_quota.py index 16fb87da21..248f81583c 100644 --- a/openstack/tests/functional/network/v2/test_quota.py +++ b/openstack/tests/functional/network/v2/test_quota.py @@ -10,20 +10,48 @@ # License for the specific language governing permissions and limitations # under the License. 
+from openstack.network.v2 import quota as _quota from openstack.tests.functional import base class TestQuota(base.BaseFunctionalTest): + def setUp(self): + super().setUp() - def test_list(self): - sot = self.conn.network.quotas() - for qot in sot: - self.assertIn('subnet', qot) - self.assertIn('network', qot) - self.assertIn('router', qot) - self.assertIn('port', qot) - self.assertIn('floatingip', qot) - self.assertIn('security_group_rule', qot) - self.assertIn('security_group', qot) - self.assertIn('subnetpool', qot) - self.assertIn('rbac_policy', qot) + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + + self.project = self.create_temporary_project() + + def test_quota(self): + # update quota + + quota = self.operator_cloud.network.update_quota( + self.project.id, networks=123456789 + ) + self.assertIsInstance(quota, _quota.Quota) + self.assertEqual(quota.networks, 123456789) + + # retrieve details of the (updated) quota + + quota = self.operator_cloud.network.get_quota(self.project.id) + self.assertIsInstance(quota, _quota.Quota) + self.assertEqual(quota.networks, 123456789) + + # retrieve quota defaults + + defaults = self.operator_cloud.network.get_quota_default( + self.project.id + ) + self.assertIsInstance(defaults, _quota.QuotaDefault) + self.assertNotEqual(defaults.networks, 123456789) + + # list quotas + + quotas = list(self.operator_cloud.network.quotas()) + self.assertIn(self.project.id, [x.project_id for x in quotas]) + + # revert quota + + ret = self.operator_cloud.network.delete_quota(self.project.id) + self.assertIsNone(ret) diff --git a/openstack/tests/functional/network/v2/test_rbac_policy.py b/openstack/tests/functional/network/v2/test_rbac_policy.py index 1c28b886b7..ba4c2fb804 100644 --- a/openstack/tests/functional/network/v2/test_rbac_policy.py +++ b/openstack/tests/functional/network/v2/test_rbac_policy.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under 
the License. -import uuid from openstack.network.v2 import network from openstack.network.v2 import rbac_policy @@ -18,47 +17,77 @@ class TestRBACPolicy(base.BaseFunctionalTest): - - NET_NAME = 'net-' + uuid.uuid4().hex - UPDATE_NAME = uuid.uuid4().hex - ACTION = 'access_as_shared' - OBJ_TYPE = 'network' - TARGET_TENANT_ID = '*' + ACTION = "access_as_shared" + OBJ_TYPE = "network" + TARGET_TENANT_ID = "*" NET_ID = None ID = None - @classmethod - def setUpClass(cls): - super(TestRBACPolicy, cls).setUpClass() - net = cls.conn.network.create_network(name=cls.NET_NAME) - assert isinstance(net, network.Network) - cls.NET_ID = net.id + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("rbac-policies"): + self.skipTest( + "Neutron rbac-policies extension is required for this test" + ) - sot = cls.conn.network.\ - create_rbac_policy(action=cls.ACTION, - object_type=cls.OBJ_TYPE, - target_tenant=cls.TARGET_TENANT_ID, - object_id=cls.NET_ID) - assert isinstance(sot, rbac_policy.RBACPolicy) - cls.ID = sot.id + self.NET_NAME = self.getUniqueString("net") + self.UPDATE_NAME = self.getUniqueString() + net = self.user_cloud.network.create_network(name=self.NET_NAME) + assert isinstance(net, network.Network) + self.NET_ID = net.id + if self.operator_cloud: + sot = self.operator_cloud.network.create_rbac_policy( + action=self.ACTION, + object_type=self.OBJ_TYPE, + target_tenant=self.TARGET_TENANT_ID, + object_id=self.NET_ID, + ) + assert isinstance(sot, rbac_policy.RBACPolicy) + self.ID = sot.id + else: + sot = self.user_cloud.network.create_rbac_policy( + action=self.ACTION, + object_type=self.OBJ_TYPE, + target_tenant=self.user_cloud.current_project_id, + object_id=self.NET_ID, + ) + assert isinstance(sot, rbac_policy.RBACPolicy) + self.ID = sot.id - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_rbac_policy(cls.ID, - ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_network(cls.NET_ID, - 
ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + if self.operator_cloud: + sot = self.operator_cloud.network.delete_rbac_policy( + self.ID, ignore_missing=False + ) + else: + sot = self.user_cloud.network.delete_rbac_policy( + self.ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_rbac_policy(self.ID) + if self.operator_cloud: + sot = self.operator_cloud.network.find_rbac_policy(self.ID) + else: + sot = self.user_cloud.network.find_rbac_policy(self.ID) self.assertEqual(self.ID, sot.id) def test_get(self): - sot = self.conn.network.get_rbac_policy(self.ID) + if self.operator_cloud: + sot = self.operator_cloud.network.get_rbac_policy(self.ID) + else: + sot = self.user_cloud.network.get_rbac_policy(self.ID) self.assertEqual(self.ID, sot.id) def test_list(self): - ids = [o.id for o in self.conn.network.rbac_policies()] - self.assertIn(self.ID, ids) + if self.operator_cloud: + ids = [o.id for o in self.operator_cloud.network.rbac_policies()] + else: + ids = [o.id for o in self.user_cloud.network.rbac_policies()] + if self.ID: + self.assertIn(self.ID, ids) diff --git a/openstack/tests/functional/network/v2/test_router.py b/openstack/tests/functional/network/v2/test_router.py index 1102c5da18..b40deec262 100644 --- a/openstack/tests/functional/network/v2/test_router.py +++ b/openstack/tests/functional/network/v2/test_router.py @@ -9,48 +9,52 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- -import uuid +# mypy: disable-error-code="method-assign" from openstack.network.v2 import router -from openstack.tests.functional import base - +from openstack.tests.functional.network.v2 import common -class TestRouter(base.BaseFunctionalTest): - NAME = uuid.uuid4().hex - UPDATE_NAME = uuid.uuid4().hex +class TestRouter(common.TestTagNeutron): ID = None - @classmethod - def setUpClass(cls): - super(TestRouter, cls).setUpClass() - sot = cls.conn.network.create_router(name=cls.NAME) + def setUp(self): + super().setUp() + self.NAME = self.getUniqueString() + self.UPDATE_NAME = self.getUniqueString() + sot = self.user_cloud.network.create_router(name=self.NAME) assert isinstance(sot, router.Router) - cls.assertIs(cls.NAME, sot.name) - cls.ID = sot.id + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id + self.get_command = self.user_cloud.network.get_router - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_router(cls.ID, ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_router( + self.ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_router(self.NAME) + sot = self.user_cloud.network.find_router(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): - sot = self.conn.network.get_router(self.ID) + sot = self.user_cloud.network.get_router(self.ID) self.assertEqual(self.NAME, sot.name) self.assertEqual(self.ID, sot.id) - self.assertFalse(sot.is_ha) + if not self.user_cloud._has_neutron_extension("l3-ha"): + self.assertFalse(sot.is_ha) def test_list(self): - names = [o.name for o in self.conn.network.routers()] + names = [o.name for o in self.user_cloud.network.routers()] self.assertIn(self.NAME, names) - ha = [o.is_ha for o in self.conn.network.routers()] - self.assertIn(False, ha) + if not self.user_cloud._has_neutron_extension("l3-ha"): + ha = [o.is_ha for o in self.user_cloud.network.routers()] 
+ self.assertIn(False, ha) def test_update(self): - sot = self.conn.network.update_router(self.ID, name=self.UPDATE_NAME) + sot = self.user_cloud.network.update_router( + self.ID, name=self.UPDATE_NAME + ) self.assertEqual(self.UPDATE_NAME, sot.name) diff --git a/openstack/tests/functional/network/v2/test_router_add_remove_interface.py b/openstack/tests/functional/network/v2/test_router_add_remove_interface.py index 27072dd337..0a1c1ff36c 100644 --- a/openstack/tests/functional/network/v2/test_router_add_remove_interface.py +++ b/openstack/tests/functional/network/v2/test_router_add_remove_interface.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -import uuid from openstack.network.v2 import network from openstack.network.v2 import router @@ -19,57 +18,62 @@ class TestRouterInterface(base.BaseFunctionalTest): - - ROUTER_NAME = uuid.uuid4().hex - NET_NAME = uuid.uuid4().hex - SUB_NAME = uuid.uuid4().hex CIDR = "10.100.0.0/16" IPV4 = 4 - ROUTER_ID = None - NET_ID = None - SUB_ID = None - ROT = None + ROUTER_ID: str + NET_ID: str + SUB_ID: str + ROT: router.Router - @classmethod - def setUpClass(cls): - super(TestRouterInterface, cls).setUpClass() - sot = cls.conn.network.create_router(name=cls.ROUTER_NAME) + def setUp(self): + super().setUp() + self.ROUTER_NAME = self.getUniqueString() + self.NET_NAME = self.getUniqueString() + self.SUB_NAME = self.getUniqueString() + sot = self.user_cloud.network.create_router(name=self.ROUTER_NAME) assert isinstance(sot, router.Router) - cls.assertIs(cls.ROUTER_NAME, sot.name) - net = cls.conn.network.create_network(name=cls.NET_NAME) + self.assertEqual(self.ROUTER_NAME, sot.name) + net = self.user_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) - cls.assertIs(cls.NET_NAME, net.name) - sub = cls.conn.network.create_subnet(name=cls.SUB_NAME, - ip_version=cls.IPV4, - network_id=net.id, - cidr=cls.CIDR) + 
self.assertEqual(self.NET_NAME, net.name) + sub = self.user_cloud.network.create_subnet( + name=self.SUB_NAME, + ip_version=self.IPV4, + network_id=net.id, + cidr=self.CIDR, + ) assert isinstance(sub, subnet.Subnet) - cls.assertIs(cls.SUB_NAME, sub.name) - cls.ROUTER_ID = sot.id - cls.ROT = sot - cls.NET_ID = net.id - cls.SUB_ID = sub.id + self.assertEqual(self.SUB_NAME, sub.name) + self.ROUTER_ID = sot.id + self.ROT = sot + self.NET_ID = net.id + self.SUB_ID = sub.id - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_router(cls.ROUTER_ID, - ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_subnet(cls.SUB_ID, ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_network(cls.NET_ID, ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_router( + self.ROUTER_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_subnet( + self.SUB_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() - def test_router_add_interface(self): - iface = self.ROT.add_interface(self.conn.session, - subnet_id=self.SUB_ID) + def test_router_add_remove_interface(self): + iface = self.ROT.add_interface( + self.user_cloud.network, subnet_id=self.SUB_ID + ) self._verification(iface) - - def test_router_remove_interface(self): - iface = self.ROT.remove_interface(self.conn.session, - subnet_id=self.SUB_ID) + iface = self.ROT.remove_interface( + self.user_cloud.network, subnet_id=self.SUB_ID + ) self._verification(iface) def _verification(self, interface): - self.assertEqual(interface['subnet_id'], self.SUB_ID) - self.assertIn('port_id', interface) + self.assertEqual(interface["subnet_id"], self.SUB_ID) + self.assertIn("port_id", interface) diff --git 
a/openstack/tests/functional/network/v2/test_security_group.py b/openstack/tests/functional/network/v2/test_security_group.py index cc9fa114e0..0f4e18f289 100644 --- a/openstack/tests/functional/network/v2/test_security_group.py +++ b/openstack/tests/functional/network/v2/test_security_group.py @@ -9,41 +9,46 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -import uuid +# mypy: disable-error-code="method-assign" from openstack.network.v2 import security_group -from openstack.tests.functional import base - +from openstack.tests.functional.network.v2 import common -class TestSecurityGroup(base.BaseFunctionalTest): - NAME = uuid.uuid4().hex +class TestSecurityGroup(common.TestTagNeutron): ID = None - @classmethod - def setUpClass(cls): - super(TestSecurityGroup, cls).setUpClass() - sot = cls.conn.network.create_security_group(name=cls.NAME) + def setUp(self): + super().setUp() + self.NAME = self.getUniqueString() + sot = self.user_cloud.network.create_security_group(name=self.NAME) assert isinstance(sot, security_group.SecurityGroup) - cls.assertIs(cls.NAME, sot.name) - cls.ID = sot.id + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id + self.get_command = self.user_cloud.network.get_security_group - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_security_group(cls.ID, - ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_security_group( + self.ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_security_group(self.NAME) + sot = self.user_cloud.network.find_security_group(self.NAME) self.assertEqual(self.ID, sot.id) def test_get(self): - sot = self.conn.network.get_security_group(self.ID) + sot = self.user_cloud.network.get_security_group(self.ID) self.assertEqual(self.NAME, 
sot.name) self.assertEqual(self.ID, sot.id) def test_list(self): - names = [o.name for o in self.conn.network.security_groups()] + names = [o.name for o in self.user_cloud.network.security_groups()] self.assertIn(self.NAME, names) + + def test_list_query_list_of_ids(self): + ids = [ + o.id for o in self.user_cloud.network.security_groups(id=[self.ID]) + ] + self.assertIn(self.ID, ids) diff --git a/openstack/tests/functional/network/v2/test_security_group_rule.py b/openstack/tests/functional/network/v2/test_security_group_rule.py index f114fc4e45..27d91cf840 100644 --- a/openstack/tests/functional/network/v2/test_security_group_rule.py +++ b/openstack/tests/functional/network/v2/test_security_group_rule.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. -import uuid from openstack.network.v2 import security_group from openstack.network.v2 import security_group_rule @@ -18,45 +17,49 @@ class TestSecurityGroupRule(base.BaseFunctionalTest): - - NAME = uuid.uuid4().hex - IPV4 = 'IPv4' - PROTO = 'tcp' + IPV4 = "IPv4" + PROTO = "tcp" PORT = 22 - DIR = 'ingress' + DIR = "ingress" ID = None RULE_ID = None - @classmethod - def setUpClass(cls): - super(TestSecurityGroupRule, cls).setUpClass() - sot = cls.conn.network.create_security_group(name=cls.NAME) + def setUp(self): + super().setUp() + self.NAME = self.getUniqueString() + sot = self.user_cloud.network.create_security_group(name=self.NAME) assert isinstance(sot, security_group.SecurityGroup) - cls.assertIs(cls.NAME, sot.name) - cls.ID = sot.id - rul = cls.conn.network.create_security_group_rule( - direction=cls.DIR, ethertype=cls.IPV4, - port_range_max=cls.PORT, port_range_min=cls.PORT, - protocol=cls.PROTO, security_group_id=cls.ID) + self.assertEqual(self.NAME, sot.name) + self.ID = sot.id + rul = self.user_cloud.network.create_security_group_rule( + direction=self.DIR, + ethertype=self.IPV4, + port_range_max=self.PORT, + port_range_min=self.PORT, + 
protocol=self.PROTO, + security_group_id=self.ID, + ) assert isinstance(rul, security_group_rule.SecurityGroupRule) - cls.assertIs(cls.ID, rul.security_group_id) - cls.RULE_ID = rul.id + self.assertEqual(self.ID, rul.security_group_id) + self.RULE_ID = rul.id - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_security_group_rule(cls.RULE_ID, - ignore_missing=False) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_security_group(cls.ID, - ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_security_group_rule( + self.RULE_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_security_group( + self.ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_security_group_rule(self.RULE_ID) + sot = self.user_cloud.network.find_security_group_rule(self.RULE_ID) self.assertEqual(self.RULE_ID, sot.id) def test_get(self): - sot = self.conn.network.get_security_group_rule(self.RULE_ID) + sot = self.user_cloud.network.get_security_group_rule(self.RULE_ID) self.assertEqual(self.RULE_ID, sot.id) self.assertEqual(self.DIR, sot.direction) self.assertEqual(self.PROTO, sot.protocol) @@ -65,5 +68,5 @@ def test_get(self): self.assertEqual(self.ID, sot.security_group_id) def test_list(self): - ids = [o.id for o in self.conn.network.security_group_rules()] + ids = [o.id for o in self.user_cloud.network.security_group_rules()] self.assertIn(self.RULE_ID, ids) diff --git a/openstack/tests/functional/network/v2/test_segment.py b/openstack/tests/functional/network/v2/test_segment.py index 54200ded0a..43ba82008e 100644 --- a/openstack/tests/functional/network/v2/test_segment.py +++ b/openstack/tests/functional/network/v2/test_segment.py @@ -10,7 +10,6 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid from openstack.network.v2 import network from openstack.network.v2 import segment @@ -18,8 +17,6 @@ class TestSegment(base.BaseFunctionalTest): - - NETWORK_NAME = uuid.uuid4().hex NETWORK_TYPE = None PHYSICAL_NETWORK = None SEGMENTATION_ID = None @@ -27,88 +24,81 @@ class TestSegment(base.BaseFunctionalTest): SEGMENT_ID = None SEGMENT_EXTENSION = None - @classmethod - def setUpClass(cls): - super(TestSegment, cls).setUpClass() + def setUp(self): + super().setUp() + self.NETWORK_NAME = self.getUniqueString() + + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") # NOTE(rtheis): The segment extension is not yet enabled by default. # Skip the tests if not enabled. - cls.SEGMENT_EXTENSION = cls.conn.network.find_extension('segment') + if not self.operator_cloud.network.find_extension("segment"): + self.skipTest("Segment extension disabled") # Create a network to hold the segment. - net = cls.conn.network.create_network(name=cls.NETWORK_NAME) + net = self.operator_cloud.network.create_network( + name=self.NETWORK_NAME + ) assert isinstance(net, network.Network) - cls.assertIs(cls.NETWORK_NAME, net.name) - cls.NETWORK_ID = net.id + self.assertEqual(self.NETWORK_NAME, net.name) + self.NETWORK_ID = net.id - if cls.SEGMENT_EXTENSION: + if self.SEGMENT_EXTENSION: # Get the segment for the network. 
- for seg in cls.conn.network.segments(): + for seg in self.operator_cloud.network.segments(): assert isinstance(seg, segment.Segment) - if cls.NETWORK_ID == seg.network_id: - cls.NETWORK_TYPE = seg.network_type - cls.PHYSICAL_NETWORK = seg.physical_network - cls.SEGMENTATION_ID = seg.segmentation_id - cls.SEGMENT_ID = seg.id + if self.NETWORK_ID == seg.network_id: + self.NETWORK_TYPE = seg.network_type + self.PHYSICAL_NETWORK = seg.physical_network + self.SEGMENTATION_ID = seg.segmentation_id + self.SEGMENT_ID = seg.id break - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_network(cls.NETWORK_ID, - ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.operator_cloud.network.delete_network( + self.NETWORK_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_create_delete(self): - if self.SEGMENT_EXTENSION: - sot = self.conn.network.create_segment( - description='test description', - name='test name', - network_id=self.NETWORK_ID, - network_type='geneve', - segmentation_id=2055, - ) - self.assertIsInstance(sot, segment.Segment) - del_sot = self.conn.network.delete_segment(sot.id) - self.assertEqual('test description', sot.description) - self.assertEqual('test name', sot.name) - self.assertEqual(self.NETWORK_ID, sot.network_id) - self.assertEqual('geneve', sot.network_type) - self.assertIsNone(sot.physical_network) - self.assertEqual(2055, sot.segmentation_id) - self.assertIsNone(del_sot) - else: - self.skipTest('Segment extension disabled') + sot = self.operator_cloud.network.create_segment( + description="test description", + name="test name", + network_id=self.NETWORK_ID, + network_type="geneve", + segmentation_id=2055, + ) + self.assertIsInstance(sot, segment.Segment) + del_sot = self.operator_cloud.network.delete_segment(sot.id) + self.assertEqual("test description", sot.description) + self.assertEqual("test name", sot.name) + self.assertEqual(self.NETWORK_ID, sot.network_id) + 
self.assertEqual("geneve", sot.network_type) + self.assertIsNone(sot.physical_network) + self.assertEqual(2055, sot.segmentation_id) + self.assertIsNone(del_sot) def test_find(self): - if self.SEGMENT_EXTENSION: - sot = self.conn.network.find_segment(self.SEGMENT_ID) - self.assertEqual(self.SEGMENT_ID, sot.id) - else: - self.skipTest('Segment extension disabled') + sot = self.operator_cloud.network.find_segment(self.SEGMENT_ID) + self.assertEqual(self.SEGMENT_ID, sot.id) def test_get(self): - if self.SEGMENT_EXTENSION: - sot = self.conn.network.get_segment(self.SEGMENT_ID) - self.assertEqual(self.SEGMENT_ID, sot.id) - self.assertIsNone(sot.name) - self.assertEqual(self.NETWORK_ID, sot.network_id) - self.assertEqual(self.NETWORK_TYPE, sot.network_type) - self.assertEqual(self.PHYSICAL_NETWORK, sot.physical_network) - self.assertEqual(self.SEGMENTATION_ID, sot.segmentation_id) - else: - self.skipTest('Segment extension disabled') + sot = self.operator_cloud.network.get_segment(self.SEGMENT_ID) + self.assertEqual(self.SEGMENT_ID, sot.id) + self.assertIsNone(sot.name) + self.assertEqual(self.NETWORK_ID, sot.network_id) + self.assertEqual(self.NETWORK_TYPE, sot.network_type) + self.assertEqual(self.PHYSICAL_NETWORK, sot.physical_network) + self.assertEqual(self.SEGMENTATION_ID, sot.segmentation_id) def test_list(self): - if self.SEGMENT_EXTENSION: - ids = [o.id for o in self.conn.network.segments(name=None)] - self.assertIn(self.SEGMENT_ID, ids) - else: - self.skipTest('Segment extension disabled') + ids = [o.id for o in self.operator_cloud.network.segments(name=None)] + self.assertIn(self.SEGMENT_ID, ids) def test_update(self): - if self.SEGMENT_EXTENSION: - sot = self.conn.network.update_segment(self.SEGMENT_ID, - description='update') - self.assertEqual('update', sot.description) - else: - self.skipTest('Segment extension disabled') + sot = self.operator_cloud.network.update_segment( + self.SEGMENT_ID, description="update" + ) + self.assertEqual("update", 
sot.description) diff --git a/openstack/tests/functional/network/v2/test_service_profile.py b/openstack/tests/functional/network/v2/test_service_profile.py index 34fe31ac04..6bee7f47d6 100644 --- a/openstack/tests/functional/network/v2/test_service_profile.py +++ b/openstack/tests/functional/network/v2/test_service_profile.py @@ -15,50 +15,80 @@ class TestServiceProfile(base.BaseFunctionalTest): - SERVICE_PROFILE_DESCRIPTION = "DESCRIPTION" UPDATE_DESCRIPTION = "UPDATED-DESCRIPTION" METAINFO = "FlAVOR_PROFILE_METAINFO" ID = None - @classmethod - def setUpClass(cls): - super(TestServiceProfile, cls).setUpClass() - service_profiles = cls.conn.network.create_service_profile( - description=cls.SERVICE_PROFILE_DESCRIPTION, - metainfo=cls.METAINFO,) - assert isinstance(service_profiles, _service_profile.ServiceProfile) - cls.assertIs(cls.SERVICE_PROFILE_DESCRIPTION, - service_profiles.description) - cls.assertIs(cls.METAINFO, service_profiles.metainfo) + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("flavors"): + self.skipTest("Neutron flavor extension is required for this test") + + if self.operator_cloud: + service_profiles = ( + self.operator_cloud.network.create_service_profile( + description=self.SERVICE_PROFILE_DESCRIPTION, + metainfo=self.METAINFO, + ) + ) + assert isinstance( + service_profiles, _service_profile.ServiceProfile + ) + self.assertEqual( + self.SERVICE_PROFILE_DESCRIPTION, service_profiles.description + ) + self.assertEqual(self.METAINFO, service_profiles.meta_info) - cls.ID = service_profiles.id + self.ID = service_profiles.id - @classmethod - def tearDownClass(cls): - service_profiles = cls.conn.network.delete_service_profile( - cls.ID, - ignore_missing=True) - cls.assertIs(None, service_profiles) + def tearDown(self): + if self.ID: + service_profiles = ( + self.operator_cloud.network.delete_service_profile( + self.ID, ignore_missing=True + ) + ) + self.assertIsNone(service_profiles) + super().tearDown() def 
test_find(self): - service_profiles = self.conn.network.find_service_profile( - self.ID) - self.assertEqual(self.METAINFO, - service_profiles.metainfo) + self.user_cloud.network.find_service_profile( + name_or_id="not_existing", ignore_missing=True + ) + if self.operator_cloud and self.ID: + service_profiles = ( + self.operator_cloud.network.find_service_profile(self.ID) + ) + self.assertEqual(self.METAINFO, service_profiles.meta_info) def test_get(self): - service_profiles = self.conn.network.get_service_profile(self.ID) - self.assertEqual(self.METAINFO, service_profiles.metainfo) - self.assertEqual(self.SERVICE_PROFILE_DESCRIPTION, - service_profiles.description) + if not self.ID: + self.skipTest("ServiceProfile was not created") + service_profiles = self.operator_cloud.network.get_service_profile( + self.ID + ) + self.assertEqual(self.METAINFO, service_profiles.meta_info) + self.assertEqual( + self.SERVICE_PROFILE_DESCRIPTION, service_profiles.description + ) def test_update(self): - service_profiles = self.conn.network.update_service_profile( - self.ID, - description=self.UPDATE_DESCRIPTION) + if not self.ID: + self.skipTest("ServiceProfile was not created") + service_profiles = self.operator_cloud.network.update_service_profile( + self.ID, description=self.UPDATE_DESCRIPTION + ) self.assertEqual(self.UPDATE_DESCRIPTION, service_profiles.description) def test_list(self): - metainfos = [f.metainfo for f in self.conn.network.service_profiles()] - self.assertIn(self.METAINFO, metainfos) + # Test in user scope + self.user_cloud.network.service_profiles() + # Test as operator + if self.operator_cloud: + metainfos = [ + f.meta_info + for f in self.operator_cloud.network.service_profiles() + ] + if self.ID: + self.assertIn(self.METAINFO, metainfos) diff --git a/openstack/tests/functional/network/v2/test_service_provider.py b/openstack/tests/functional/network/v2/test_service_provider.py index 5db2d2bf15..4dcd210378 100644 --- 
a/openstack/tests/functional/network/v2/test_service_provider.py +++ b/openstack/tests/functional/network/v2/test_service_provider.py @@ -10,15 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. -import six - from openstack.tests.functional import base class TestServiceProvider(base.BaseFunctionalTest): def test_list(self): - providers = list(self.conn.network.service_providers()) - - for provide in providers: - self.assertIsInstance(provide.name, six.string_type) - self.assertIsInstance(provide.service_type, six.string_types) + providers = list(self.user_cloud.network.service_providers()) + names = [o.name for o in providers] + service_types = [o.service_type for o in providers] + if self.user_cloud._has_neutron_extension("l3-ha"): + self.assertIn("ha", names) + self.assertIn("L3_ROUTER_NAT", service_types) diff --git a/openstack/tests/functional/network/v2/test_sfc.py b/openstack/tests/functional/network/v2/test_sfc.py new file mode 100644 index 0000000000..c52ecc57ff --- /dev/null +++ b/openstack/tests/functional/network/v2/test_sfc.py @@ -0,0 +1,136 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import network as _network +from openstack.network.v2 import port as _port +from openstack.network.v2 import sfc_flow_classifier as _flow_classifier +from openstack.network.v2 import subnet as _subnet +from openstack.tests.functional import base + + +class TestSFCFlowClassifier(base.BaseFunctionalTest): + FC_ID = None + + def setUp(self): + super().setUp() + + if not self.user_cloud.network.find_extension("sfc"): + self.skipTest("Neutron SFC Extension disabled") + + self.FLOW_CLASSIFIER_NAME = 'my_classifier' + self.getUniqueString() + self.UPDATE_NAME = 'updated' + self.getUniqueString() + self.NET_NAME = 'network1' + self.getUniqueString() + self.SUBNET_NAME = 'subnet1' + self.getUniqueString() + self.PORT1_NAME = 'port1' + self.getUniqueString() + self.PORT2_NAME = 'port2' + self.getUniqueString() + self.ETHERTYPE = 'IPv4' + self.PROTOCOL = 'tcp' + self.S_PORT_RANGE_MIN = 80 + self.S_PORT_RANGE_MAX = 80 + self.D_PORT_RANGE_MIN = 180 + self.D_PORT_RANGE_MAX = 180 + self.CIDR = "10.101.0.0/24" + self.SOURCE_IP = '10.101.1.12/32' + self.DESTINATION_IP = '10.102.2.12/32' + + self.PORT_CHAIN_NAME = 'my_chain' + self.getUniqueString() + self.PORT_PAIR_NAME = 'my_port_pair' + self.getUniqueString() + self.PORT_PAIR_GROUP_NAME = ( + 'my_port_pair_group' + self.getUniqueString() + ) + self.SERVICE_GRAPH_NAME = 'my_service_graph' + self.getUniqueString() + self.op_net_client = self.operator_cloud.network + + net = self.op_net_client.create_network(name=self.NET_NAME) + self.assertIsInstance(net, _network.Network) + self.NETWORK = net + subnet = self.operator_cloud.network.create_subnet( + name=self.SUBNET_NAME, + ip_version=4, + network_id=self.NETWORK.id, + cidr=self.CIDR, + ) + self.assertIsInstance(subnet, _subnet.Subnet) + self.SUBNET = subnet + + self.PORT1 = self._create_port( + network=self.NETWORK, port_name=self.PORT1_NAME + ) + self.PORT2 = self._create_port( + network=self.NETWORK, port_name=self.PORT2_NAME + ) + + flow_cls = 
self.op_net_client.create_sfc_flow_classifier( + name=self.FLOW_CLASSIFIER_NAME, + ethertype=self.ETHERTYPE, + protocol=self.PROTOCOL, + source_port_range_min=self.S_PORT_RANGE_MIN, + source_port_range_max=self.S_PORT_RANGE_MAX, + destination_port_range_min=self.D_PORT_RANGE_MIN, + destination_port_range_max=self.D_PORT_RANGE_MAX, + source_ip_prefix=self.SOURCE_IP, + destination_ip_prefix=self.DESTINATION_IP, + logical_source_port=self.PORT1.id, + logical_destination_port=self.PORT2.id, + ) + self.assertIsInstance(flow_cls, _flow_classifier.SfcFlowClassifier) + self.FLOW_CLASSIFIER = flow_cls + self.FC_ID = flow_cls.id + + def _create_port(self, network, port_name): + port = self.op_net_client.create_port( + name=port_name, + network_id=network.id, + ) + self.assertIsInstance(port, _port.Port) + return port + + def tearDown(self): + sot = self.operator_cloud.network.delete_sfc_flow_classifier( + self.FLOW_CLASSIFIER.id, ignore_missing=True + ) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_port(self.PORT1.id) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_port(self.PORT2.id) + self.assertIsNone(sot) + + sot = self.operator_cloud.network.delete_subnet(self.SUBNET.id) + self.assertIsNone(sot) + sot = self.operator_cloud.network.delete_network(self.NETWORK.id) + self.assertIsNone(sot) + super().tearDown() + + def test_sfc_flow_classifier(self): + sot = self.operator_cloud.network.find_sfc_flow_classifier( + self.FLOW_CLASSIFIER.name + ) + self.assertEqual(self.ETHERTYPE, sot.ethertype) + self.assertEqual(self.SOURCE_IP, sot.source_ip_prefix) + self.assertEqual(self.PROTOCOL, sot.protocol) + + classifiers = [ + fc.name + for fc in self.operator_cloud.network.sfc_flow_classifiers() + ] + self.assertIn(self.FLOW_CLASSIFIER_NAME, classifiers) + + classifier = self.operator_cloud.network.get_sfc_flow_classifier( + self.FC_ID + ) + self.assertEqual(self.FLOW_CLASSIFIER_NAME, classifier.name) + self.assertEqual(self.FC_ID, 
classifier.id) + + classifier = self.operator_cloud.network.update_sfc_flow_classifier( + self.FC_ID, name=self.UPDATE_NAME + ) + self.assertEqual(self.UPDATE_NAME, classifier.name) diff --git a/openstack/tests/functional/network/v2/test_subnet.py b/openstack/tests/functional/network/v2/test_subnet.py index eb56a2bd9a..0cdda41c7d 100644 --- a/openstack/tests/functional/network/v2/test_subnet.py +++ b/openstack/tests/functional/network/v2/test_subnet.py @@ -9,19 +9,14 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. - -import uuid +# mypy: disable-error-code="method-assign" from openstack.network.v2 import network from openstack.network.v2 import subnet -from openstack.tests.functional import base - +from openstack.tests.functional.network.v2 import common -class TestSubnet(base.BaseFunctionalTest): - NET_NAME = uuid.uuid4().hex - SUB_NAME = uuid.uuid4().hex - UPDATE_NAME = uuid.uuid4().hex +class TestSubnet(common.TestTagNeutron): IPV4 = 4 CIDR = "10.100.0.0/24" DNS_SERVERS = ["8.8.4.4", "8.8.8.8"] @@ -30,37 +25,44 @@ class TestSubnet(base.BaseFunctionalTest): NET_ID = None SUB_ID = None - @classmethod - def setUpClass(cls): - super(TestSubnet, cls).setUpClass() - net = cls.conn.network.create_network(name=cls.NET_NAME) + def setUp(self): + super().setUp() + self.NET_NAME = self.getUniqueString() + self.SUB_NAME = self.getUniqueString() + self.UPDATE_NAME = self.getUniqueString() + net = self.user_cloud.network.create_network(name=self.NET_NAME) assert isinstance(net, network.Network) - cls.assertIs(cls.NET_NAME, net.name) - cls.NET_ID = net.id - sub = cls.conn.network.create_subnet(name=cls.SUB_NAME, - ip_version=cls.IPV4, - network_id=cls.NET_ID, - cidr=cls.CIDR, - dns_nameservers=cls.DNS_SERVERS, - allocation_pools=cls.POOL, - host_routes=cls.ROUTES) + self.assertEqual(self.NET_NAME, net.name) + self.NET_ID = net.id + sub = 
self.user_cloud.network.create_subnet( + name=self.SUB_NAME, + ip_version=self.IPV4, + network_id=self.NET_ID, + cidr=self.CIDR, + dns_nameservers=self.DNS_SERVERS, + allocation_pools=self.POOL, + host_routes=self.ROUTES, + ) assert isinstance(sub, subnet.Subnet) - cls.assertIs(cls.SUB_NAME, sub.name) - cls.SUB_ID = sub.id + self.assertEqual(self.SUB_NAME, sub.name) + self.SUB_ID = self.ID = sub.id + self.get_command = self.user_cloud.network.get_subnet - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_subnet(cls.SUB_ID) - cls.assertIs(None, sot) - sot = cls.conn.network.delete_network(cls.NET_ID, ignore_missing=False) - cls.assertIs(None, sot) + def tearDown(self): + sot = self.user_cloud.network.delete_subnet(self.SUB_ID) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_subnet(self.SUB_NAME) + sot = self.user_cloud.network.find_subnet(self.SUB_NAME) self.assertEqual(self.SUB_ID, sot.id) def test_get(self): - sot = self.conn.network.get_subnet(self.SUB_ID) + sot = self.user_cloud.network.get_subnet(self.SUB_ID) self.assertEqual(self.SUB_NAME, sot.name) self.assertEqual(self.SUB_ID, sot.id) self.assertEqual(self.DNS_SERVERS, sot.dns_nameservers) @@ -72,10 +74,11 @@ def test_get(self): self.assertTrue(sot.is_dhcp_enabled) def test_list(self): - names = [o.name for o in self.conn.network.subnets()] + names = [o.name for o in self.user_cloud.network.subnets()] self.assertIn(self.SUB_NAME, names) def test_update(self): - sot = self.conn.network.update_subnet(self.SUB_ID, - name=self.UPDATE_NAME) + sot = self.user_cloud.network.update_subnet( + self.SUB_ID, name=self.UPDATE_NAME + ) self.assertEqual(self.UPDATE_NAME, sot.name) diff --git a/openstack/tests/functional/network/v2/test_subnet_from_subnet_pool.py b/openstack/tests/functional/network/v2/test_subnet_from_subnet_pool.py new file 
mode 100644 index 0000000000..175fe2740c --- /dev/null +++ b/openstack/tests/functional/network/v2/test_subnet_from_subnet_pool.py @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import network +from openstack.network.v2 import subnet +from openstack.network.v2 import subnet_pool +from openstack.tests.functional import base + + +class TestSubnetFromSubnetPool(base.BaseFunctionalTest): + IPV4 = 4 + CIDR = "10.100.0.0/28" + MINIMUM_PREFIX_LENGTH = 8 + DEFAULT_PREFIX_LENGTH = 24 + MAXIMUM_PREFIX_LENGTH = 32 + SUBNET_PREFIX_LENGTH = 28 + IP_VERSION = 4 + PREFIXES = ["10.100.0.0/24"] + NET_ID = None + SUB_ID = None + SUB_POOL_ID = None + + def setUp(self): + super().setUp() + self.NET_NAME = self.getUniqueString() + self.SUB_NAME = self.getUniqueString() + self.SUB_POOL_NAME = self.getUniqueString() + + sub_pool = self.user_cloud.network.create_subnet_pool( + name=self.SUB_POOL_NAME, + min_prefixlen=self.MINIMUM_PREFIX_LENGTH, + default_prefixlen=self.DEFAULT_PREFIX_LENGTH, + max_prefixlen=self.MAXIMUM_PREFIX_LENGTH, + prefixes=self.PREFIXES, + ) + self.assertIsInstance(sub_pool, subnet_pool.SubnetPool) + self.assertEqual(self.SUB_POOL_NAME, sub_pool.name) + self.SUB_POOL_ID = sub_pool.id + net = self.user_cloud.network.create_network(name=self.NET_NAME) + self.assertIsInstance(net, network.Network) + self.assertEqual(self.NET_NAME, net.name) + self.NET_ID = net.id + sub = self.user_cloud.network.create_subnet( + 
name=self.SUB_NAME, + ip_version=self.IPV4, + network_id=self.NET_ID, + prefixlen=self.SUBNET_PREFIX_LENGTH, + subnetpool_id=self.SUB_POOL_ID, + ) + self.assertIsInstance(sub, subnet.Subnet) + self.assertEqual(self.SUB_NAME, sub.name) + self.SUB_ID = sub.id + + def tearDown(self): + sot = self.user_cloud.network.delete_subnet(self.SUB_ID) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network( + self.NET_ID, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_subnet_pool(self.SUB_POOL_ID) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.network.get_subnet(self.SUB_ID) + self.assertEqual(self.SUB_NAME, sot.name) + self.assertEqual(self.SUB_ID, sot.id) + self.assertEqual(self.CIDR, sot.cidr) + self.assertEqual(self.IPV4, sot.ip_version) + self.assertEqual("10.100.0.1", sot.gateway_ip) + self.assertTrue(sot.is_dhcp_enabled) diff --git a/openstack/tests/functional/network/v2/test_subnet_pool.py b/openstack/tests/functional/network/v2/test_subnet_pool.py index c06d850b91..c8f88d780f 100644 --- a/openstack/tests/functional/network/v2/test_subnet_pool.py +++ b/openstack/tests/functional/network/v2/test_subnet_pool.py @@ -9,17 +9,13 @@ # WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the # License for the specific language governing permissions and limitations # under the License. 
- -import uuid +# mypy: disable-error-code="method-assign" from openstack.network.v2 import subnet_pool as _subnet_pool -from openstack.tests.functional import base - +from openstack.tests.functional.network.v2 import common -class TestSubnetPool(base.BaseFunctionalTest): - SUBNET_POOL_NAME = uuid.uuid4().hex - SUBNET_POOL_NAME_UPDATED = uuid.uuid4().hex +class TestSubnetPool(common.TestTagNeutron): SUBNET_POOL_ID = None MINIMUM_PREFIX_LENGTH = 8 DEFAULT_PREFIX_LENGTH = 24 @@ -27,52 +23,52 @@ class TestSubnetPool(base.BaseFunctionalTest): DEFAULT_QUOTA = 24 IS_SHARED = False IP_VERSION = 4 - PREFIXES = ['10.100.0.0/24', '10.101.0.0/24'] + PREFIXES = ["10.100.0.0/24", "10.101.0.0/24"] - @classmethod - def setUpClass(cls): - super(TestSubnetPool, cls).setUpClass() - subnet_pool = cls.conn.network.create_subnet_pool( - name=cls.SUBNET_POOL_NAME, - min_prefixlen=cls.MINIMUM_PREFIX_LENGTH, - default_prefixlen=cls.DEFAULT_PREFIX_LENGTH, - max_prefixlen=cls.MAXIMUM_PREFIX_LENGTH, - default_quota=cls.DEFAULT_QUOTA, - shared=cls.IS_SHARED, - prefixes=cls.PREFIXES) + def setUp(self): + super().setUp() + self.SUBNET_POOL_NAME = self.getUniqueString() + self.SUBNET_POOL_NAME_UPDATED = self.getUniqueString() + subnet_pool = self.user_cloud.network.create_subnet_pool( + name=self.SUBNET_POOL_NAME, + min_prefixlen=self.MINIMUM_PREFIX_LENGTH, + default_prefixlen=self.DEFAULT_PREFIX_LENGTH, + max_prefixlen=self.MAXIMUM_PREFIX_LENGTH, + default_quota=self.DEFAULT_QUOTA, + shared=self.IS_SHARED, + prefixes=self.PREFIXES, + ) assert isinstance(subnet_pool, _subnet_pool.SubnetPool) - cls.assertIs(cls.SUBNET_POOL_NAME, subnet_pool.name) - cls.SUBNET_POOL_ID = subnet_pool.id + self.assertEqual(self.SUBNET_POOL_NAME, subnet_pool.name) + self.SUBNET_POOL_ID = self.ID = subnet_pool.id + self.get_command = self.user_cloud.network.get_subnet_pool - @classmethod - def tearDownClass(cls): - sot = cls.conn.network.delete_subnet_pool(cls.SUBNET_POOL_ID) - cls.assertIs(None, sot) + def 
tearDown(self): + sot = self.user_cloud.network.delete_subnet_pool(self.SUBNET_POOL_ID) + self.assertIsNone(sot) + super().tearDown() def test_find(self): - sot = self.conn.network.find_subnet_pool(self.SUBNET_POOL_NAME) + sot = self.user_cloud.network.find_subnet_pool(self.SUBNET_POOL_NAME) self.assertEqual(self.SUBNET_POOL_ID, sot.id) def test_get(self): - sot = self.conn.network.get_subnet_pool(self.SUBNET_POOL_ID) + sot = self.user_cloud.network.get_subnet_pool(self.SUBNET_POOL_ID) self.assertEqual(self.SUBNET_POOL_NAME, sot.name) - self.assertEqual(self.MINIMUM_PREFIX_LENGTH, - sot.minimum_prefix_length) - self.assertEqual(self.DEFAULT_PREFIX_LENGTH, - sot.default_prefix_length) - self.assertEqual(self.MAXIMUM_PREFIX_LENGTH, - sot.maximum_prefix_length) + self.assertEqual(self.MINIMUM_PREFIX_LENGTH, sot.minimum_prefix_length) + self.assertEqual(self.DEFAULT_PREFIX_LENGTH, sot.default_prefix_length) + self.assertEqual(self.MAXIMUM_PREFIX_LENGTH, sot.maximum_prefix_length) self.assertEqual(self.DEFAULT_QUOTA, sot.default_quota) self.assertEqual(self.IS_SHARED, sot.is_shared) self.assertEqual(self.IP_VERSION, sot.ip_version) self.assertEqual(self.PREFIXES, sot.prefixes) def test_list(self): - names = [o.name for o in self.conn.network.subnet_pools()] + names = [o.name for o in self.user_cloud.network.subnet_pools()] self.assertIn(self.SUBNET_POOL_NAME, names) def test_update(self): - sot = self.conn.network.update_subnet_pool( - self.SUBNET_POOL_ID, - name=self.SUBNET_POOL_NAME_UPDATED) + sot = self.user_cloud.network.update_subnet_pool( + self.SUBNET_POOL_ID, name=self.SUBNET_POOL_NAME_UPDATED + ) self.assertEqual(self.SUBNET_POOL_NAME_UPDATED, sot.name) diff --git a/openstack/tests/functional/network/v2/test_taas.py b/openstack/tests/functional/network/v2/test_taas.py new file mode 100644 index 0000000000..f31f14bf3f --- /dev/null +++ b/openstack/tests/functional/network/v2/test_taas.py @@ -0,0 +1,126 @@ +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import network as _network +from openstack.network.v2 import port as _port +from openstack.network.v2 import tap_flow as _tap_flow +from openstack.network.v2 import tap_service as _tap_service +from openstack.tests.functional import base + + +class TestTapService(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + if not self.user_cloud.network.find_extension("taas"): + self.skipTest("Neutron Tap-as-a-service Extension disabled") + + self.TAP_S_NAME = 'my_service' + self.getUniqueString() + self.TAP_F_NAME = 'my_flow' + self.getUniqueString() + net = self.user_cloud.network.create_network() + assert isinstance(net, _network.Network) + self.SERVICE_NET_ID = net.id + + net = self.user_cloud.network.create_network() + assert isinstance(net, _network.Network) + self.FLOW_NET_ID = net.id + + port = self.user_cloud.network.create_port( + network_id=self.SERVICE_NET_ID + ) + assert isinstance(port, _port.Port) + self.SERVICE_PORT_ID = port.id + + port = self.user_cloud.network.create_port(network_id=self.FLOW_NET_ID) + assert isinstance(port, _port.Port) + self.FLOW_PORT_ID = port.id + + tap_service = self.user_cloud.network.create_tap_service( + name=self.TAP_S_NAME, port_id=self.SERVICE_PORT_ID + ) + assert isinstance(tap_service, _tap_service.TapService) + self.TAP_SERVICE = tap_service + + tap_flow = self.user_cloud.network.create_tap_flow( + name=self.TAP_F_NAME, + tap_service_id=self.TAP_SERVICE.id, + source_port=self.FLOW_PORT_ID, + 
direction='BOTH', + ) + assert isinstance(tap_flow, _tap_flow.TapFlow) + self.TAP_FLOW = tap_flow + + def tearDown(self): + sot = self.user_cloud.network.delete_tap_flow( + self.TAP_FLOW.id, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_tap_service( + self.TAP_SERVICE.id, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_port(self.SERVICE_PORT_ID) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_port(self.FLOW_PORT_ID) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network(self.SERVICE_NET_ID) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network(self.FLOW_NET_ID) + self.assertIsNone(sot) + super().tearDown() + + def test_find_tap_service(self): + sot = self.user_cloud.network.find_tap_service(self.TAP_SERVICE.name) + self.assertEqual(self.SERVICE_PORT_ID, sot.port_id) + self.assertEqual(self.TAP_S_NAME, sot.name) + + def test_get_tap_service(self): + sot = self.user_cloud.network.get_tap_service(self.TAP_SERVICE.id) + self.assertEqual(self.SERVICE_PORT_ID, sot.port_id) + self.assertEqual(self.TAP_S_NAME, sot.name) + + def test_list_tap_services(self): + tap_service_ids = [ + ts.id for ts in self.user_cloud.network.tap_services() + ] + self.assertIn(self.TAP_SERVICE.id, tap_service_ids) + + def test_update_tap_service(self): + description = 'My tap service' + sot = self.user_cloud.network.update_tap_service( + self.TAP_SERVICE.id, description=description + ) + self.assertEqual(description, sot.description) + + def test_find_tap_flow(self): + sot = self.user_cloud.network.find_tap_flow(self.TAP_FLOW.name) + self.assertEqual(self.FLOW_PORT_ID, sot.source_port) + self.assertEqual(self.TAP_SERVICE.id, sot.tap_service_id) + self.assertEqual('BOTH', sot.direction) + self.assertEqual(self.TAP_F_NAME, sot.name) + + def test_get_tap_flow(self): + sot = self.user_cloud.network.get_tap_flow(self.TAP_FLOW.id) + self.assertEqual(self.FLOW_PORT_ID, 
sot.source_port) + self.assertEqual(self.TAP_F_NAME, sot.name) + self.assertEqual(self.TAP_SERVICE.id, sot.tap_service_id) + self.assertEqual('BOTH', sot.direction) + + def test_list_tap_flows(self): + tap_flow_ids = [tf.id for tf in self.user_cloud.network.tap_flows()] + self.assertIn(self.TAP_FLOW.id, tap_flow_ids) + + def test_update_tap_flow(self): + description = 'My tap flow' + sot = self.user_cloud.network.update_tap_flow( + self.TAP_FLOW.id, description=description + ) + self.assertEqual(description, sot.description) diff --git a/openstack/tests/functional/network/v2/test_tap_mirror.py b/openstack/tests/functional/network/v2/test_tap_mirror.py new file mode 100644 index 0000000000..aa781441f1 --- /dev/null +++ b/openstack/tests/functional/network/v2/test_tap_mirror.py @@ -0,0 +1,83 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import network as _network +from openstack.network.v2 import port as _port +from openstack.network.v2 import tap_mirror as _tap_mirror +from openstack.tests.functional import base + + +class TestTapMirror(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.user_cloud.network.find_extension("tap-mirror"): + self.skipTest("Neutron Tap Mirror Extension disabled") + + self.TAP_M_NAME = 'my_tap_mirror' + self.getUniqueString() + net = self.user_cloud.network.create_network() + assert isinstance(net, _network.Network) + self.MIRROR_NET_ID = net.id + + port = self.user_cloud.network.create_port( + network_id=self.MIRROR_NET_ID + ) + assert isinstance(port, _port.Port) + self.MIRROR_PORT_ID = port.id + + self.REMOTE_IP = '193.10.10.2' + self.MIRROR_TYPE = 'erspanv1' + + tap_mirror = self.user_cloud.network.create_tap_mirror( + name=self.TAP_M_NAME, + port_id=self.MIRROR_PORT_ID, + remote_ip=self.REMOTE_IP, + mirror_type=self.MIRROR_TYPE, + directions={'IN': 99}, + ) + assert isinstance(tap_mirror, _tap_mirror.TapMirror) + self.TAP_MIRROR = tap_mirror + + def tearDown(self): + sot = self.user_cloud.network.delete_tap_mirror( + self.TAP_MIRROR.id, ignore_missing=False + ) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_port(self.MIRROR_PORT_ID) + self.assertIsNone(sot) + sot = self.user_cloud.network.delete_network(self.MIRROR_NET_ID) + self.assertIsNone(sot) + + super().tearDown() + + def test_find_tap_mirror(self): + sot = self.user_cloud.network.find_tap_mirror(self.TAP_MIRROR.name) + self.assertEqual(self.MIRROR_PORT_ID, sot.port_id) + self.assertEqual(self.TAP_M_NAME, sot.name) + + def test_get_tap_mirror(self): + sot = self.user_cloud.network.get_tap_mirror(self.TAP_MIRROR.id) + self.assertEqual(self.MIRROR_PORT_ID, sot.port_id) + self.assertEqual(self.TAP_M_NAME, sot.name) + + def test_list_tap_mirrors(self): + tap_mirror_ids = [ + tm.id for tm in self.user_cloud.network.tap_mirrors() + ] + 
self.assertIn(self.TAP_MIRROR.id, tap_mirror_ids) + + def test_update_tap_mirror(self): + description = 'My Tap Mirror' + sot = self.user_cloud.network.update_tap_mirror( + self.TAP_MIRROR.id, description=description + ) + self.assertEqual(description, sot.description) diff --git a/openstack/tests/functional/network/v2/test_trunk.py b/openstack/tests/functional/network/v2/test_trunk.py new file mode 100644 index 0000000000..4f60d3e22f --- /dev/null +++ b/openstack/tests/functional/network/v2/test_trunk.py @@ -0,0 +1,99 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.network.v2 import network +from openstack.network.v2 import port +from openstack.network.v2 import trunk as _trunk +from openstack.tests.functional import base + + +class TestTrunk(base.BaseFunctionalTest): + TIMEOUT_SCALING_FACTOR = 2.0 + + def setUp(self): + super().setUp() + + # Skip the tests if trunk extension is not enabled. 
+ if not self.user_cloud.network.find_extension("trunk"): + self.skipTest("Network trunk extension disabled") + + self.TRUNK_NAME = self.getUniqueString() + self.TRUNK_NAME_UPDATED = self.getUniqueString() + net = self.user_cloud.network.create_network() + assert isinstance(net, network.Network) + self.NET_ID = net.id + prt = self.user_cloud.network.create_port(network_id=self.NET_ID) + assert isinstance(prt, port.Port) + self.PORT_ID = prt.id + self.ports_to_clean = [self.PORT_ID] + trunk = self.user_cloud.network.create_trunk( + name=self.TRUNK_NAME, port_id=self.PORT_ID + ) + assert isinstance(trunk, _trunk.Trunk) + self.TRUNK_ID = trunk.id + + def tearDown(self): + self.user_cloud.network.delete_trunk( + self.TRUNK_ID, ignore_missing=False + ) + for port_id in self.ports_to_clean: + self.user_cloud.network.delete_port(port_id, ignore_missing=False) + self.user_cloud.network.delete_network( + self.NET_ID, ignore_missing=False + ) + super().tearDown() + + def test_find(self): + sot = self.user_cloud.network.find_trunk(self.TRUNK_NAME) + self.assertEqual(self.TRUNK_ID, sot.id) + + def test_get(self): + sot = self.user_cloud.network.get_trunk(self.TRUNK_ID) + self.assertEqual(self.TRUNK_ID, sot.id) + self.assertEqual(self.TRUNK_NAME, sot.name) + + def test_list(self): + ids = [o.id for o in self.user_cloud.network.trunks()] + self.assertIn(self.TRUNK_ID, ids) + + def test_update(self): + sot = self.user_cloud.network.update_trunk( + self.TRUNK_ID, name=self.TRUNK_NAME_UPDATED + ) + self.assertEqual(self.TRUNK_NAME_UPDATED, sot.name) + + def test_subports(self): + port_for_subport = self.user_cloud.network.create_port( + network_id=self.NET_ID + ) + self.ports_to_clean.append(port_for_subport.id) + subports = [ + { + "port_id": port_for_subport.id, + "segmentation_type": "vlan", + "segmentation_id": 111, + } + ] + + sot = self.user_cloud.network.get_trunk_subports(self.TRUNK_ID) + self.assertEqual({"sub_ports": []}, sot) + + 
self.user_cloud.network.add_trunk_subports(self.TRUNK_ID, subports) + sot = self.user_cloud.network.get_trunk_subports(self.TRUNK_ID) + self.assertEqual({"sub_ports": subports}, sot) + + self.user_cloud.network.delete_trunk_subports( + self.TRUNK_ID, [{"port_id": port_for_subport.id}] + ) + sot = self.user_cloud.network.get_trunk_subports(self.TRUNK_ID) + self.assertEqual({"sub_ports": []}, sot) diff --git a/openstack/tests/functional/network/v2/test_vpnaas.py b/openstack/tests/functional/network/v2/test_vpnaas.py new file mode 100644 index 0000000000..0fdd2b86ce --- /dev/null +++ b/openstack/tests/functional/network/v2/test_vpnaas.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import vpn_ike_policy +from openstack.tests.functional import base + + +class TestVpnIkePolicy(base.BaseFunctionalTest): + ID = None + + def setUp(self): + super().setUp() + if not self.user_cloud._has_neutron_extension("vpnaas"): + self.skipTest("vpnaas service not supported by cloud") + self.IKEPOLICY_NAME = self.getUniqueString("ikepolicy") + self.UPDATE_NAME = self.getUniqueString("ikepolicy-updated") + policy = self.user_cloud.network.create_vpn_ike_policy( + name=self.IKEPOLICY_NAME + ) + assert isinstance(policy, vpn_ike_policy.VpnIkePolicy) + self.assertEqual(self.IKEPOLICY_NAME, policy.name) + self.ID = policy.id + + def tearDown(self): + ikepolicy = self.user_cloud.network.delete_vpn_ike_policy( + self.ID, ignore_missing=True + ) + self.assertIsNone(ikepolicy) + super().tearDown() + + def test_list(self): + policies = [f.name for f in self.user_cloud.network.vpn_ike_policies()] + self.assertIn(self.IKEPOLICY_NAME, policies) + + def test_find(self): + policy = self.user_cloud.network.find_vpn_ike_policy( + self.IKEPOLICY_NAME + ) + self.assertEqual(self.ID, policy.id) + + def test_get(self): + policy = self.user_cloud.network.get_vpn_ike_policy(self.ID) + self.assertEqual(self.IKEPOLICY_NAME, policy.name) + self.assertEqual(self.ID, policy.id) + + def test_update(self): + policy = self.user_cloud.network.update_vpn_ike_policy( + self.ID, name=self.UPDATE_NAME + ) + self.assertEqual(self.UPDATE_NAME, policy.name) diff --git a/openstack/tests/functional/object_store/v1/test_account.py b/openstack/tests/functional/object_store/v1/test_account.py index 879f152676..f855cad52d 100644 --- a/openstack/tests/functional/object_store/v1/test_account.py +++ b/openstack/tests/functional/object_store/v1/test_account.py @@ -14,45 +14,49 @@ class TestAccount(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + self.require_service('object-store') - @classmethod - def tearDownClass(cls): - super(TestAccount, cls).tearDownClass() - 
account = cls.conn.object_store.get_account_metadata() - cls.conn.object_store.delete_account_metadata(account.metadata.keys()) + def tearDown(self): + account = self.operator_cloud.object_store.get_account_metadata() + self.operator_cloud.object_store.delete_account_metadata( + account.metadata.keys() + ) + super().tearDown() def test_system_metadata(self): - account = self.conn.object_store.get_account_metadata() + account = self.operator_cloud.object_store.get_account_metadata() self.assertGreaterEqual(account.account_bytes_used, 0) self.assertGreaterEqual(account.account_container_count, 0) self.assertGreaterEqual(account.account_object_count, 0) def test_custom_metadata(self): # get custom metadata - account = self.conn.object_store.get_account_metadata() + account = self.operator_cloud.object_store.get_account_metadata() self.assertFalse(account.metadata) # set no custom metadata - self.conn.object_store.set_account_metadata() - account = self.conn.object_store.get_account_metadata() + self.operator_cloud.object_store.set_account_metadata() + account = self.operator_cloud.object_store.get_account_metadata() self.assertFalse(account.metadata) # set empty custom metadata - self.conn.object_store.set_account_metadata(k0='') - account = self.conn.object_store.get_account_metadata() + self.operator_cloud.object_store.set_account_metadata(k0='') + account = self.operator_cloud.object_store.get_account_metadata() self.assertFalse(account.metadata) # set custom metadata - self.conn.object_store.set_account_metadata(k1='v1') - account = self.conn.object_store.get_account_metadata() + self.operator_cloud.object_store.set_account_metadata(k1='v1') + account = self.operator_cloud.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(1, len(account.metadata)) self.assertIn('k1', account.metadata) self.assertEqual('v1', account.metadata['k1']) # set more custom metadata - self.conn.object_store.set_account_metadata(k2='v2') - account = 
self.conn.object_store.get_account_metadata() + self.operator_cloud.object_store.set_account_metadata(k2='v2') + account = self.operator_cloud.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(2, len(account.metadata)) self.assertIn('k1', account.metadata) @@ -61,8 +65,8 @@ def test_custom_metadata(self): self.assertEqual('v2', account.metadata['k2']) # update custom metadata - self.conn.object_store.set_account_metadata(k1='v1.1') - account = self.conn.object_store.get_account_metadata() + self.operator_cloud.object_store.set_account_metadata(k1='v1.1') + account = self.operator_cloud.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(2, len(account.metadata)) self.assertIn('k1', account.metadata) @@ -71,8 +75,8 @@ def test_custom_metadata(self): self.assertEqual('v2', account.metadata['k2']) # unset custom metadata - self.conn.object_store.delete_account_metadata(['k1']) - account = self.conn.object_store.get_account_metadata() + self.operator_cloud.object_store.delete_account_metadata(['k1']) + account = self.operator_cloud.object_store.get_account_metadata() self.assertTrue(account.metadata) self.assertEqual(1, len(account.metadata)) self.assertIn('k2', account.metadata) diff --git a/openstack/tests/functional/object_store/v1/test_container.py b/openstack/tests/functional/object_store/v1/test_container.py index 206f372111..da1d3d6b42 100644 --- a/openstack/tests/functional/object_store/v1/test_container.py +++ b/openstack/tests/functional/object_store/v1/test_container.py @@ -10,60 +10,71 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid - from openstack.object_store.v1 import container as _container from openstack.tests.functional import base class TestContainer(base.BaseFunctionalTest): - - NAME = uuid.uuid4().hex - - @classmethod - def setUpClass(cls): - super(TestContainer, cls).setUpClass() - container = cls.conn.object_store.create_container(name=cls.NAME) + def setUp(self): + super().setUp() + self.require_service('object-store') + + self.NAME = self.getUniqueString() + container = self.operator_cloud.object_store.create_container( + name=self.NAME + ) + self.addEmptyCleanup( + self.operator_cloud.object_store.delete_container, + self.NAME, + ignore_missing=False, + ) assert isinstance(container, _container.Container) - cls.assertIs(cls.NAME, container.name) - - @classmethod - def tearDownClass(cls): - result = cls.conn.object_store.delete_container(cls.NAME, - ignore_missing=False) - cls.assertIs(None, result) + self.assertEqual(self.NAME, container.name) def test_list(self): - names = [o.name for o in self.conn.object_store.containers()] + names = [o.name for o in self.operator_cloud.object_store.containers()] self.assertIn(self.NAME, names) def test_system_metadata(self): # get system metadata - container = self.conn.object_store.get_container_metadata(self.NAME) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertEqual(0, container.object_count) self.assertEqual(0, container.bytes_used) # set system metadata - container = self.conn.object_store.get_container_metadata(self.NAME) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertIsNone(container.read_ACL) self.assertIsNone(container.write_ACL) - self.conn.object_store.set_container_metadata( - container, read_ACL='.r:*', write_ACL='demo:demo') - container = self.conn.object_store.get_container_metadata(self.NAME) + self.operator_cloud.object_store.set_container_metadata( + container, read_ACL='.r:*', write_ACL='demo:demo' + ) + 
container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertEqual('.r:*', container.read_ACL) self.assertEqual('demo:demo', container.write_ACL) # update system metadata - self.conn.object_store.set_container_metadata( - container, read_ACL='.r:demo') - container = self.conn.object_store.get_container_metadata(self.NAME) + self.operator_cloud.object_store.set_container_metadata( + container, read_ACL='.r:demo' + ) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertEqual('.r:demo', container.read_ACL) self.assertEqual('demo:demo', container.write_ACL) # set system metadata and custom metadata - self.conn.object_store.set_container_metadata( - container, k0='v0', sync_key='1234') - container = self.conn.object_store.get_container_metadata(self.NAME) + self.operator_cloud.object_store.set_container_metadata( + container, k0='v0', sync_key='1234' + ) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertTrue(container.metadata) self.assertIn('k0', container.metadata) self.assertEqual('v0', container.metadata['k0']) @@ -72,9 +83,12 @@ def test_system_metadata(self): self.assertEqual('1234', container.sync_key) # unset system metadata - self.conn.object_store.delete_container_metadata(container, - ['sync_key']) - container = self.conn.object_store.get_container_metadata(self.NAME) + self.operator_cloud.object_store.delete_container_metadata( + container, ['sync_key'] + ) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertTrue(container.metadata) self.assertIn('k0', container.metadata) self.assertEqual('v0', container.metadata['k0']) @@ -84,30 +98,46 @@ def test_system_metadata(self): def test_custom_metadata(self): # get custom metadata - container = self.conn.object_store.get_container_metadata(self.NAME) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) 
self.assertFalse(container.metadata) # set no custom metadata - self.conn.object_store.set_container_metadata(container) - container = self.conn.object_store.get_container_metadata(container) + self.operator_cloud.object_store.set_container_metadata(container) + container = self.operator_cloud.object_store.get_container_metadata( + container + ) self.assertFalse(container.metadata) # set empty custom metadata - self.conn.object_store.set_container_metadata(container, k0='') - container = self.conn.object_store.get_container_metadata(container) + self.operator_cloud.object_store.set_container_metadata( + container, k0='' + ) + container = self.operator_cloud.object_store.get_container_metadata( + container + ) self.assertFalse(container.metadata) # set custom metadata - self.conn.object_store.set_container_metadata(container, k1='v1') - container = self.conn.object_store.get_container_metadata(container) + self.operator_cloud.object_store.set_container_metadata( + container, k1='v1' + ) + container = self.operator_cloud.object_store.get_container_metadata( + container + ) self.assertTrue(container.metadata) self.assertEqual(1, len(container.metadata)) self.assertIn('k1', container.metadata) self.assertEqual('v1', container.metadata['k1']) # set more custom metadata by named container - self.conn.object_store.set_container_metadata(self.NAME, k2='v2') - container = self.conn.object_store.get_container_metadata(container) + self.operator_cloud.object_store.set_container_metadata( + self.NAME, k2='v2' + ) + container = self.operator_cloud.object_store.get_container_metadata( + container + ) self.assertTrue(container.metadata) self.assertEqual(2, len(container.metadata)) self.assertIn('k1', container.metadata) @@ -116,8 +146,12 @@ def test_custom_metadata(self): self.assertEqual('v2', container.metadata['k2']) # update metadata - self.conn.object_store.set_container_metadata(container, k1='v1.1') - container = self.conn.object_store.get_container_metadata(self.NAME) + 
self.operator_cloud.object_store.set_container_metadata( + container, k1='v1.1' + ) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertTrue(container.metadata) self.assertEqual(2, len(container.metadata)) self.assertIn('k1', container.metadata) @@ -126,8 +160,12 @@ def test_custom_metadata(self): self.assertEqual('v2', container.metadata['k2']) # delete metadata - self.conn.object_store.delete_container_metadata(container, ['k1']) - container = self.conn.object_store.get_container_metadata(self.NAME) + self.operator_cloud.object_store.delete_container_metadata( + container, ['k1'] + ) + container = self.operator_cloud.object_store.get_container_metadata( + self.NAME + ) self.assertTrue(container.metadata) self.assertEqual(1, len(container.metadata)) self.assertIn('k2', container.metadata) diff --git a/openstack/tests/functional/object_store/v1/test_obj.py b/openstack/tests/functional/object_store/v1/test_obj.py index a96a18eedf..af43a8d10c 100644 --- a/openstack/tests/functional/object_store/v1/test_obj.py +++ b/openstack/tests/functional/object_store/v1/test_obj.py @@ -10,90 +10,91 @@ # License for the specific language governing permissions and limitations # under the License. 
-import uuid - from openstack.tests.functional import base class TestObject(base.BaseFunctionalTest): - - FOLDER = uuid.uuid4().hex - FILE = uuid.uuid4().hex - DATA = 'abc' - - @classmethod - def setUpClass(cls): - super(TestObject, cls).setUpClass() - cls.conn.object_store.create_container(name=cls.FOLDER) - cls.sot = cls.conn.object_store.upload_object( - container=cls.FOLDER, name=cls.FILE, data=cls.DATA) - - @classmethod - def tearDownClass(cls): - super(TestObject, cls).tearDownClass() - cls.conn.object_store.delete_object(cls.sot, ignore_missing=False) - cls.conn.object_store.delete_container(cls.FOLDER) + DATA = b'abc' + + def setUp(self): + super().setUp() + self.require_service('object-store') + + self.FOLDER = self.getUniqueString() + self.FILE = self.getUniqueString() + self.operator_cloud.object_store.create_container(name=self.FOLDER) + self.addCleanup( + self.operator_cloud.object_store.delete_container, self.FOLDER + ) + self.sot = self.operator_cloud.object_store.upload_object( + container=self.FOLDER, name=self.FILE, data=self.DATA + ) + self.addEmptyCleanup( + self.operator_cloud.object_store.delete_object, + self.sot, + ignore_missing=False, + ) def test_list(self): - names = [o.name for o - in self.conn.object_store.objects(container=self.FOLDER)] + names = [ + o.name + for o in self.operator_cloud.object_store.objects( + container=self.FOLDER + ) + ] self.assertIn(self.FILE, names) - def test_get_object(self): - result = self.conn.object_store.get_object( - self.FILE, container=self.FOLDER) + def test_download_object(self): + result = self.operator_cloud.object_store.download_object( + self.FILE, container=self.FOLDER + ) self.assertEqual(self.DATA, result) - result = self.conn.object_store.get_object(self.sot) + result = self.operator_cloud.object_store.download_object(self.sot) self.assertEqual(self.DATA, result) def test_system_metadata(self): # get system metadata - obj = self.conn.object_store.get_object_metadata( - self.FILE, 
container=self.FOLDER) - self.assertGreaterEqual(0, obj.bytes) + obj = self.operator_cloud.object_store.get_object_metadata( + self.FILE, container=self.FOLDER + ) + # TODO(shade) obj.bytes is coming up None on python3 but not python2 + # self.assertGreaterEqual(0, obj.bytes) self.assertIsNotNone(obj.etag) # set system metadata - obj = self.conn.object_store.get_object_metadata( - self.FILE, container=self.FOLDER) + obj = self.operator_cloud.object_store.get_object_metadata( + self.FILE, container=self.FOLDER + ) self.assertIsNone(obj.content_disposition) self.assertIsNone(obj.content_encoding) - self.conn.object_store.set_object_metadata( - obj, content_disposition='attachment', content_encoding='gzip') - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.set_object_metadata( + obj, content_disposition='attachment', content_encoding='gzip' + ) + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertEqual('attachment', obj.content_disposition) self.assertEqual('gzip', obj.content_encoding) # update system metadata - self.conn.object_store.set_object_metadata( - obj, content_encoding='deflate') - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.set_object_metadata( + obj, content_encoding='deflate' + ) + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertEqual('attachment', obj.content_disposition) self.assertEqual('deflate', obj.content_encoding) - # set system metadata and custom metadata - self.conn.object_store.set_object_metadata( - obj, k0='v0', delete_after=100) - obj = self.conn.object_store.get_object_metadata(obj) - self.assertIn('k0', obj.metadata) - self.assertEqual('v0', obj.metadata['k0']) - self.assertEqual('attachment', obj.content_disposition) - self.assertEqual('deflate', obj.content_encoding) - - # unset system metadata - self.conn.object_store.delete_object_metadata( - obj, keys=['delete_after']) - obj = 
self.conn.object_store.get_object_metadata(obj) + # set custom metadata + self.operator_cloud.object_store.set_object_metadata(obj, k0='v0') + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertIn('k0', obj.metadata) self.assertEqual('v0', obj.metadata['k0']) self.assertEqual('attachment', obj.content_disposition) self.assertEqual('deflate', obj.content_encoding) - self.assertIsNone(obj.delete_at) # unset more system metadata - self.conn.object_store.delete_object_metadata( - obj, keys=['content_disposition']) - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.delete_object_metadata( + obj, keys=['content_disposition'] + ) + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertIn('k0', obj.metadata) self.assertEqual('v0', obj.metadata['k0']) self.assertIsNone(obj.content_disposition) @@ -102,32 +103,34 @@ def test_system_metadata(self): def test_custom_metadata(self): # get custom metadata - obj = self.conn.object_store.get_object_metadata( - self.FILE, container=self.FOLDER) + obj = self.operator_cloud.object_store.get_object_metadata( + self.FILE, container=self.FOLDER + ) self.assertFalse(obj.metadata) # set no custom metadata - self.conn.object_store.set_object_metadata(obj) - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.set_object_metadata(obj) + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertFalse(obj.metadata) # set empty custom metadata - self.conn.object_store.set_object_metadata(obj, k0='') - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.set_object_metadata(obj, k0='') + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertFalse(obj.metadata) # set custom metadata - self.conn.object_store.set_object_metadata(obj, k1='v1') - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.set_object_metadata(obj, 
k1='v1') + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertTrue(obj.metadata) self.assertEqual(1, len(obj.metadata)) self.assertIn('k1', obj.metadata) self.assertEqual('v1', obj.metadata['k1']) # set more custom metadata by named object and container - self.conn.object_store.set_object_metadata(self.FILE, self.FOLDER, - k2='v2') - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.set_object_metadata( + self.FILE, self.FOLDER, k2='v2' + ) + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertTrue(obj.metadata) self.assertEqual(2, len(obj.metadata)) self.assertIn('k1', obj.metadata) @@ -136,8 +139,8 @@ def test_custom_metadata(self): self.assertEqual('v2', obj.metadata['k2']) # update custom metadata - self.conn.object_store.set_object_metadata(obj, k1='v1.1') - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.set_object_metadata(obj, k1='v1.1') + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertTrue(obj.metadata) self.assertEqual(2, len(obj.metadata)) self.assertIn('k1', obj.metadata) @@ -146,8 +149,10 @@ def test_custom_metadata(self): self.assertEqual('v2', obj.metadata['k2']) # unset custom metadata - self.conn.object_store.delete_object_metadata(obj, keys=['k1']) - obj = self.conn.object_store.get_object_metadata(obj) + self.operator_cloud.object_store.delete_object_metadata( + obj, keys=['k1'] + ) + obj = self.operator_cloud.object_store.get_object_metadata(obj) self.assertTrue(obj.metadata) self.assertEqual(1, len(obj.metadata)) self.assertIn('k2', obj.metadata) diff --git a/openstack/tests/functional/orchestration/v1/test_stack.py b/openstack/tests/functional/orchestration/v1/test_stack.py index 6da674840c..1ed1412dd9 100644 --- a/openstack/tests/functional/orchestration/v1/test_stack.py +++ b/openstack/tests/functional/orchestration/v1/test_stack.py @@ -10,7 +10,7 @@ # License for the specific language 
governing permissions and limitations # under the License. -import unittest +import yaml from openstack import exceptions from openstack.orchestration.v1 import stack @@ -18,58 +18,89 @@ from openstack.tests.functional.network.v2 import test_network -@unittest.skip("bug/1525005") class TestStack(base.BaseFunctionalTest): - NAME = 'test_stack' - stack = None - network = None - subnet = None - cidr = '10.99.99.0/16' + CIDR = '10.99.99.0/16' + + _wait_for_timeout_key = 'OPENSTACKSDK_FUNC_TEST_TIMEOUT_ORCHESTRATION' - @classmethod - def setUpClass(cls): - super(TestStack, cls).setUpClass() - if cls.conn.compute.find_keypair(cls.NAME) is None: - cls.conn.compute.create_keypair(name=cls.NAME) - image = next(cls.conn.image.images()) + def setUp(self): + super().setUp() + self.require_service('orchestration') + + if self.operator_cloud.compute.find_keypair(self.NAME) is None: + self.operator_cloud.compute.create_keypair(name=self.NAME) + image = next(self.operator_cloud.image.images()) tname = "openstack/tests/functional/orchestration/v1/hello_world.yaml" with open(tname) as f: - template = f.read() - cls.network, cls.subnet = test_network.create_network(cls.conn, - cls.NAME, - cls.cidr) + template = yaml.safe_load(f) + # TODO(mordred) Fix the need for this. We have better support in + # the shade layer. 
+ template['heat_template_version'] = '2013-05-23' + self.network, self.subnet = test_network.create_network( + self.operator_cloud, self.NAME, self.CIDR + ) parameters = { 'image': image.id, - 'key_name': cls.NAME, - 'network': cls.network.id, + 'key_name': self.NAME, + 'network': self.network.id, } - sot = cls.conn.orchestration.create_stack( - name=cls.NAME, + sot = self.operator_cloud.orchestration.create_stack( + name=self.NAME, parameters=parameters, template=template, ) - assert isinstance(sot, stack.Stack) - cls.assertIs(True, (sot.id is not None)) - cls.stack = sot - cls.assertIs(cls.NAME, sot.name) - cls.conn.orchestration.wait_for_status( - sot, status='CREATE_COMPLETE', failures=['CREATE_FAILED']) + self.assertIsInstance(sot, stack.Stack) + self.assertIsNotNone(sot.id) + self.assertEqual(self.NAME, sot.name) + self.stack = sot + self.operator_cloud.orchestration.wait_for_status( + sot, + status='CREATE_COMPLETE', + failures=['CREATE_FAILED'], + wait=self._wait_for_timeout, + ) - @classmethod - def tearDownClass(cls): - super(TestStack, cls).tearDownClass() - cls.conn.orchestration.delete_stack(cls.stack, ignore_missing=False) - cls.conn.compute.delete_keypair(cls.NAME) + def tearDown(self): + self.operator_cloud.orchestration.delete_stack( + self.stack, ignore_missing=False + ) + self.operator_cloud.compute.delete_keypair(self.NAME) # Need to wait for the stack to go away before network delete try: - cls.conn.orchestration.wait_for_status( - cls.stack, 'DELETE_COMPLETE') + self.operator_cloud.orchestration.wait_for_status( + self.stack, 'DELETE_COMPLETE', wait=self._wait_for_timeout + ) except exceptions.NotFoundException: pass - cls.linger_for_delete() - test_network.delete_network(cls.conn, cls.network, cls.subnet) + test_network.delete_network( + self.operator_cloud, self.network, self.subnet + ) + super().tearDown() def test_list(self): - names = [o.name for o in self.conn.orchestration.stacks()] + names = [o.name for o in 
self.operator_cloud.orchestration.stacks()] self.assertIn(self.NAME, names) + + def test_suspend_resume(self): + # given + suspend_status = "SUSPEND_COMPLETE" + resume_status = "RESUME_COMPLETE" + + # when + self.operator_cloud.orchestration.suspend_stack(self.stack) + self.stack = self.operator_cloud.orchestration.wait_for_status( + self.stack, suspend_status, wait=self._wait_for_timeout + ) + + # then + self.assertEqual(suspend_status, self.stack.status) + + # when + self.operator_cloud.orchestration.resume_stack(self.stack) + self.stack = self.operator_cloud.orchestration.wait_for_status( + self.stack, resume_status, wait=self._wait_for_timeout + ) + + # then + self.assertEqual(resume_status, self.stack.status) diff --git a/openstack/tests/functional/placement/__init__.py b/openstack/tests/functional/placement/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/placement/v1/__init__.py b/openstack/tests/functional/placement/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/functional/placement/v1/test_resource_provider.py b/openstack/tests/functional/placement/v1/test_resource_provider.py new file mode 100644 index 0000000000..35b7e9d24f --- /dev/null +++ b/openstack/tests/functional/placement/v1/test_resource_provider.py @@ -0,0 +1,119 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.placement.v1 import resource_provider as _resource_provider +from openstack.tests.functional import base + + +class TestResourceProvider(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud.has_service('placement'): + self.skipTest('placement service not supported by cloud') + + self.resource_provider_name = self.getUniqueString() + + resource_provider = ( + self.operator_cloud.placement.create_resource_provider( + name=self.resource_provider_name, + ) + ) + self.assertIsInstance( + resource_provider, _resource_provider.ResourceProvider + ) + self.assertEqual(self.resource_provider_name, resource_provider.name) + self.resource_provider = resource_provider + + def tearDown(self): + result = self.operator_cloud.placement.delete_resource_provider( + self.resource_provider, + ) + self.assertIsNone(result) + super().tearDown() + + def test_resource_provider(self): + # list all resource providers + + resource_providers = list( + self.operator_cloud.placement.resource_providers() + ) + self.assertIsInstance( + resource_providers[0], + _resource_provider.ResourceProvider, + ) + self.assertIn( + self.resource_provider_name, + {x.name for x in resource_providers}, + ) + + # retrieve details of the resource provider by name + + resource_provider = ( + self.operator_cloud.placement.find_resource_provider( + self.resource_provider.name, + ) + ) + self.assertEqual(self.resource_provider_name, resource_provider.name) + + # retrieve details of the resource provider by ID + + resource_provider = ( + self.operator_cloud.placement.get_resource_provider( + self.resource_provider.id, + ) + ) + self.assertEqual(self.resource_provider_name, resource_provider.name) + + # update the resource provider + + new_resource_provider_name = self.getUniqueString() + + resource_provider = ( + self.operator_cloud.placement.update_resource_provider( + self.resource_provider, + name=new_resource_provider_name, + 
generation=self.resource_provider.generation, + ) + ) + self.assertIsInstance( + resource_provider, + _resource_provider.ResourceProvider, + ) + self.assertEqual( + new_resource_provider_name, + resource_provider.name, + ) + + def test_resource_provider_aggregates(self): + aggregates = [uuid.uuid4().hex, uuid.uuid4().hex] + + # update the resource provider aggregates + + resource_provider = ( + self.operator_cloud.placement.set_resource_provider_aggregates( + self.resource_provider, + *aggregates, + ) + ) + self.assertCountEqual(aggregates, resource_provider.aggregates) + + # retrieve details of resource provider aggregates + + resource_provider = ( + self.operator_cloud.placement.get_resource_provider_aggregates( + self.resource_provider, + ) + ) + self.assertCountEqual(aggregates, resource_provider.aggregates) diff --git a/openstack/tests/functional/placement/v1/test_resource_provider_inventory.py b/openstack/tests/functional/placement/v1/test_resource_provider_inventory.py new file mode 100644 index 0000000000..0294781de4 --- /dev/null +++ b/openstack/tests/functional/placement/v1/test_resource_provider_inventory.py @@ -0,0 +1,163 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.placement.v1 import resource_class as _resource_class +from openstack.placement.v1 import resource_provider as _resource_provider +from openstack.placement.v1 import ( + resource_provider_inventory as _resource_provider_inventory, +) +from openstack.tests.functional import base + + +class TestResourceProviderInventory(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud.has_service('placement'): + self.skipTest('placement service not supported by cloud') + + self.resource_provider_name = self.getUniqueString() + self.resource_class_name = f'CUSTOM_{uuid.uuid4().hex.upper()}' + + resource_class = self.operator_cloud.placement.create_resource_class( + name=self.resource_class_name, + ) + self.assertIsInstance(resource_class, _resource_class.ResourceClass) + self.assertEqual(self.resource_class_name, resource_class.name) + + resource_provider = ( + self.operator_cloud.placement.create_resource_provider( + name=self.resource_provider_name, + ) + ) + self.assertIsInstance( + resource_provider, + _resource_provider.ResourceProvider, + ) + self.assertEqual(self.resource_provider_name, resource_provider.name) + + self.resource_provider = resource_provider + self.resource_class = resource_class + + def tearDown(self): + self.operator_cloud.placement.delete_resource_provider( + self.resource_provider, + ) + self.operator_cloud.placement.delete_resource_class( + self.resource_class, + ) + super().tearDown() + + def test_resource_provider_inventory(self): + # create the resource provider inventory + + resource_provider_inventory = ( + self.operator_cloud.placement.create_resource_provider_inventory( + self.resource_provider, + resource_class=self.resource_class, + total=10, + step_size=1, + ) + ) + self.assertIsInstance( + resource_provider_inventory, + _resource_provider_inventory.ResourceProviderInventory, + ) + self.assertEqual( + self.resource_class.name, + resource_provider_inventory.resource_class, 
+ ) + self.assertEqual(10, resource_provider_inventory.total) + + # list all resource provider inventories (there should only be one) + + resource_provider_inventories = list( + self.operator_cloud.placement.resource_provider_inventories( + self.resource_provider + ) + ) + self.assertIsInstance( + resource_provider_inventories[0], + _resource_provider_inventory.ResourceProviderInventory, + ) + self.assertIn( + self.resource_class.name, + {rpi.id for rpi in resource_provider_inventories}, + ) + + # update the resource provider inventory + + resource_provider_inventory = self.operator_cloud.placement.update_resource_provider_inventory( # noqa: E501 + resource_provider_inventory, + total=20, + resource_provider_generation=resource_provider_inventory.resource_provider_generation, + ) + self.assertIsInstance( + resource_provider_inventory, + _resource_provider_inventory.ResourceProviderInventory, + ) + self.assertEqual( + self.resource_class.name, + resource_provider_inventory.id, + ) + self.assertEqual(20, resource_provider_inventory.total) + + # retrieve details of the (updated) resource provider inventory + + resource_provider_inventory = ( + self.operator_cloud.placement.get_resource_provider_inventory( + resource_provider_inventory, + ) + ) + self.assertIsInstance( + resource_provider_inventory, + _resource_provider_inventory.ResourceProviderInventory, + ) + self.assertEqual( + self.resource_class.name, + resource_provider_inventory.id, + ) + self.assertEqual(20, resource_provider_inventory.total) + + # retrieve details of the resource provider inventory using IDs + # (requires us to provide the resource provider also) + + resource_provider_inventory = ( + self.operator_cloud.placement.get_resource_provider_inventory( + resource_provider_inventory.id, + self.resource_provider, + ) + ) + self.assertIsInstance( + resource_provider_inventory, + _resource_provider_inventory.ResourceProviderInventory, + ) + self.assertEqual( + self.resource_class.name, + 
resource_provider_inventory.id, + ) + self.assertEqual(20, resource_provider_inventory.total) + + # (no find_resource_provider_inventory method) + + # delete the resource provider inventory + + result = ( + self.operator_cloud.placement.delete_resource_provider_inventory( + resource_provider_inventory, + self.resource_provider, + ignore_missing=False, + ) + ) + self.assertIsNone(result) diff --git a/openstack/tests/functional/placement/v1/test_trait.py b/openstack/tests/functional/placement/v1/test_trait.py new file mode 100644 index 0000000000..b06586d0f6 --- /dev/null +++ b/openstack/tests/functional/placement/v1/test_trait.py @@ -0,0 +1,67 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.placement.v1 import trait as _trait +from openstack.tests.functional import base + + +class TestTrait(base.BaseFunctionalTest): + def setUp(self): + super().setUp() + + self.skipTest( + "This test intermittently fails on DevStack deployments. " + "See https://bugs.launchpad.net/placement/+bug/2029520 for more " + "information." 
+        )
+
+        if not self.operator_cloud.has_service('placement'):
+            self.skipTest('placement service not supported by cloud')
+
+        self.trait_name = f'CUSTOM_{uuid.uuid4().hex.upper()}'
+
+        trait = self.operator_cloud.placement.create_trait(
+            name=self.trait_name,
+        )
+        self.assertIsInstance(trait, _trait.Trait)
+        self.assertEqual(self.trait_name, trait.name)
+
+        self.trait = trait
+
+    def tearDown(self):
+        self.operator_cloud.placement.delete_trait(self.trait)
+        super().tearDown()
+
+    def test_trait(self):
+        # list all traits
+
+        traits = list(self.operator_cloud.placement.traits())
+        self.assertIsInstance(traits[0], _trait.Trait)
+        self.assertIn(self.trait.name, {x.id for x in traits})
+
+        # (no update_trait method)
+
+        # retrieve details of the trait
+
+        trait = self.operator_cloud.placement.get_trait(self.trait)
+        self.assertIsInstance(trait, _trait.Trait)
+        self.assertEqual(self.trait_name, trait.id)
+
+        # retrieve details of the trait using IDs
+
+        trait = self.operator_cloud.placement.get_trait(self.trait_name)
+        self.assertIsInstance(trait, _trait.Trait)
+        self.assertEqual(self.trait_name, trait.id)
+
+        # (no find_trait method)
diff --git a/openstack/tests/functional/shared_file_system/__init__.py b/openstack/tests/functional/shared_file_system/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openstack/tests/functional/shared_file_system/v2/__init__.py b/openstack/tests/functional/shared_file_system/v2/__init__.py
new file mode 100644
index 0000000000..e69de29bb2
diff --git a/openstack/tests/functional/shared_file_system/v2/base.py b/openstack/tests/functional/shared_file_system/v2/base.py
new file mode 100644
index 0000000000..ab36de1744
--- /dev/null
+++ b/openstack/tests/functional/shared_file_system/v2/base.py
@@ -0,0 +1,88 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License.
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import resource +from openstack.tests.functional import base + + +class BaseSharedFileSystemTest(base.BaseFunctionalTest): + min_microversion: str | None = None + + def setUp(self): + super().setUp() + self.require_service( + 'shared-file-system', min_microversion=self.min_microversion + ) + self._set_operator_cloud(shared_file_system_api_version='2.82') + self._set_user_cloud(shared_file_system_api_version='2.82') + + def create_share(self, **kwargs): + share = self.user_cloud.share.create_share(**kwargs) + self.addCleanup( + self.user_cloud.share.delete_share, share.id, ignore_missing=True + ) + self.user_cloud.share.wait_for_status( + share, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(share.id) + return share + + def create_share_snapshot(self, share_id, **kwargs): + share_snapshot = self.user_cloud.share.create_share_snapshot( + share_id=share_id, force=True + ) + self.addCleanup( + resource.wait_for_delete, + self.user_cloud.share, + share_snapshot, + wait=self._wait_for_timeout, + interval=2, + ) + self.addCleanup( + self.user_cloud.share.delete_share_snapshot, + share_snapshot.id, + ignore_missing=False, + ) + self.user_cloud.share.wait_for_status( + share_snapshot, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(share_snapshot.id) + return share_snapshot + + def create_share_group(self, **kwargs): + share_group = self.user_cloud.share.create_share_group(**kwargs) + self.addCleanup( + 
self.operator_cloud.share.delete_share_group, + share_group.id, + ignore_missing=True, + ) + self.assertIsNotNone(share_group.id) + return share_group + + def create_resource_lock(self, **kwargs): + resource_lock = self.user_cloud.share.create_resource_lock(**kwargs) + self.addCleanup( + self.user_cloud.share.delete_resource_lock, + resource_lock.id, + ignore_missing=True, + ) + self.assertIsNotNone(resource_lock.id) + return resource_lock diff --git a/openstack/tests/functional/shared_file_system/v2/test_availability_zone.py b/openstack/tests/functional/shared_file_system/v2/test_availability_zone.py new file mode 100644 index 0000000000..41bb705855 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_availability_zone.py @@ -0,0 +1,25 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from openstack.tests.functional.shared_file_system.v2 import base
+
+
+class AvailabilityZoneTest(base.BaseSharedFileSystemTest):
+    min_microversion = '2.7'
+
+    def test_availability_zones(self):
+        azs = self.user_cloud.shared_file_system.availability_zones()
+        self.assertGreater(len(list(azs)), 0)
+        for az in azs:
+            for attribute in ('id', 'name', 'created_at', 'updated_at'):
+                self.assertTrue(hasattr(az, attribute))
+                self.assertIsInstance(getattr(az, attribute), str)
diff --git a/openstack/tests/functional/shared_file_system/v2/test_export_locations.py b/openstack/tests/functional/shared_file_system/v2/test_export_locations.py
new file mode 100644
index 0000000000..3fec84d13f
--- /dev/null
+++ b/openstack/tests/functional/shared_file_system/v2/test_export_locations.py
@@ -0,0 +1,49 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from openstack.tests.functional.shared_file_system.v2 import base
+
+
+class TestExportLocation(base.BaseSharedFileSystemTest):
+    min_microversion = '2.9'
+
+    def setUp(self):
+        super().setUp()
+
+        self.SHARE_NAME = self.getUniqueString()
+        my_share = self.create_share(
+            name=self.SHARE_NAME,
+            size=2,
+            share_type="dhss_false",
+            share_protocol='NFS',
+            description=None,
+        )
+        self.SHARE_ID = my_share.id
+
+    def test_export_locations(self):
+        exs = self.user_cloud.shared_file_system.export_locations(
+            self.SHARE_ID
+        )
+        self.assertGreater(len(list(exs)), 0)
+        for ex in exs:
+            for attribute in (
+                'id',
+                'path',
+                'share_instance_id',
+                'updated_at',
+                'created_at',
+            ):
+                self.assertTrue(hasattr(ex, attribute))
+                self.assertIsInstance(getattr(ex, attribute), str)
+            for attribute in ('is_preferred', 'is_admin'):
+                self.assertTrue(hasattr(ex, attribute))
+                self.assertIsInstance(getattr(ex, attribute), bool)
diff --git a/openstack/tests/functional/shared_file_system/v2/test_limit.py b/openstack/tests/functional/shared_file_system/v2/test_limit.py
new file mode 100644
index 0000000000..2bdde57353
--- /dev/null
+++ b/openstack/tests/functional/shared_file_system/v2/test_limit.py
@@ -0,0 +1,37 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+#     http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +from openstack.tests.functional.shared_file_system.v2 import base + + +class LimitTest(base.BaseSharedFileSystemTest): + def test_limits(self): + limits = self.user_cloud.shared_file_system.limits() + self.assertGreater(len(list(limits)), 0) + for limit in limits: + for attribute in ( + "maxTotalReplicaGigabytes", + "maxTotalShares", + "maxTotalShareGigabytes", + "maxTotalShareNetworks", + "maxTotalShareSnapshots", + "maxTotalShareReplicas", + "maxTotalSnapshotGigabytes", + "totalReplicaGigabytesUsed", + "totalShareGigabytesUsed", + "totalSharesUsed", + "totalShareNetworksUsed", + "totalShareSnapshotsUsed", + "totalSnapshotGigabytesUsed", + "totalShareReplicasUsed", + ): + self.assertTrue(hasattr(limit, attribute)) diff --git a/openstack/tests/functional/shared_file_system/v2/test_quota_class_set.py b/openstack/tests/functional/shared_file_system/v2/test_quota_class_set.py new file mode 100644 index 0000000000..ea6ac17599 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_quota_class_set.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import quota_class_set as _quota_class_set +from openstack.tests.functional.shared_file_system.v2 import base + + +class QuotaClassSetTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + if not self.operator_cloud: + self.skipTest("Operator cloud required for this test") + + self.project = self.create_temporary_project() + + def test_quota_class_set(self): + # set quota class set + + quota_class_set = self.operator_cloud.share.update_quota_class_set( + self.project.id, backups=123 + ) + self.assertIsInstance(quota_class_set, _quota_class_set.QuotaClassSet) + self.assertEqual(quota_class_set.backups, 123) + + # retrieve details of the (updated) quota class set + + quota_class_set = self.operator_cloud.share.get_quota_class_set( + self.project.id + ) + self.assertIsInstance(quota_class_set, _quota_class_set.QuotaClassSet) + self.assertEqual(quota_class_set.backups, 123) diff --git a/openstack/tests/functional/shared_file_system/v2/test_resource_lock.py b/openstack/tests/functional/shared_file_system/v2/test_resource_lock.py new file mode 100644 index 0000000000..52e4dda593 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_resource_lock.py @@ -0,0 +1,96 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import resource_locks as _resource_locks +from openstack.tests.functional.shared_file_system.v2 import base + + +class ResourceLocksTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.SHARE_NAME = self.getUniqueString() + share = self.user_cloud.shared_file_system.create_share( + name=self.SHARE_NAME, + size=2, + share_type="dhss_false", + share_protocol='NFS', + description=None, + ) + self.SHARE_ID = share.id + self.user_cloud.shared_file_system.wait_for_status( + share, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + access_rule = self.user_cloud.share.create_access_rule( + self.SHARE_ID, + access_level="rw", + access_type="ip", + access_to="0.0.0.0/0", + ) + self.user_cloud.shared_file_system.wait_for_status( + access_rule, + status='active', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + attribute='state', + ) + self.assertIsNotNone(share) + self.assertIsNotNone(share.id) + self.ACCESS_ID = access_rule.id + share_lock = self.create_resource_lock( + resource_action='delete', + resource_type='share', + resource_id=self.SHARE_ID, + lock_reason='openstacksdk testing', + ) + access_lock = self.create_resource_lock( + resource_action='show', + resource_type='access_rule', + resource_id=self.ACCESS_ID, + lock_reason='openstacksdk testing', + ) + self.SHARE_LOCK_ID = share_lock.id + self.ACCESS_LOCK_ID = access_lock.id + + def test_get(self): + share_lock = self.user_cloud.shared_file_system.get_resource_lock( + self.SHARE_LOCK_ID + ) + access_lock = self.user_cloud.shared_file_system.get_resource_lock( + self.ACCESS_LOCK_ID + ) + assert isinstance(share_lock, _resource_locks.ResourceLock) + assert isinstance(access_lock, _resource_locks.ResourceLock) + self.assertEqual(self.SHARE_LOCK_ID, share_lock.id) + self.assertEqual(self.ACCESS_LOCK_ID, access_lock.id) + self.assertEqual('show', access_lock.resource_action) + + def 
test_list(self): + resource_locks = self.user_cloud.share.resource_locks() + self.assertGreater(len(list(resource_locks)), 0) + lock_attrs = ( + 'id', + 'lock_reason', + 'resource_type', + 'resource_action', + 'lock_context', + 'created_at', + 'updated_at', + ) + for lock in resource_locks: + for attribute in lock_attrs: + self.assertTrue(hasattr(lock, attribute)) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share.py b/openstack/tests/functional/shared_file_system/v2/test_share.py new file mode 100644 index 0000000000..1437c5aab5 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share.py @@ -0,0 +1,218 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.shared_file_system.v2 import share as _share +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.SHARE_NAME = self.getUniqueString() + my_share = self.create_share( + name=self.SHARE_NAME, + size=2, + share_type="dhss_false", + share_protocol='NFS', + description=None, + ) + self.SHARE_ID = my_share.id + self.SHARE_SIZE = my_share.size + my_share_snapshot = self.create_share_snapshot(share_id=self.SHARE_ID) + self.SHARE_SNAPSHOT_ID = my_share_snapshot.id + + def test_get(self): + sot = self.user_cloud.share.get_share(self.SHARE_ID) + assert isinstance(sot, _share.Share) + self.assertEqual(self.SHARE_ID, sot.id) + + def test_find(self): + sot = self.user_cloud.share.find_share(name_or_id=self.SHARE_NAME) + assert isinstance(sot, _share.Share) + self.assertEqual(self.SHARE_ID, sot.id) + + def test_list_share(self): + shares = self.user_cloud.share.shares(details=False) + self.assertGreater(len(list(shares)), 0) + for share in shares: + for attribute in ('id', 'name', 'created_at', 'updated_at'): + self.assertTrue(hasattr(share, attribute)) + + def test_update(self): + updated_share = self.user_cloud.share.update_share( + self.SHARE_ID, display_description='updated share' + ) + get_updated_share = self.user_cloud.share.get_share(updated_share.id) + self.assertEqual('updated share', get_updated_share.description) + + def test_revert_share_to_snapshot(self): + self.user_cloud.share.revert_share_to_snapshot( + self.SHARE_ID, self.SHARE_SNAPSHOT_ID + ) + get_reverted_share = self.user_cloud.share.get_share(self.SHARE_ID) + self.user_cloud.share.wait_for_status( + get_reverted_share, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(get_reverted_share.id) + + def test_resize_share_larger(self): + larger_size = 3 + 
self.user_cloud.share.resize_share(self.SHARE_ID, larger_size)
+
+        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
+
+        self.user_cloud.share.wait_for_status(
+            get_resized_share,
+            status='available',
+            failures=['error'],
+            interval=5,
+            wait=self._wait_for_timeout,
+        )
+        self.assertEqual(larger_size, get_resized_share.size)
+
+    def test_resize_share_smaller(self):
+        # Resize to 1 GiB
+        smaller_size = 1
+
+        self.user_cloud.share.resize_share(self.SHARE_ID, smaller_size)
+
+        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
+
+        self.user_cloud.share.wait_for_status(
+            get_resized_share,
+            status='available',
+            failures=['error'],
+            interval=5,
+            wait=self._wait_for_timeout,
+        )
+        self.assertEqual(smaller_size, get_resized_share.size)
+
+    def test_resize_share_larger_no_extend(self):
+        larger_size = 3
+
+        self.user_cloud.share.resize_share(
+            self.SHARE_ID, larger_size, no_extend=True
+        )
+
+        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
+
+        self.user_cloud.share.wait_for_status(
+            get_resized_share,
+            status='available',
+            failures=['error'],
+            interval=5,
+            wait=self._wait_for_timeout,
+        )
+
+        # Assert that no change was made.
+        self.assertEqual(self.SHARE_SIZE, get_resized_share.size)
+
+    def test_resize_share_smaller_no_shrink(self):
+        smaller_size = 1
+
+        self.user_cloud.share.resize_share(
+            self.SHARE_ID, smaller_size, no_shrink=True
+        )
+
+        get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID)
+
+        self.user_cloud.share.wait_for_status(
+            get_resized_share,
+            status='available',
+            failures=['error'],
+            interval=5,
+            wait=self._wait_for_timeout,
+        )
+
+        # Assert that no change was made.
+ self.assertEqual(self.SHARE_SIZE, get_resized_share.size) + + def test_resize_share_with_force(self): + """Test that extend with force works as expected.""" + # Resize to 3 GiB + larger_size = 3 + self.operator_cloud.share.resize_share( + self.SHARE_ID, larger_size, force=True + ) + + get_resized_share = self.user_cloud.share.get_share(self.SHARE_ID) + + self.user_cloud.share.wait_for_status( + get_resized_share, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertEqual(larger_size, get_resized_share.size) + + +class ManageUnmanageShareTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.NEW_SHARE = self.create_share( + share_proto="NFS", + name="accounting_p8787", + size=2, + ) + self.SHARE_ID = self.NEW_SHARE.id + + self.export_locations = self.operator_cloud.share.export_locations( + self.SHARE_ID + ) + export_paths = [export['path'] for export in self.export_locations] + self.export_path = export_paths[0] + + self.share_host = self.operator_cloud.share.get_share(self.SHARE_ID)[ + 'host' + ] + + def test_manage_and_unmanage_share(self): + self.operator_cloud.share.unmanage_share(self.SHARE_ID) + + self.operator_cloud.shared_file_system.wait_for_delete( + self.NEW_SHARE, interval=2, wait=self._wait_for_timeout + ) + + try: + self.operator_cloud.share.get_share(self.SHARE_ID) + except exceptions.NotFoundException: + pass + + managed_share = self.operator_cloud.share.manage_share( + self.NEW_SHARE.share_protocol, self.export_path, self.share_host + ) + + self.operator_cloud.share.wait_for_status( + managed_share, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + + self.assertEqual( + self.NEW_SHARE.share_protocol, managed_share.share_protocol + ) + + managed_host = self.operator_cloud.share.get_share(managed_share.id)[ + 'host' + ] + + self.assertEqual(self.share_host, managed_host) diff --git 
a/openstack/tests/functional/shared_file_system/v2/test_share_access_rule.py b/openstack/tests/functional/shared_file_system/v2/test_share_access_rule.py new file mode 100644 index 0000000000..b59f125e5d --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_access_rule.py @@ -0,0 +1,92 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareAccessRuleTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.SHARE_NAME = self.getUniqueString() + mys = self.create_share( + name=self.SHARE_NAME, + size=2, + share_type="dhss_false", + share_protocol='NFS', + description=None, + ) + self.user_cloud.shared_file_system.wait_for_status( + mys, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(mys) + self.assertIsNotNone(mys.id) + self.SHARE_ID = mys.id + self.SHARE = mys + access_rule = self.user_cloud.share.create_access_rule( + self.SHARE_ID, + access_level="rw", + access_type="ip", + access_to="0.0.0.0/0", + ) + self.ACCESS_ID = access_rule.id + self.RESOURCE_KEY = access_rule.resource_key + + def tearDown(self): + self.user_cloud.share.delete_access_rule( + self.ACCESS_ID, self.SHARE_ID, ignore_missing=True + ) + super().tearDown() + + def test_get_access_rule(self): + sot = self.user_cloud.shared_file_system.get_access_rule( + self.ACCESS_ID + ) + self.assertEqual(self.ACCESS_ID, 
sot.id) + + def test_list_access_rules(self): + rules = self.user_cloud.shared_file_system.access_rules( + self.SHARE, details=True + ) + self.assertGreater(len(list(rules)), 0) + for rule in rules: + for attribute in ( + 'id', + 'created_at', + 'updated_at', + 'access_level', + 'access_type', + 'access_to', + 'share_id', + 'access_key', + 'metadata', + ): + self.assertTrue(hasattr(rule, attribute)) + + def test_create_delete_access_rule_with_locks(self): + access_rule = self.user_cloud.share.create_access_rule( + self.SHARE_ID, + access_level="rw", + access_type="ip", + access_to="203.0.113.10", + lock_deletion=True, + lock_visibility=True, + ) + + self.user_cloud.share.delete_access_rule( + access_rule['id'], self.SHARE_ID, unrestrict=True + ) + self.user_cloud.shared_file_system.wait_for_delete(access_rule) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_group.py b/openstack/tests/functional/shared_file_system/v2/test_share_group.py new file mode 100644 index 0000000000..d3161b8bab --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_group.py @@ -0,0 +1,68 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import share_group as _share_group +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareGroupTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.SHARE_GROUP_NAME = self.getUniqueString() + share_grp = self.user_cloud.shared_file_system.create_share_group( + name=self.SHARE_GROUP_NAME + ) + self.user_cloud.shared_file_system.wait_for_status( + share_grp, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(share_grp) + self.assertIsNotNone(share_grp.id) + self.SHARE_GROUP_ID = share_grp.id + + def test_get(self): + sot = self.user_cloud.shared_file_system.get_share_group( + self.SHARE_GROUP_ID + ) + assert isinstance(sot, _share_group.ShareGroup) + self.assertEqual(self.SHARE_GROUP_ID, sot.id) + + def test_find(self): + sot = self.user_cloud.shared_file_system.find_share_group( + self.SHARE_GROUP_NAME + ) + assert isinstance(sot, _share_group.ShareGroup) + self.assertEqual(self.SHARE_GROUP_NAME, sot.name) + self.assertEqual(self.SHARE_GROUP_ID, sot.id) + + def test_list_delete_share_group(self): + s_grps = self.user_cloud.shared_file_system.share_groups() + self.assertGreater(len(list(s_grps)), 0) + for s_grp in s_grps: + for attribute in ('id', 'name', 'created_at'): + self.assertTrue(hasattr(s_grp, attribute)) + + sot = self.operator_cloud.shared_file_system.delete_share_group( + s_grp + ) + self.assertIsNone(sot) + + def test_update(self): + u_gp = self.user_cloud.shared_file_system.update_share_group( + self.SHARE_GROUP_ID, description='updated share group' + ) + get_u_gp = self.user_cloud.shared_file_system.get_share_group(u_gp.id) + self.assertEqual('updated share group', get_u_gp.description) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_group_snapshot.py b/openstack/tests/functional/shared_file_system/v2/test_share_group_snapshot.py new file mode 100644 index 
0000000000..1dd8f80ee5 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_group_snapshot.py @@ -0,0 +1,105 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import resource +from openstack.shared_file_system.v2 import ( + share_group_snapshot as _share_group_snapshot, +) +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareGroupSnapshotTest(base.BaseSharedFileSystemTest): + min_microversion = '2.55' + + def setUp(self): + super().setUp() + + self.SHARE_GROUP_NAME = self.getUniqueString() + share_grp = self.user_cloud.shared_file_system.create_share_group( + name=self.SHARE_GROUP_NAME + ) + self.user_cloud.shared_file_system.wait_for_status( + share_grp, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(share_grp) + self.assertIsNotNone(share_grp.id) + self.SHARE_GROUP_ID = share_grp.id + + self.SHARE_GROUP_SNAPSHOT_NAME = self.getUniqueString() + grp_ss = ( + self.user_cloud.shared_file_system.create_share_group_snapshot( + self.SHARE_GROUP_ID, name=self.SHARE_GROUP_SNAPSHOT_NAME + ) + ) + self.user_cloud.shared_file_system.wait_for_status( + grp_ss, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(grp_ss) + self.assertIsNotNone(grp_ss.id) + self.SHARE_GROUP_SNAPSHOT_ID = grp_ss.id + + def tearDown(self): + sot = 
self.user_cloud.shared_file_system.get_share_group_snapshot( + self.SHARE_GROUP_SNAPSHOT_ID + ) + self.user_cloud.shared_file_system.delete_share_group_snapshot( + self.SHARE_GROUP_SNAPSHOT_ID, ignore_missing=False + ) + resource.wait_for_delete( + self.user_cloud.share, sot, wait=self._wait_for_timeout, interval=2 + ) + self.user_cloud.shared_file_system.delete_share_group( + self.SHARE_GROUP_ID, ignore_missing=False + ) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.shared_file_system.get_share_group_snapshot( + self.SHARE_GROUP_SNAPSHOT_ID + ) + assert isinstance(sot, _share_group_snapshot.ShareGroupSnapshot) + self.assertEqual(self.SHARE_GROUP_SNAPSHOT_ID, sot.id) + + def test_list(self): + snapshots = self.user_cloud.shared_file_system.share_group_snapshots() + self.assertGreater(len(list(snapshots)), 0) + for snapshot in snapshots: + for attribute in ('id', 'name', 'created_at'): + self.assertTrue(hasattr(snapshot, attribute)) + + def test_update(self): + u_ss = self.user_cloud.shared_file_system.update_share_group_snapshot( + self.SHARE_GROUP_SNAPSHOT_ID, + description='updated share group snapshot', + ) + get_u_ss = self.user_cloud.shared_file_system.get_share_group_snapshot( + u_ss.id + ) + self.assertEqual('updated share group snapshot', get_u_ss.description) + + def test_reset(self): + res = self.operator_cloud.shared_file_system.reset_share_group_snapshot_status( # noqa: E501 + self.SHARE_GROUP_SNAPSHOT_ID, 'error' + ) + self.assertIsNone(res) + sot = self.user_cloud.shared_file_system.get_share_group_snapshot( + self.SHARE_GROUP_SNAPSHOT_ID + ) + self.assertEqual('error', sot.status) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_instance.py b/openstack/tests/functional/shared_file_system/v2/test_share_instance.py new file mode 100644 index 0000000000..082f2c0b90 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_instance.py @@ -0,0 +1,83 @@ +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack import resource +from openstack.shared_file_system.v2 import share_instance as _share_instance +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareInstanceTest(base.BaseSharedFileSystemTest): + min_microversion = '2.7' + + def setUp(self): + super().setUp() + + self.SHARE_NAME = self.getUniqueString() + my_share = self.create_share( + name=self.SHARE_NAME, + size=2, + share_type="dhss_false", + share_protocol='NFS', + description=None, + ) + self.SHARE_ID = my_share.id + instances_list = self.operator_cloud.share.share_instances() + self.SHARE_INSTANCE_ID = None + for i in instances_list: + if i.share_id == self.SHARE_ID: + self.SHARE_INSTANCE_ID = i.id + + def test_get(self): + sot = self.operator_cloud.share.get_share_instance( + self.SHARE_INSTANCE_ID + ) + assert isinstance(sot, _share_instance.ShareInstance) + self.assertEqual(self.SHARE_INSTANCE_ID, sot.id) + + def test_list_share_instances(self): + share_instances = self.operator_cloud.share.share_instances() + self.assertGreater(len(list(share_instances)), 0) + for share_instance in share_instances: + for attribute in ( + 'id', + 'name', + 'created_at', + 'access_rules_status', + 'availability_zone', + ): + self.assertTrue(hasattr(share_instance, attribute)) + + def test_reset(self): + res = self.operator_cloud.share.reset_share_instance_status( + self.SHARE_INSTANCE_ID, 'error' + ) + self.assertIsNone(res) + sot = 
self.operator_cloud.share.get_share_instance( + self.SHARE_INSTANCE_ID + ) + self.assertEqual('error', sot.status) + + def test_delete(self): + sot = self.operator_cloud.share.get_share_instance( + self.SHARE_INSTANCE_ID + ) + fdel = self.operator_cloud.share.delete_share_instance( + self.SHARE_INSTANCE_ID + ) + resource.wait_for_delete( + self.operator_cloud.share, + sot, + wait=self._wait_for_timeout, + interval=2, + ) + self.assertIsNone(fdel) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_metadata.py b/openstack/tests/functional/shared_file_system/v2/test_share_metadata.py new file mode 100644 index 0000000000..46c6089e5f --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_metadata.py @@ -0,0 +1,121 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.shared_file_system.v2 import share as _share +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareMetadataTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.SHARE_NAME = self.getUniqueString() + my_share = self.create_share( + name=self.SHARE_NAME, + size=2, + share_type="dhss_false", + share_protocol='NFS', + description=None, + ) + self.SHARE_ID = my_share.id + self.assertIsNotNone(my_share) + self.assertIsNotNone(my_share.id) + + def test_create(self): + meta = {"foo": "bar"} + created_share = ( + self.user_cloud.shared_file_system.create_share_metadata( + self.SHARE_ID, **meta + ) + ) + assert isinstance(created_share, _share.Share) + self.assertEqual(created_share['metadata'], meta) + + def test_get_item(self): + meta = {"foo": "bar"} + created_share = ( + self.user_cloud.shared_file_system.create_share_metadata( + self.SHARE_ID, **meta + ) + ) + returned_share = ( + self.user_cloud.shared_file_system.get_share_metadata_item( + self.SHARE_ID, "foo" + ) + ) + self.assertEqual( + created_share['metadata']['foo'], returned_share['metadata']['foo'] + ) + + def test_get(self): + meta = {"foo": "bar"} + created_share = ( + self.user_cloud.shared_file_system.create_share_metadata( + self.SHARE_ID, **meta + ) + ) + returned_share = self.user_cloud.shared_file_system.get_share_metadata( + self.SHARE_ID + ) + self.assertEqual( + created_share['metadata']['foo'], returned_share['metadata']['foo'] + ) + + def test_update(self): + meta = {"foo": "bar"} + created_share = ( + self.user_cloud.shared_file_system.create_share_metadata( + self.SHARE_ID, **meta + ) + ) + + new_meta = {"newFoo": "newBar"} + full_meta = {"foo": "bar", "newFoo": "newBar"} + empty_meta: dict[str, str] = {} + + updated_share = ( + self.user_cloud.shared_file_system.update_share_metadata( + created_share, new_meta + ) + ) + self.assertEqual(updated_share['metadata'], new_meta) + + full_metadata = 
self.user_cloud.shared_file_system.get_share_metadata( + created_share + )['metadata'] + self.assertEqual(full_metadata, full_meta) + + share_with_deleted_metadata = ( + self.user_cloud.shared_file_system.update_share_metadata( + updated_share, empty_meta + ) + ) + self.assertEqual(share_with_deleted_metadata['metadata'], empty_meta) + + def test_delete(self): + meta = {"foo": "bar", "newFoo": "newBar"} + created_share = ( + self.user_cloud.shared_file_system.create_share_metadata( + self.SHARE_ID, **meta + ) + ) + + self.user_cloud.shared_file_system.delete_share_metadata( + created_share, ["foo", "invalidKey"] + ) + + deleted_share = self.user_cloud.shared_file_system.get_share_metadata( + self.SHARE_ID + ) + + self.assertEqual(deleted_share['metadata'], {"newFoo": "newBar"}) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_network.py b/openstack/tests/functional/shared_file_system/v2/test_share_network.py new file mode 100644 index 0000000000..ed82754d19 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_network.py @@ -0,0 +1,91 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import share_network as _share_network +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareNetworkTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.NETWORK_NAME = self.getUniqueString() + net = self.user_cloud.network.create_network(name=self.NETWORK_NAME) + self.assertIsNotNone(net) + self.assertIsNotNone(net.id) + self.NETWORK_ID = net.id + + self.SUBNET_NAME = self.getUniqueString() + subnet = self.user_cloud.network.create_subnet( + name=self.SUBNET_NAME, + network_id=self.NETWORK_ID, + ip_version=4, + cidr='10.0.0.0/24', + ) + self.SUBNET_ID = subnet.id + + self.SHARE_NETWORK_NAME = self.getUniqueString() + snt = self.user_cloud.shared_file_system.create_share_network( + name=self.SHARE_NETWORK_NAME, + neutron_net_id=self.NETWORK_ID, + neutron_subnet_id=self.SUBNET_ID, + ) + + self.assertIsNotNone(snt) + self.assertIsNotNone(snt.id) + self.SHARE_NETWORK_ID = snt.id + + def tearDown(self): + sot = self.user_cloud.shared_file_system.delete_share_network( + self.SHARE_NETWORK_ID, ignore_missing=True + ) + self.assertIsNone(sot) + self.user_cloud.network.delete_network(self.NETWORK_ID) + super().tearDown() + + def test_get(self): + sot = self.user_cloud.shared_file_system.get_share_network( + self.SHARE_NETWORK_ID + ) + assert isinstance(sot, _share_network.ShareNetwork) + self.assertEqual(self.SHARE_NETWORK_ID, sot.id) + self.assertIsNotNone(sot.share_network_subnets) + self.assertEqual( + self.NETWORK_ID, + sot.share_network_subnets[0]['neutron_net_id'], + ) + self.assertEqual( + self.SUBNET_ID, + sot.share_network_subnets[0]['neutron_subnet_id'], + ) + + def test_list_share_network(self): + share_nets = self.user_cloud.shared_file_system.share_networks( + details=False + ) + self.assertGreater(len(list(share_nets)), 0) + for share_net in share_nets: + for attribute in ('id', 'name', 'created_at', 'updated_at'): + self.assertTrue(hasattr(share_net, attribute)) 
+ + def test_delete_share_network(self): + sot = self.user_cloud.shared_file_system.delete_share_network( + self.SHARE_NETWORK_ID + ) + self.assertIsNone(sot) + + def test_update(self): + unt = self.user_cloud.shared_file_system.update_share_network( + self.SHARE_NETWORK_ID, description='updated share network' + ) + get_unt = self.user_cloud.shared_file_system.get_share_network(unt.id) + self.assertEqual('updated share network', get_unt.description) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_network_subnet.py b/openstack/tests/functional/shared_file_system/v2/test_share_network_subnet.py new file mode 100644 index 0000000000..b59769a907 --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_network_subnet.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import ( + share_network_subnet as _share_network_subnet, +) +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareNetworkSubnetTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + zones = self.user_cloud.shared_file_system.availability_zones() + first_zone = next(zones) + + self.SHARE_NETWORK_NAME = self.getUniqueString() + snt = self.user_cloud.shared_file_system.create_share_network( + name=self.SHARE_NETWORK_NAME + ) + self.assertIsNotNone(snt) + self.assertIsNotNone(snt.id) + self.SHARE_NETWORK_ID = snt.id + snsb = self.user_cloud.shared_file_system.create_share_network_subnet( + self.SHARE_NETWORK_ID, availability_zone=first_zone.name + ) + self.assertIsNotNone(snsb) + self.assertIsNotNone(snsb.id) + self.SHARE_NETWORK_SUBNET_ID = snsb.id + + def tearDown(self): + subnet = self.user_cloud.shared_file_system.get_share_network_subnet( + self.SHARE_NETWORK_ID, self.SHARE_NETWORK_SUBNET_ID + ) + fdel = self.user_cloud.shared_file_system.delete_share_network_subnet( + self.SHARE_NETWORK_ID, + self.SHARE_NETWORK_SUBNET_ID, + ignore_missing=True, + ) + self.assertIsNone(fdel) + self.user_cloud.shared_file_system.wait_for_delete(subnet) + sot = self.user_cloud.shared_file_system.delete_share_network( + self.SHARE_NETWORK_ID, ignore_missing=True + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sub = self.user_cloud.shared_file_system.get_share_network_subnet( + self.SHARE_NETWORK_ID, self.SHARE_NETWORK_SUBNET_ID + ) + assert isinstance(sub, _share_network_subnet.ShareNetworkSubnet) + + def test_list(self): + subs = self.user_cloud.shared_file_system.share_network_subnets( + self.SHARE_NETWORK_ID + ) + self.assertGreater(len(list(subs)), 0) + for sub in subs: + for attribute in ( + 'id', + 'name', + 'created_at', + 'updated_at', + 'share_network_id', + 'availability_zone', + 'cidr', + 'gateway', + 'ip_version', + 'mtu', + 'network_type', + 
'neutron_net_id', + 'neutron_subnet_id', + 'segmentation_id', + 'share_network_name', + ): + self.assertTrue(hasattr(sub, attribute)) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_snapshot.py b/openstack/tests/functional/shared_file_system/v2/test_share_snapshot.py new file mode 100644 index 0000000000..156d3ffb8a --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_snapshot.py @@ -0,0 +1,104 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional.shared_file_system.v2 import base + + +class ShareSnapshotTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.SHARE_NAME = self.getUniqueString() + self.SNAPSHOT_NAME = self.getUniqueString() + my_share = self.operator_cloud.shared_file_system.create_share( + name=self.SHARE_NAME, + size=2, + share_type="dhss_false", + share_protocol='NFS', + description=None, + ) + self.operator_cloud.shared_file_system.wait_for_status( + my_share, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(my_share) + self.assertIsNotNone(my_share.id) + self.SHARE_ID = my_share.id + + msp = self.operator_cloud.shared_file_system.create_share_snapshot( + share_id=self.SHARE_ID, name=self.SNAPSHOT_NAME, force=True + ) + self.operator_cloud.shared_file_system.wait_for_status( + msp, + status='available', + failures=['error'], + interval=5, + wait=self._wait_for_timeout, + ) + self.assertIsNotNone(msp.id) + self.SNAPSHOT_ID = msp.id + + def tearDown(self): + snpt = self.operator_cloud.shared_file_system.get_share_snapshot( + self.SNAPSHOT_ID + ) + sot = self.operator_cloud.shared_file_system.delete_share_snapshot( + snpt, ignore_missing=False + ) + self.operator_cloud.shared_file_system.wait_for_delete( + snpt, interval=2, wait=self._wait_for_timeout + ) + self.assertIsNone(sot) + sot = self.operator_cloud.shared_file_system.delete_share( + self.SHARE_ID, ignore_missing=False + ) + self.assertIsNone(sot) + super().tearDown() + + def test_get(self): + sot = self.operator_cloud.shared_file_system.get_share_snapshot( + self.SNAPSHOT_ID + ) + self.assertEqual(self.SNAPSHOT_NAME, sot.name) + + def test_list(self): + snaps = self.operator_cloud.shared_file_system.share_snapshots( + details=True + ) + self.assertGreater(len(list(snaps)), 0) + for snap in snaps: + for attribute in ( + 'id', + 'name', + 'created_at', + 'updated_at', + 'description', + 'share_id', + 
'share_proto', + 'share_size', + 'size', + 'status', + 'user_id', + ): + self.assertTrue(hasattr(snap, attribute)) + + def test_update(self): + u_snap = self.operator_cloud.shared_file_system.update_share_snapshot( + self.SNAPSHOT_ID, display_description='updated share snapshot' + ) + get_u_snap = self.operator_cloud.shared_file_system.get_share_snapshot( + u_snap.id + ) + self.assertEqual('updated share snapshot', get_u_snap.description) diff --git a/openstack/tests/functional/shared_file_system/v2/test_share_snapshot_instance.py b/openstack/tests/functional/shared_file_system/v2/test_share_snapshot_instance.py new file mode 100644 index 0000000000..6c2abda6fb --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_share_snapshot_instance.py @@ -0,0 +1,39 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + from openstack.tests.functional.shared_file_system.v2 import base + + class ShareSnapshotInstanceTest(base.BaseSharedFileSystemTest): + def setUp(self): + super().setUp() + + self.SHARE_NAME = self.getUniqueString() + my_share = self.create_share( + name=self.SHARE_NAME, + size=2, + share_type="dhss_false", + share_protocol='NFS', + description=None, + ) + self.SHARE_ID = my_share.id + self.create_share_snapshot(share_id=self.SHARE_ID) + + def test_share_snapshot_instances(self): + sots = ( + self.operator_cloud.shared_file_system.share_snapshot_instances() + ) + self.assertGreater(len(list(sots)), 0) + for sot in sots: + for attribute in ('id', 'name', 'created_at', 'updated_at'): + self.assertTrue(hasattr(sot, attribute)) + self.assertIsInstance(getattr(sot, attribute), str) diff --git a/openstack/tests/functional/shared_file_system/v2/test_storage_pool.py b/openstack/tests/functional/shared_file_system/v2/test_storage_pool.py new file mode 100644 index 0000000000..37e8c9f82d --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_storage_pool.py @@ -0,0 +1,28 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional.shared_file_system.v2 import base + + +class StoragePoolTest(base.BaseSharedFileSystemTest): + def test_storage_pools(self): + pools = self.operator_cloud.shared_file_system.storage_pools() + self.assertGreater(len(list(pools)), 0) + for pool in pools: + for attribute in ( + 'pool', + 'name', + 'host', + 'backend', + 'capabilities', + ): + self.assertTrue(hasattr(pool, attribute)) diff --git a/openstack/tests/functional/shared_file_system/v2/test_user_message.py b/openstack/tests/functional/shared_file_system/v2/test_user_message.py new file mode 100644 index 0000000000..1ee76da6db --- /dev/null +++ b/openstack/tests/functional/shared_file_system/v2/test_user_message.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.functional.shared_file_system.v2 import base + + +class UserMessageTest(base.BaseSharedFileSystemTest): + def test_user_messages(self): + # TODO(kafilat): We must intentionally cause an asynchronous failure to + # ensure that at least one user message exists; + u_messages = self.user_cloud.shared_file_system.user_messages() + # self.assertGreater(len(list(u_messages)), 0) + for u_message in u_messages: + for attribute in ( + 'id', + 'created_at', + 'action_id', + 'detail_id', + 'expires_at', + 'message_level', + 'project_id', + 'request_id', + 'resource_id', + 'resource_type', + 'user_message', + ): + self.assertTrue(hasattr(u_message, attribute)) + self.assertIsInstance(getattr(u_message, attribute), str) + + self.operator_cloud.shared_file_system.delete_user_message( + u_message + ) diff --git a/openstack/tests/functional/telemetry/alarm/v2/test_alarm.py b/openstack/tests/functional/telemetry/alarm/v2/test_alarm.py deleted file mode 100644 index 3c6c0dd903..0000000000 --- a/openstack/tests/functional/telemetry/alarm/v2/test_alarm.py +++ /dev/null @@ -1,58 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import unittest -import uuid - -from openstack.telemetry.alarm.v2 import alarm -from openstack.tests.functional import base - - -@unittest.skip("bug/1524468") -@unittest.skipUnless(base.service_exists(service_type="alarming"), - "Alarming service does not exist") -@unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") -class TestAlarm(base.BaseFunctionalTest): - - NAME = uuid.uuid4().hex - ID = None - - @classmethod - def setUpClass(cls): - super(TestAlarm, cls).setUpClass() - meter = next(cls.conn.telemetry.meters()) - sot = cls.conn.alarm.create_alarm( - name=cls.NAME, - type='threshold', - threshold_rule={ - 'meter_name': meter.name, - 'threshold': 1.1, - }, - ) - assert isinstance(sot, alarm.Alarm) - cls.assertIs(cls.NAME, sot.name) - cls.ID = sot.id - - @classmethod - def tearDownClass(cls): - sot = cls.conn.alarm.delete_alarm(cls.ID, ignore_missing=False) - cls.assertIs(None, sot) - - def test_get(self): - sot = self.conn.alarm.get_alarm(self.ID) - self.assertEqual(self.NAME, sot.name) - self.assertEqual(self.ID, sot.id) - - def test_list(self): - names = [o.name for o in self.conn.alarm.alarms()] - self.assertIn(self.NAME, names) diff --git a/openstack/tests/functional/telemetry/alarm/v2/test_alarm_change.py b/openstack/tests/functional/telemetry/alarm/v2/test_alarm_change.py deleted file mode 100644 index f1b94d4191..0000000000 --- a/openstack/tests/functional/telemetry/alarm/v2/test_alarm_change.py +++ /dev/null @@ -1,50 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest -import uuid - -from openstack.tests.functional import base - - -@unittest.skip("bug/1524468") -@unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") -@unittest.skipUnless(base.service_exists(service_type="alarming"), - "Alarming service does not exist") -class TestAlarmChange(base.BaseFunctionalTest): - - NAME = uuid.uuid4().hex - alarm = None - - @classmethod - def setUpClass(cls): - super(TestAlarmChange, cls).setUpClass() - meter = next(cls.conn.telemetry.meters()) - alarm = cls.conn.alarm.create_alarm( - name=cls.NAME, - type='threshold', - threshold_rule={ - 'meter_name': meter.name, - 'threshold': 1.1, - }, - ) - cls.alarm = alarm - - @classmethod - def tearDownClass(cls): - cls.conn.alarm.delete_alarm(cls.alarm, ignore_missing=False) - - def test_list(self): - change = next(self.conn.alarm.alarm_changes(self.alarm)) - self.assertEqual(self.alarm.id, change.alarm_id) - self.assertEqual('creation', change.type) diff --git a/openstack/tests/functional/telemetry/v2/test_capability.py b/openstack/tests/functional/telemetry/v2/test_capability.py deleted file mode 100644 index 682ded41fa..0000000000 --- a/openstack/tests/functional/telemetry/v2/test_capability.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import unittest - -from openstack.tests.functional import base - - -@unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") -class TestCapability(base.BaseFunctionalTest): - - def test_list(self): - ids = [o.id for o in self.conn.telemetry.capabilities()] - self.assertIn('resources:query:simple', ids) - self.assertIn('events:query:simple', ids) - self.assertIn('meters:query:simple', ids) - self.assertIn('statistics:query:simple', ids) - self.assertIn('samples:query:simple', ids) diff --git a/openstack/tests/functional/telemetry/v2/test_meter.py b/openstack/tests/functional/telemetry/v2/test_meter.py deleted file mode 100644 index c06495ef65..0000000000 --- a/openstack/tests/functional/telemetry/v2/test_meter.py +++ /dev/null @@ -1,32 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest -import uuid - -from openstack.tests.functional import base - - -@unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") -class TestMeter(base.BaseFunctionalTest): - - def test_list(self): - # TODO(thowe): Remove this in favor of create_meter call. 
- # Since we do not have a create meter method at the moment - # make sure there is some data in there - name = uuid.uuid4().hex - tainer = self.conn.object_store.create_container(name=name) - self.conn.object_store.delete_container(tainer) - - names = set([o.name for o in self.conn.telemetry.meters()]) - self.assertIn('storage.objects.incoming.bytes', names) diff --git a/openstack/tests/functional/telemetry/v2/test_resource.py b/openstack/tests/functional/telemetry/v2/test_resource.py deleted file mode 100644 index 0ec768a530..0000000000 --- a/openstack/tests/functional/telemetry/v2/test_resource.py +++ /dev/null @@ -1,24 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from openstack.tests.functional import base - - -@unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") -class TestResource(base.BaseFunctionalTest): - - def test_list(self): - ids = [o.resource_id for o in self.conn.telemetry.resources()] - self.assertNotEqual(0, len(ids)) diff --git a/openstack/tests/functional/telemetry/v2/test_sample.py b/openstack/tests/functional/telemetry/v2/test_sample.py deleted file mode 100644 index 3aad42d287..0000000000 --- a/openstack/tests/functional/telemetry/v2/test_sample.py +++ /dev/null @@ -1,44 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from openstack.telemetry.v2 import sample -from openstack.tests.functional import base - - -@unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") -class TestSample(base.BaseFunctionalTest): - - meter = None - sample = None - - @classmethod - def setUpClass(cls): - super(TestSample, cls).setUpClass() - cls.meter = next(cls.conn.telemetry.meters()) - resource = next(cls.conn.telemetry.resources()) - sot = cls.conn.telemetry.create_sample( - counter_name=cls.meter.name, - meter=cls.meter.name, - counter_type='gauge', - counter_unit='instance', - counter_volume=1.0, - resource_id=resource.id, - ) - assert isinstance(sot, sample.Sample) - cls.sample = sot - - def test_list(self): - ids = [o.id for o in self.conn.telemetry.samples(self.meter)] - self.assertIn(self.sample.id, ids) diff --git a/openstack/tests/functional/telemetry/v2/test_statistics.py b/openstack/tests/functional/telemetry/v2/test_statistics.py deleted file mode 100644 index d0e6c6ab19..0000000000 --- a/openstack/tests/functional/telemetry/v2/test_statistics.py +++ /dev/null @@ -1,31 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import unittest - -from openstack.tests.functional import base - - -@unittest.skipUnless(base.service_exists(service_type="metering"), - "Metering service does not exist") -class TestStatistics(base.BaseFunctionalTest): - - def test_list(self): - found_something = False - for met in self.conn.telemetry.meters(): - try: - stat = next(self.conn.telemetry.statistics(met)) - self.assertIn('period_end', stat) - found_something = True - except Exception: - pass - self.assertTrue(found_something) diff --git a/openstack/tests/unit/README.rst b/openstack/tests/unit/README.rst new file mode 100644 index 0000000000..a9bbf05c04 --- /dev/null +++ b/openstack/tests/unit/README.rst @@ -0,0 +1,7 @@ +Unit Tests for openstacksdk +=========================== + +For information on how to run and extend these tests, refer to the `contributor +guide`__. + +.. __: https://docs.openstack.org/openstacksdk/latest/contributor/testing.html diff --git a/openstack/tests/unit/accelerator/__init__.py b/openstack/tests/unit/accelerator/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/accelerator/test_version.py b/openstack/tests/unit/accelerator/test_version.py new file mode 100644 index 0000000000..93f3f03597 --- /dev/null +++ b/openstack/tests/unit/accelerator/test_version.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.accelerator import version +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': '2', + 'status': '3', +} + + +class TestVersion(base.TestCase): + def test_basic(self): + sot = version.Version() + self.assertEqual('version', sot.resource_key) + self.assertEqual('versions', sot.resources_key) + self.assertEqual('/', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = version.Version(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/accelerator/v2/__init__.py b/openstack/tests/unit/accelerator/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/accelerator/v2/test_accelerator_request.py b/openstack/tests/unit/accelerator/v2/test_accelerator_request.py new file mode 100644 index 0000000000..87ed69fd8e --- /dev/null +++ b/openstack/tests/unit/accelerator/v2/test_accelerator_request.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.accelerator.v2 import accelerator_request as arq +from openstack.tests.unit import base + + +FAKE_ID = '0725b527-e51a-41df-ad22-adad5f4546ad' +FAKE_RP_UUID = 'f4b7fe6c-8ab4-4914-a113-547af022935b' +FAKE_INSTANCE_UUID = '1ce4a597-9836-4e02-bea1-a3a6cbe7b9f9' +FAKE_ATTACH_INFO_STR = ( + '{"bus": "5e", "device": "00", "domain": "0000", "function": "1"}' +) + +FAKE = { + 'uuid': FAKE_ID, + 'device_profile_name': 'fake-devprof', + 'device_profile_group_id': 0, + 'device_rp_uuid': FAKE_RP_UUID, + 'instance_uuid': FAKE_INSTANCE_UUID, + 'attach_handle_type': 'PCI', + 'attach_handle_info': FAKE_ATTACH_INFO_STR, +} + + +class TestAcceleratorRequest(base.TestCase): + def test_basic(self): + sot = arq.AcceleratorRequest() + self.assertEqual('arq', sot.resource_key) + self.assertEqual('arqs', sot.resources_key) + self.assertEqual('/accelerator_requests', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_patch) + + def test_make_it(self): + sot = arq.AcceleratorRequest(**FAKE) + self.assertEqual(FAKE_ID, sot.uuid) + self.assertEqual(FAKE['device_profile_name'], sot.device_profile_name) + self.assertEqual( + FAKE['device_profile_group_id'], sot.device_profile_group_id + ) + self.assertEqual(FAKE_RP_UUID, sot.device_rp_uuid) + self.assertEqual(FAKE_INSTANCE_UUID, sot.instance_uuid) + self.assertEqual(FAKE['attach_handle_type'], sot.attach_handle_type) + 
self.assertEqual(FAKE_ATTACH_INFO_STR, sot.attach_handle_info) diff --git a/openstack/tests/unit/accelerator/v2/test_attribute.py b/openstack/tests/unit/accelerator/v2/test_attribute.py new file mode 100644 index 0000000000..381c57678b --- /dev/null +++ b/openstack/tests/unit/accelerator/v2/test_attribute.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.accelerator.v2 import attribute +from openstack.tests.unit import base + + +FAKE = { + "id": 1, + "uuid": "a95e10ae-b3e3-4eab-a513-1afae6f17c51", + "deployable_id": 1, + "key": "traits1", + 'value': 'CUSTOM_FAKE_DEVICE', +} + + +class TestAttribute(base.TestCase): + def test_basic(self): + sot = attribute.Attribute() + self.assertEqual('attribute', sot.resource_key) + self.assertEqual('attributes', sot.resources_key) + self.assertEqual('/attributes', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_patch) + + def test_make_it(self): + sot = attribute.Attribute(**FAKE) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['uuid'], sot.uuid) + self.assertEqual(FAKE['deployable_id'], sot.deployable_id) + self.assertEqual(FAKE['key'], sot.key) + self.assertEqual(FAKE['value'], sot.value) diff --git a/openstack/tests/unit/accelerator/v2/test_deployable.py b/openstack/tests/unit/accelerator/v2/test_deployable.py 
new file mode 100644 index 0000000000..87f1f885e9 --- /dev/null +++ b/openstack/tests/unit/accelerator/v2/test_deployable.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.accelerator.v2 import deployable +from openstack.tests.unit import base + + +EXAMPLE = { + 'uuid': uuid.uuid4(), + 'created_at': '2019-08-09T12:14:57.233772', + 'updated_at': '2019-08-09T12:15:57.233772', + 'parent_id': '1', + 'root_id': '1', + 'name': 'test_name', + 'num_accelerators': '1', + 'device_id': '1', +} + + +class TestDeployable(base.TestCase): + def test_basic(self): + sot = deployable.Deployable() + self.assertEqual('deployable', sot.resource_key) + self.assertEqual('deployables', sot.resources_key) + self.assertEqual('/deployables', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = deployable.Deployable(**EXAMPLE) + self.assertEqual(EXAMPLE['uuid'], sot.id) + self.assertEqual(EXAMPLE['parent_id'], sot.parent_id) + self.assertEqual(EXAMPLE['root_id'], sot.root_id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['num_accelerators'], sot.num_accelerators) + self.assertEqual(EXAMPLE['device_id'], sot.device_id) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git 
a/openstack/tests/unit/accelerator/v2/test_device.py b/openstack/tests/unit/accelerator/v2/test_device.py new file mode 100644 index 0000000000..c354d52225 --- /dev/null +++ b/openstack/tests/unit/accelerator/v2/test_device.py @@ -0,0 +1,54 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.accelerator.v2 import device +from openstack.tests.unit import base + + +EXAMPLE = { + 'id': '1', + 'uuid': uuid.uuid4(), + 'created_at': '2019-08-09T12:14:57.233772', + 'updated_at': '2019-08-09T12:15:57.233772', + 'type': 'test_type', + 'vendor': '0x8086', + 'model': 'test_model', + 'std_board_info': '{"product_id": "0x09c4"}', + 'vendor_board_info': 'test_vb_info', +} + + +class TestDevice(base.TestCase): + def test_basic(self): + sot = device.Device() + self.assertEqual('device', sot.resource_key) + self.assertEqual('devices', sot.resources_key) + self.assertEqual('/devices', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = device.Device(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['uuid'], sot.uuid) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['vendor'], sot.vendor) + self.assertEqual(EXAMPLE['model'], sot.model) + self.assertEqual(EXAMPLE['std_board_info'], sot.std_board_info) + 
self.assertEqual(EXAMPLE['vendor_board_info'], sot.vendor_board_info) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/accelerator/v2/test_device_profile.py b/openstack/tests/unit/accelerator/v2/test_device_profile.py new file mode 100644 index 0000000000..ce50241d75 --- /dev/null +++ b/openstack/tests/unit/accelerator/v2/test_device_profile.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.accelerator.v2 import device_profile +from openstack.tests.unit import base + + +FAKE = { + "id": 1, + "uuid": "a95e10ae-b3e3-4eab-a513-1afae6f17c51", + "name": 'afaas_example_1', + "groups": [ + { + "resources:ACCELERATOR_FPGA": "1", + "trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required", + "trait:CUSTOM_FUNCTION_ID_3AFB": "required", + }, + { + "resources:CUSTOM_ACCELERATOR_FOO": "2", + "resources:CUSTOM_MEMORY": "200", + "trait:CUSTOM_TRAIT_ALWAYS": "required", + }, + ], + 'description': 'description_test', +} + + +class TestDeviceProfile(base.TestCase): + def test_basic(self): + sot = device_profile.DeviceProfile() + self.assertEqual('device_profile', sot.resource_key) + self.assertEqual('device_profiles', sot.resources_key) + self.assertEqual('/device_profiles', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_patch) + + def test_make_it(self): + sot = device_profile.DeviceProfile(**FAKE) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['uuid'], sot.uuid) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['groups'], sot.groups) + self.assertEqual(FAKE['description'], sot.description) diff --git a/openstack/tests/unit/accelerator/v2/test_proxy.py b/openstack/tests/unit/accelerator/v2/test_proxy.py new file mode 100644 index 0000000000..c5823421b6 --- /dev/null +++ b/openstack/tests/unit/accelerator/v2/test_proxy.py @@ -0,0 +1,121 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.accelerator.v2 import _proxy +from openstack.accelerator.v2 import accelerator_request +from openstack.accelerator.v2 import attribute +from openstack.accelerator.v2 import deployable +from openstack.accelerator.v2 import device_profile +from openstack.tests.unit import test_proxy_base as test_proxy_base + + +class TestAcceleratorProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestAcceleratorDeployable(TestAcceleratorProxy): + def test_list_deployables(self): + self.verify_list(self.proxy.deployables, deployable.Deployable) + + +class TestAcceleratorDevice(TestAcceleratorProxy): + def test_list_device_profile(self): + self.verify_list( + self.proxy.device_profiles, device_profile.DeviceProfile + ) + + def test_create_device_profile(self): + self.verify_create( + self.proxy.create_device_profile, device_profile.DeviceProfile + ) + + def test_delete_device_profile(self): + self.verify_delete( + self.proxy.delete_device_profile, + device_profile.DeviceProfile, + False, + ) + + def test_delete_device_profile_ignore(self): + self.verify_delete( + self.proxy.delete_device_profile, + device_profile.DeviceProfile, + True, + ) + + def test_get_device_profile(self): + self.verify_get( + self.proxy.get_device_profile, device_profile.DeviceProfile + ) + + +class TestAcceleratorRequest(TestAcceleratorProxy): + def test_list_accelerator_request(self): + self.verify_list( + self.proxy.accelerator_requests, + accelerator_request.AcceleratorRequest, + ) + + def 
test_create_accelerator_request(self): + self.verify_create( + self.proxy.create_accelerator_request, + accelerator_request.AcceleratorRequest, + ) + + def test_delete_accelerator_request(self): + self.verify_delete( + self.proxy.delete_accelerator_request, + accelerator_request.AcceleratorRequest, + False, + ) + + def test_delete_accelerator_request_ignore(self): + self.verify_delete( + self.proxy.delete_accelerator_request, + accelerator_request.AcceleratorRequest, + True, + ) + + def test_get_accelerator_request(self): + self.verify_get( + self.proxy.get_accelerator_request, + accelerator_request.AcceleratorRequest, + ) + + +class TestAttribute(TestAcceleratorProxy): + def test_list_attribute(self): + self.verify_list( + self.proxy.attributes, + attribute.Attribute, + ) + + def test_create_attribute(self): + self.verify_create( + self.proxy.create_attribute, + attribute.Attribute, + ) + + def test_delete_attribute(self): + self.verify_delete( + self.proxy.delete_attribute, + attribute.Attribute, + False, + ) + + def test_get_attribute(self): + self.verify_get( + self.proxy.get_attribute, + attribute.Attribute, + ) diff --git a/openstack/tests/unit/bare_metal/test_bare_metal_service.py b/openstack/tests/unit/bare_metal/test_bare_metal_service.py deleted file mode 100644 index 0a7f005811..0000000000 --- a/openstack/tests/unit/bare_metal/test_bare_metal_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.bare_metal import bare_metal_service - - -class TestBareMetalService(testtools.TestCase): - - def test_service(self): - sot = bare_metal_service.BareMetalService() - self.assertEqual('baremetal', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/bare_metal/test_version.py b/openstack/tests/unit/bare_metal/test_version.py deleted file mode 100644 index 38aff79273..0000000000 --- a/openstack/tests/unit/bare_metal/test_version.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.bare_metal import version - -IDENTIFIER = 'IDENTIFIER' -EXAMPLE = { - 'id': IDENTIFIER, - 'links': '2', - 'status': '3', - 'updated': '4', -} - - -class TestVersion(testtools.TestCase): - - def test_basic(self): - sot = version.Version() - self.assertEqual('version', sot.resource_key) - self.assertEqual('versions', sot.resources_key) - self.assertEqual('/', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - self.assertFalse(sot.allow_head) - self.assertFalse(sot.patch_update) - self.assertFalse(sot.put_create) - - def test_make_it(self): - sot = version.Version(**EXAMPLE) - self.assertEqual(EXAMPLE['id'], sot.id) - self.assertEqual(EXAMPLE['links'], sot.links) - self.assertEqual(EXAMPLE['status'], sot.status) - self.assertEqual(EXAMPLE['updated'], sot.updated) diff --git a/openstack/tests/unit/bare_metal/v1/test_chassis.py b/openstack/tests/unit/bare_metal/v1/test_chassis.py deleted file mode 100644 index eab6c7a9ae..0000000000 --- a/openstack/tests/unit/bare_metal/v1/test_chassis.py +++ /dev/null @@ -1,94 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.bare_metal.v1 import chassis - -FAKE = { - "created_at": "2016-08-18T22:28:48.165105+00:00", - "description": "Sample chassis", - "extra": {}, - "links": [ - { - "href": "http://127.0.0.1:6385/v1/chassis/ID", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/chassis/ID", - "rel": "bookmark" - } - ], - "nodes": [ - { - "href": "http://127.0.0.1:6385/v1/chassis/ID/nodes", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/chassis/ID/nodes", - "rel": "bookmark" - } - ], - "updated_at": None, - "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1" -} - - -class TestChassis(testtools.TestCase): - - def test_basic(self): - sot = chassis.Chassis() - self.assertIsNone(sot.resource_key) - self.assertEqual('chassis', sot.resources_key) - self.assertEqual('/chassis', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) - - def test_instantiate(self): - sot = chassis.Chassis(**FAKE) - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['description'], sot.description) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['nodes'], sot.nodes) - self.assertEqual(FAKE['updated_at'], sot.updated_at) - - -class TestChassisDetail(testtools.TestCase): - - def test_basic(self): - sot = chassis.ChassisDetail() - self.assertIsNone(sot.resource_key) - self.assertEqual('chassis', sot.resources_key) - self.assertEqual('/chassis/detail', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def 
test_instantiate(self): - sot = chassis.ChassisDetail(**FAKE) - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['description'], sot.description) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['nodes'], sot.nodes) - self.assertEqual(FAKE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/bare_metal/v1/test_driver.py b/openstack/tests/unit/bare_metal/v1/test_driver.py deleted file mode 100644 index 36ce7dc1f4..0000000000 --- a/openstack/tests/unit/bare_metal/v1/test_driver.py +++ /dev/null @@ -1,66 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.bare_metal.v1 import driver - -FAKE = { - "hosts": [ - "897ab1dad809" - ], - "links": [ - { - "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/drivers/agent_ipmitool", - "rel": "bookmark" - } - ], - "name": "agent_ipmitool", - "properties": [ - { - "href": - "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties", - "rel": "bookmark" - } - ] -} - - -class TestDriver(testtools.TestCase): - - def test_basic(self): - sot = driver.Driver() - self.assertIsNone(sot.resource_key) - self.assertEqual('drivers', sot.resources_key) - self.assertEqual('/drivers', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_instantiate(self): - sot = driver.Driver(**FAKE) - self.assertEqual(FAKE['name'], sot.id) - self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['hosts'], sot.hosts) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['properties'], sot.properties) diff --git a/openstack/tests/unit/bare_metal/v1/test_node.py b/openstack/tests/unit/bare_metal/v1/test_node.py deleted file mode 100644 index ddbf2512a9..0000000000 --- a/openstack/tests/unit/bare_metal/v1/test_node.py +++ /dev/null @@ -1,198 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.bare_metal.v1 import node - -# NOTE: Sample data from api-ref doc -FAKE = { - "chassis_uuid": "1", # NOTE: missed in api-ref sample - "clean_step": {}, - "console_enabled": False, - "created_at": "2016-08-18T22:28:48.643434+00:00", - "driver": "agent_ipmitool", - "driver_info": { - "ipmi_password": "******", - "ipmi_username": "ADMIN" - }, - "driver_internal_info": {}, - "extra": {}, - "inspection_finished_at": None, - "inspection_started_at": None, - "instance_info": {}, - "instance_uuid": None, - "last_error": None, - "links": [ - { - "href": "http://127.0.0.1:6385/v1/nodes/", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/nodes/", - "rel": "bookmark" - } - ], - "maintenance": False, - "maintenance_reason": None, - "name": "test_node", - "network_interface": "flat", - "portgroups": [ - { - "href": "http://127.0.0.1:6385/v1/nodes//portgroups", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/nodes//portgroups", - "rel": "bookmark" - } - ], - "ports": [ - { - "href": "http://127.0.0.1:6385/v1/nodes//ports", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/nodes//ports", - "rel": "bookmark" - } - ], - "power_state": None, - "properties": {}, - "provision_state": "enroll", - "provision_updated_at": None, - "raid_config": {}, - "reservation": None, - "resource_class": None, - "states": [ - { - "href": "http://127.0.0.1:6385/v1/nodes//states", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/nodes//states", - "rel": "bookmark" - } - ], - "target_power_state": None, - 
"target_provision_state": None, - "target_raid_config": {}, - "updated_at": None, - "uuid": "6d85703a-565d-469a-96ce-30b6de53079d" -} - - -class TestNode(testtools.TestCase): - - def test_basic(self): - sot = node.Node() - self.assertIsNone(sot.resource_key) - self.assertEqual('nodes', sot.resources_key) - self.assertEqual('/nodes', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) - - def test_instantiate(self): - sot = node.Node(**FAKE) - - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['name'], sot.name) - - self.assertEqual(FAKE['chassis_uuid'], sot.chassis_id) - self.assertEqual(FAKE['clean_step'], sot.clean_step) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['driver'], sot.driver) - self.assertEqual(FAKE['driver_info'], sot.driver_info) - self.assertEqual(FAKE['driver_internal_info'], - sot.driver_internal_info) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['instance_info'], sot.instance_info) - self.assertEqual(FAKE['instance_uuid'], sot.instance_id) - self.assertEqual(FAKE['console_enabled'], sot.is_console_enabled) - self.assertEqual(FAKE['maintenance'], sot.is_maintenance) - self.assertEqual(FAKE['last_error'], sot.last_error) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['maintenance_reason'], sot.maintenance_reason) - self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['network_interface'], sot.network_interface) - self.assertEqual(FAKE['ports'], sot.ports) - self.assertEqual(FAKE['portgroups'], sot.port_groups) - self.assertEqual(FAKE['power_state'], sot.power_state) - self.assertEqual(FAKE['properties'], sot.properties) - self.assertEqual(FAKE['provision_state'], sot.provision_state) - 
self.assertEqual(FAKE['raid_config'], sot.raid_config) - self.assertEqual(FAKE['reservation'], sot.reservation) - self.assertEqual(FAKE['resource_class'], sot.resource_class) - self.assertEqual(FAKE['states'], sot.states) - self.assertEqual(FAKE['target_provision_state'], - sot.target_provision_state) - self.assertEqual(FAKE['target_power_state'], sot.target_power_state) - self.assertEqual(FAKE['target_raid_config'], sot.target_raid_config) - self.assertEqual(FAKE['updated_at'], sot.updated_at) - - -class TestNodeDetail(testtools.TestCase): - - def test_basic(self): - sot = node.NodeDetail() - self.assertIsNone(sot.resource_key) - self.assertEqual('nodes', sot.resources_key) - self.assertEqual('/nodes/detail', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_instantiate(self): - sot = node.NodeDetail(**FAKE) - - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['name'], sot.name) - - self.assertEqual(FAKE['chassis_uuid'], sot.chassis_id) - self.assertEqual(FAKE['clean_step'], sot.clean_step) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['driver'], sot.driver) - self.assertEqual(FAKE['driver_info'], sot.driver_info) - self.assertEqual(FAKE['driver_internal_info'], - sot.driver_internal_info) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['instance_info'], sot.instance_info) - self.assertEqual(FAKE['instance_uuid'], sot.instance_id) - self.assertEqual(FAKE['console_enabled'], sot.is_console_enabled) - self.assertEqual(FAKE['maintenance'], sot.is_maintenance) - self.assertEqual(FAKE['last_error'], sot.last_error) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['maintenance_reason'], sot.maintenance_reason) - self.assertEqual(FAKE['name'], sot.name) - 
self.assertEqual(FAKE['network_interface'], sot.network_interface) - self.assertEqual(FAKE['ports'], sot.ports) - self.assertEqual(FAKE['portgroups'], sot.port_groups) - self.assertEqual(FAKE['power_state'], sot.power_state) - self.assertEqual(FAKE['properties'], sot.properties) - self.assertEqual(FAKE['provision_state'], sot.provision_state) - self.assertEqual(FAKE['raid_config'], sot.raid_config) - self.assertEqual(FAKE['reservation'], sot.reservation) - self.assertEqual(FAKE['resource_class'], sot.resource_class) - self.assertEqual(FAKE['states'], sot.states) - self.assertEqual(FAKE['target_provision_state'], - sot.target_provision_state) - self.assertEqual(FAKE['target_power_state'], sot.target_power_state) - self.assertEqual(FAKE['target_raid_config'], sot.target_raid_config) - self.assertEqual(FAKE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/bare_metal/v1/test_port.py b/openstack/tests/unit/bare_metal/v1/test_port.py deleted file mode 100644 index c58b6f93a8..0000000000 --- a/openstack/tests/unit/bare_metal/v1/test_port.py +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.bare_metal.v1 import port - -FAKE = { - "address": "11:11:11:11:11:11", - "created_at": "2016-08-18T22:28:49.946416+00:00", - "extra": {}, - "internal_info": {}, - "links": [ - { - "href": "http://127.0.0.1:6385/v1/ports/", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/ports/", - "rel": "bookmark" - } - ], - "local_link_connection": { - "port_id": "Ethernet3/1", - "switch_id": "0a:1b:2c:3d:4e:5f", - "switch_info": "switch1" - }, - "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", - "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", - "pxe_enabled": True, - "updated_at": None, - "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1" -} - - -class TestPort(testtools.TestCase): - - def test_basic(self): - sot = port.Port() - self.assertIsNone(sot.resource_key) - self.assertEqual('ports', sot.resources_key) - self.assertEqual('/ports', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) - - def test_instantiate(self): - sot = port.PortDetail(**FAKE) - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['address'], sot.address) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['internal_info'], sot.internal_info) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['local_link_connection'], - sot.local_link_connection) - self.assertEqual(FAKE['node_uuid'], sot.node_id) - self.assertEqual(FAKE['portgroup_uuid'], sot.port_group_id) - self.assertEqual(FAKE['pxe_enabled'], sot.is_pxe_enabled) - self.assertEqual(FAKE['updated_at'], sot.updated_at) - - -class TestPortDetail(testtools.TestCase): - - def test_basic(self): - sot = port.PortDetail() - self.assertIsNone(sot.resource_key) - 
self.assertEqual('ports', sot.resources_key) - self.assertEqual('/ports/detail', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_instantiate(self): - sot = port.PortDetail(**FAKE) - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['address'], sot.address) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['internal_info'], sot.internal_info) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['local_link_connection'], - sot.local_link_connection) - self.assertEqual(FAKE['node_uuid'], sot.node_id) - self.assertEqual(FAKE['portgroup_uuid'], sot.port_group_id) - self.assertEqual(FAKE['pxe_enabled'], sot.is_pxe_enabled) - self.assertEqual(FAKE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/bare_metal/v1/test_port_group.py b/openstack/tests/unit/bare_metal/v1/test_port_group.py deleted file mode 100644 index ea1cfd06e3..0000000000 --- a/openstack/tests/unit/bare_metal/v1/test_port_group.py +++ /dev/null @@ -1,108 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.bare_metal.v1 import port_group - -FAKE = { - "address": "11:11:11:11:11:11", - "created_at": "2016-08-18T22:28:48.165105+00:00", - "extra": {}, - "internal_info": {}, - "links": [ - { - "href": "http://127.0.0.1:6385/v1/portgroups/", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/portgroups/", - "rel": "bookmark" - } - ], - "name": "test_portgroup", - "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", - "ports": [ - { - "href": "http://127.0.0.1:6385/v1/portgroups//ports", - "rel": "self" - }, - { - "href": "http://127.0.0.1:6385/portgroups//ports", - "rel": "bookmark" - } - ], - "standalone_ports_supported": True, - "updated_at": None, - "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", -} - - -class TestPortGroup(testtools.TestCase): - - def test_basic(self): - sot = port_group.PortGroup() - self.assertIsNone(sot.resource_key) - self.assertEqual('portgroups', sot.resources_key) - self.assertEqual('/portgroups', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) - - def test_instantiate(self): - sot = port_group.PortGroup(**FAKE) - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['address'], sot.address) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['internal_info'], sot.internal_info) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['node_uuid'], sot.node_id) - self.assertEqual(FAKE['ports'], sot.ports) - self.assertEqual(FAKE['standalone_ports_supported'], - sot.is_standalone_ports_supported) - self.assertEqual(FAKE['updated_at'], sot.updated_at) - - -class TestPortGroupDetail(testtools.TestCase): - - def test_basic(self): - sot = 
port_group.PortGroupDetail() - self.assertIsNone(sot.resource_key) - self.assertEqual('portgroups', sot.resources_key) - self.assertEqual('/portgroups/detail', sot.base_path) - self.assertEqual('baremetal', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_instantiate(self): - sot = port_group.PortGroupDetail(**FAKE) - self.assertEqual(FAKE['uuid'], sot.id) - self.assertEqual(FAKE['address'], sot.address) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['extra'], sot.extra) - self.assertEqual(FAKE['internal_info'], sot.internal_info) - self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['node_uuid'], sot.node_id) - self.assertEqual(FAKE['ports'], sot.ports) - self.assertEqual(FAKE['standalone_ports_supported'], - sot.is_standalone_ports_supported) - self.assertEqual(FAKE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/bare_metal/v1/test_proxy.py b/openstack/tests/unit/bare_metal/v1/test_proxy.py deleted file mode 100644 index f5d5308244..0000000000 --- a/openstack/tests/unit/bare_metal/v1/test_proxy.py +++ /dev/null @@ -1,154 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.bare_metal.v1 import _proxy -from openstack.bare_metal.v1 import chassis -from openstack.bare_metal.v1 import driver -from openstack.bare_metal.v1 import node -from openstack.bare_metal.v1 import port -from openstack.bare_metal.v1 import port_group -from openstack.tests.unit import test_proxy_base2 - - -class TestBareMetalProxy(test_proxy_base2.TestProxyBase): - - def setUp(self): - super(TestBareMetalProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - def test_drivers(self): - self.verify_list(self.proxy.drivers, driver.Driver, paginated=False) - - def test_get_driver(self): - self.verify_get(self.proxy.get_driver, driver.Driver) - - def test_chassis_detailed(self): - self.verify_list(self.proxy.chassis, chassis.ChassisDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) - - def test_chassis_not_detailed(self): - self.verify_list(self.proxy.chassis, chassis.Chassis, - paginated=True, - method_kwargs={"details": False, "query": 1}, - expected_kwargs={"query": 1}) - - def test_create_chassis(self): - self.verify_create(self.proxy.create_chassis, chassis.Chassis) - - def test_find_chassis(self): - self.verify_find(self.proxy.find_chassis, chassis.Chassis) - - def test_get_chassis(self): - self.verify_get(self.proxy.get_chassis, chassis.Chassis) - - def test_update_chassis(self): - self.verify_update(self.proxy.update_chassis, chassis.Chassis) - - def test_delete_chassis(self): - self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, False) - - def test_delete_chassis_ignore(self): - self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, True) - - def test_nodes_detailed(self): - self.verify_list(self.proxy.nodes, node.NodeDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) - - def test_nodes_not_detailed(self): - self.verify_list(self.proxy.nodes, node.Node, - paginated=True, - method_kwargs={"details": False, 
"query": 1}, - expected_kwargs={"query": 1}) - - def test_create_node(self): - self.verify_create(self.proxy.create_node, node.Node) - - def test_find_node(self): - self.verify_find(self.proxy.find_node, node.Node) - - def test_get_node(self): - self.verify_get(self.proxy.get_node, node.Node) - - def test_update_node(self): - self.verify_update(self.proxy.update_node, node.Node) - - def test_delete_node(self): - self.verify_delete(self.proxy.delete_node, node.Node, False) - - def test_delete_node_ignore(self): - self.verify_delete(self.proxy.delete_node, node.Node, True) - - def test_ports_detailed(self): - self.verify_list(self.proxy.ports, port.PortDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) - - def test_ports_not_detailed(self): - self.verify_list(self.proxy.ports, port.Port, - paginated=True, - method_kwargs={"details": False, "query": 1}, - expected_kwargs={"query": 1}) - - def test_create_port(self): - self.verify_create(self.proxy.create_port, port.Port) - - def test_find_port(self): - self.verify_find(self.proxy.find_port, port.Port) - - def test_get_port(self): - self.verify_get(self.proxy.get_port, port.Port) - - def test_update_port(self): - self.verify_update(self.proxy.update_port, port.Port) - - def test_delete_port(self): - self.verify_delete(self.proxy.delete_port, port.Port, False) - - def test_delete_port_ignore(self): - self.verify_delete(self.proxy.delete_port, port.Port, True) - - def test_portgroups_detailed(self): - self.verify_list(self.proxy.portgroups, port_group.PortGroupDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) - - def test_portgroups_not_detailed(self): - self.verify_list(self.proxy.portgroups, port_group.PortGroup, - paginated=True, - method_kwargs={"details": False, "query": 1}, - expected_kwargs={"query": 1}) - - def test_create_portgroup(self): - self.verify_create(self.proxy.create_portgroup, 
port_group.PortGroup) - - def test_find_portgroup(self): - self.verify_find(self.proxy.find_portgroup, port_group.PortGroup) - - def test_get_portgroup(self): - self.verify_get(self.proxy.get_portgroup, port_group.PortGroup) - - def test_update_portgroup(self): - self.verify_update(self.proxy.update_portgroup, port_group.PortGroup) - - def test_delete_portgroup(self): - self.verify_delete(self.proxy.delete_portgroup, port_group.PortGroup, - False) - - def test_delete_portgroup_ignore(self): - self.verify_delete(self.proxy.delete_portgroup, port_group.PortGroup, - True) diff --git a/openstack/tests/unit/baremetal/__init__.py b/openstack/tests/unit/baremetal/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/baremetal/test_configdrive.py b/openstack/tests/unit/baremetal/test_configdrive.py new file mode 100644 index 0000000000..e560dd2d9d --- /dev/null +++ b/openstack/tests/unit/baremetal/test_configdrive.py @@ -0,0 +1,109 @@ +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import json +import os +from unittest import mock + +import testtools + +from openstack.baremetal import configdrive + + +class TestPopulateDirectory(testtools.TestCase): + def _check( + self, metadata, user_data=None, network_data=None, vendor_data=None + ): + with configdrive.populate_directory( + metadata, + user_data=user_data, + network_data=network_data, + vendor_data=vendor_data, + ) as d: + for version in ('2012-08-10', 'latest'): + with open( + os.path.join(d, 'openstack', version, 'meta_data.json') + ) as fp: + actual_metadata = json.load(fp) + + self.assertEqual(metadata, actual_metadata) + network_data_file = os.path.join( + d, 'openstack', version, 'network_data.json' + ) + user_data_file = os.path.join( + d, 'openstack', version, 'user_data' + ) + vendor_data_file = os.path.join( + d, 'openstack', version, 'vendor_data2.json' + ) + + if network_data is None: + self.assertFalse(os.path.exists(network_data_file)) + else: + with open(network_data_file) as fp: + self.assertEqual(network_data, json.load(fp)) + + if vendor_data is None: + self.assertFalse(os.path.exists(vendor_data_file)) + else: + with open(vendor_data_file) as fp: + self.assertEqual(vendor_data, json.load(fp)) + + if user_data is None: + self.assertFalse(os.path.exists(user_data_file)) + else: + if isinstance(user_data, str): + user_data = user_data.encode() + with open(user_data_file, 'rb') as fp: + self.assertEqual(user_data, fp.read()) + + # Clean up in __exit__ + self.assertFalse(os.path.exists(d)) + + def test_without_user_data(self): + self._check({'foo': 42}) + + def test_with_user_data(self): + self._check({'foo': 42}, b'I am user data') + + def test_with_user_data_as_string(self): + self._check({'foo': 42}, 'I am user data') + + def test_with_network_data(self): + self._check({'foo': 42}, network_data={'networks': {}}) + + def test_with_vendor_data(self): + self._check({'foo': 42}, vendor_data={'foo': 'bar'}) + + +@mock.patch('subprocess.Popen', autospec=True) +class 
TestPack(testtools.TestCase): + def test_no_genisoimage(self, mock_popen): + mock_popen.side_effect = OSError + self.assertRaisesRegex( + RuntimeError, "genisoimage", configdrive.pack, "/fake" + ) + + def test_genisoimage_fails(self, mock_popen): + mock_popen.return_value.communicate.return_value = b"", b"BOOM" + mock_popen.return_value.returncode = 1 + self.assertRaisesRegex(RuntimeError, "BOOM", configdrive.pack, "/fake") + + def test_success(self, mock_popen): + mock_popen.return_value.communicate.return_value = b"", b"" + mock_popen.return_value.returncode = 0 + result = configdrive.pack("/fake") + # Make sure the result is string on all python versions + self.assertIsInstance(result, str) diff --git a/openstack/tests/unit/baremetal/test_version.py b/openstack/tests/unit/baremetal/test_version.py new file mode 100644 index 0000000000..b7c9e7338a --- /dev/null +++ b/openstack/tests/unit/baremetal/test_version.py @@ -0,0 +1,46 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal import version +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': '2', + 'status': '3', + 'updated': '4', +} + + +class TestVersion(base.TestCase): + def test_basic(self): + sot = version.Version() + self.assertEqual('version', sot.resource_key) + self.assertEqual('versions', sot.resources_key) + self.assertEqual('/', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_head) + self.assertEqual('PUT', sot.commit_method) + self.assertEqual('POST', sot.create_method) + + def test_make_it(self): + sot = version.Version(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['updated'], sot.updated) diff --git a/openstack/tests/unit/baremetal/v1/__init__.py b/openstack/tests/unit/baremetal/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/baremetal/v1/test_allocation.py b/openstack/tests/unit/baremetal/v1/test_allocation.py new file mode 100644 index 0000000000..48b7825632 --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_allocation.py @@ -0,0 +1,146 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.baremetal.v1 import allocation +from openstack import exceptions +from openstack.tests.unit import base + +FAKE = { + "candidate_nodes": [], + "created_at": "2016-08-18T22:28:48.165105+00:00", + "extra": {}, + "last_error": None, + "links": [ + { + "href": "http://127.0.0.1:6385/v1/allocations/", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/allocations/", + "rel": "bookmark", + }, + ], + "name": "test_allocation", + "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", + "owner": "demo", + "resource_class": "baremetal", + "state": "active", + "traits": [], + "updated_at": None, + "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", +} + + +class TestAllocation(base.TestCase): + def test_basic(self): + sot = allocation.Allocation() + self.assertIsNone(sot.resource_key) + self.assertEqual('allocations', sot.resources_key) + self.assertEqual('/allocations', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_instantiate(self): + sot = allocation.Allocation(**FAKE) + self.assertEqual(FAKE['candidate_nodes'], sot.candidate_nodes) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['last_error'], sot.last_error) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['node_uuid'], sot.node_id) + self.assertEqual(FAKE['owner'], sot.owner) + self.assertEqual(FAKE['resource_class'], sot.resource_class) + self.assertEqual(FAKE['state'], sot.state) + self.assertEqual(FAKE['traits'], sot.traits) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['uuid'], sot.id) + + +@mock.patch('time.sleep', lambda _t: None) +@mock.patch.object(allocation.Allocation, 'fetch', autospec=True) +class 
TestWaitForAllocation(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.52' + self.session.log = mock.Mock() + self.fake = dict(FAKE, state='allocating', node_uuid=None) + self.allocation = allocation.Allocation(**self.fake) + + def test_already_active(self, mock_fetch): + self.allocation.state = 'active' + allocation = self.allocation.wait(None) + self.assertIs(allocation, self.allocation) + self.assertFalse(mock_fetch.called) + + def test_wait(self, mock_fetch): + marker = [False] # mutable object to modify in the closure + + def _side_effect(allocation, session): + if marker[0]: + self.allocation.state = 'active' + self.allocation.node_id = FAKE['node_uuid'] + else: + marker[0] = True + + mock_fetch.side_effect = _side_effect + allocation = self.allocation.wait(self.session) + self.assertIs(allocation, self.allocation) + self.assertEqual(2, mock_fetch.call_count) + + def test_failure(self, mock_fetch): + marker = [False] # mutable object to modify in the closure + + def _side_effect(allocation, session): + if marker[0]: + self.allocation.state = 'error' + self.allocation.last_error = 'boom!' + else: + marker[0] = True + + mock_fetch.side_effect = _side_effect + self.assertRaises( + exceptions.ResourceFailure, self.allocation.wait, self.session + ) + self.assertEqual(2, mock_fetch.call_count) + + def test_failure_ignored(self, mock_fetch): + marker = [False] # mutable object to modify in the closure + + def _side_effect(allocation, session): + if marker[0]: + self.allocation.state = 'error' + self.allocation.last_error = 'boom!' 
+ else: + marker[0] = True + + mock_fetch.side_effect = _side_effect + allocation = self.allocation.wait(self.session, ignore_error=True) + self.assertIs(allocation, self.allocation) + self.assertEqual(2, mock_fetch.call_count) + + def test_timeout(self, mock_fetch): + self.assertRaises( + exceptions.ResourceTimeout, + self.allocation.wait, + self.session, + timeout=0.001, + ) + mock_fetch.assert_called_with(self.allocation, self.session) diff --git a/openstack/tests/unit/baremetal/v1/test_chassis.py b/openstack/tests/unit/baremetal/v1/test_chassis.py new file mode 100644 index 0000000000..f6fdbb6039 --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_chassis.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal.v1 import chassis +from openstack.tests.unit import base + + +FAKE = { + "created_at": "2016-08-18T22:28:48.165105+00:00", + "description": "Sample chassis", + "extra": {}, + "links": [ + {"href": "http://127.0.0.1:6385/v1/chassis/ID", "rel": "self"}, + {"href": "http://127.0.0.1:6385/chassis/ID", "rel": "bookmark"}, + ], + "nodes": [ + {"href": "http://127.0.0.1:6385/v1/chassis/ID/nodes", "rel": "self"}, + {"href": "http://127.0.0.1:6385/chassis/ID/nodes", "rel": "bookmark"}, + ], + "updated_at": None, + "uuid": "dff29d23-1ded-43b4-8ae1-5eebb3e30de1", +} + + +class TestChassis(base.TestCase): + def test_basic(self): + sot = chassis.Chassis() + self.assertIsNone(sot.resource_key) + self.assertEqual('chassis', sot.resources_key) + self.assertEqual('/chassis', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = chassis.Chassis(**FAKE) + self.assertEqual(FAKE['uuid'], sot.id) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['description'], sot.description) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['nodes'], sot.nodes) + self.assertEqual(FAKE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/baremetal/v1/test_conductor.py b/openstack/tests/unit/baremetal/v1/test_conductor.py new file mode 100644 index 0000000000..fdc1f6869c --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_conductor.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import conductor +from openstack.tests.unit import base + + +FAKE = { + "links": [ + { + "href": "http://127.0.0.1:6385/v1/conductors/compute2.localdomain", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/conductors/compute2.localdomain", + "rel": "bookmark", + }, + ], + "created_at": "2018-12-05T07:03:19+00:00", + "hostname": "compute2.localdomain", + "conductor_group": "", + "updated_at": "2018-12-05T07:03:21+00:00", + "alive": True, + "drivers": ["ipmi"], +} + + +class TestContainer(base.TestCase): + def test_basic(self): + sot = conductor.Conductor() + self.assertIsNone(sot.resource_key) + self.assertEqual('conductors', sot.resources_key) + self.assertEqual('/conductors', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_patch) + + def test_instantiate(self): + sot = conductor.Conductor(**FAKE) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['hostname'], sot.hostname) + self.assertEqual(FAKE['conductor_group'], sot.conductor_group) + self.assertEqual(FAKE['alive'], sot.alive) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['drivers'], sot.drivers) diff --git a/openstack/tests/unit/baremetal/v1/test_deploy_templates.py b/openstack/tests/unit/baremetal/v1/test_deploy_templates.py new file mode 100644 index 0000000000..6b622d4ff2 --- 
/dev/null +++ b/openstack/tests/unit/baremetal/v1/test_deploy_templates.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import deploy_templates +from openstack.tests.unit import base + + +FAKE = { + "created_at": "2016-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": """http://10.60.253.180:6385/v1/deploy_templates + /bbb45f41-d4bc-4307-8d1d-32f95ce1e920""", + "rel": "self", + }, + { + "href": """http://10.60.253.180:6385/deploy_templates + /bbb45f41-d4bc-4307-8d1d-32f95ce1e920""", + "rel": "bookmark", + }, + ], + "name": "CUSTOM_HYPERTHREADING_ON", + "steps": [ + { + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "interface": "bios", + "priority": 150, + "step": "apply_configuration", + } + ], + "updated_at": None, + "uuid": "bbb45f41-d4bc-4307-8d1d-32f95ce1e920", +} + + +class DeployTemplates(base.TestCase): + def test_basic(self): + sot = deploy_templates.DeployTemplate() + self.assertIsNone(sot.resource_key) + self.assertEqual('deploy_templates', sot.resources_key) + self.assertEqual('/deploy_templates', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = deploy_templates.DeployTemplate(**FAKE) + self.assertEqual(FAKE['steps'], sot.steps) + 
self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['uuid'], sot.id) diff --git a/openstack/tests/unit/baremetal/v1/test_driver.py b/openstack/tests/unit/baremetal/v1/test_driver.py new file mode 100644 index 0000000000..5aec6bcc1e --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_driver.py @@ -0,0 +1,146 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.baremetal.v1 import _common +from openstack.baremetal.v1 import driver +from openstack import exceptions +from openstack.tests.unit import base + + +FAKE = { + "hosts": ["897ab1dad809"], + "links": [ + { + "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/drivers/agent_ipmitool", + "rel": "bookmark", + }, + ], + "name": "agent_ipmitool", + "properties": [ + { + "href": "http://127.0.0.1:6385/v1/drivers/agent_ipmitool/properties", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/drivers/agent_ipmitool/properties", + "rel": "bookmark", + }, + ], +} + + +class TestDriver(base.TestCase): + def test_basic(self): + sot = driver.Driver() + self.assertIsNone(sot.resource_key) + self.assertEqual('drivers', sot.resources_key) + self.assertEqual('/drivers', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_instantiate(self): + sot = driver.Driver(**FAKE) + self.assertEqual(FAKE['name'], sot.id) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['hosts'], sot.hosts) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['properties'], sot.properties) + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_list_vendor_passthru(self): + self.session = mock.Mock(spec=adapter.Adapter) + sot = driver.Driver(**FAKE) + fake_vendor_passthru_info = { + 'fake_vendor_method': { + 'async': True, + 'attach': False, + 'description': "Fake function that does nothing in background", + 'http_methods': ['GET', 'PUT', 'POST', 'DELETE'], + } + } + self.session.get.return_value.json.return_value = ( + fake_vendor_passthru_info + ) + result = sot.list_vendor_passthru(self.session) + self.session.get.assert_called_once_with( + 
'drivers/{driver_name}/vendor_passthru/methods'.format( + driver_name=FAKE["name"] + ), + headers=mock.ANY, + ) + self.assertEqual(result, fake_vendor_passthru_info) + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_call_vendor_passthru(self): + self.session = mock.Mock(spec=adapter.Adapter) + sot = driver.Driver(**FAKE) + # GET + sot.call_vendor_passthru(self.session, 'GET', 'fake_vendor_method') + self.session.get.assert_called_once_with( + 'drivers/{}/vendor_passthru?method={}'.format( + FAKE["name"], 'fake_vendor_method' + ), + json=None, + headers=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + # PUT + sot.call_vendor_passthru( + self.session, + 'PUT', + 'fake_vendor_method', + body={"fake_param_key": "fake_param_value"}, + ) + self.session.put.assert_called_once_with( + 'drivers/{}/vendor_passthru?method={}'.format( + FAKE["name"], 'fake_vendor_method' + ), + json={"fake_param_key": "fake_param_value"}, + headers=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + # POST + sot.call_vendor_passthru( + self.session, + 'POST', + 'fake_vendor_method', + body={"fake_param_key": "fake_param_value"}, + ) + self.session.post.assert_called_once_with( + 'drivers/{}/vendor_passthru?method={}'.format( + FAKE["name"], 'fake_vendor_method' + ), + json={"fake_param_key": "fake_param_value"}, + headers=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + # DELETE + sot.call_vendor_passthru(self.session, 'DELETE', 'fake_vendor_method') + self.session.delete.assert_called_once_with( + 'drivers/{}/vendor_passthru?method={}'.format( + FAKE["name"], 'fake_vendor_method' + ), + json=None, + headers=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) diff --git a/openstack/tests/unit/baremetal/v1/test_inspection_rules.py b/openstack/tests/unit/baremetal/v1/test_inspection_rules.py new file mode 100644 index 0000000000..446dba6595 --- /dev/null +++ 
b/openstack/tests/unit/baremetal/v1/test_inspection_rules.py @@ -0,0 +1,90 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import inspection_rules +from openstack.tests.unit import base + + +FAKE = { + "created_at": "2025-03-18T22:28:48.643434+11:11", + "description": "BMC credentials", + "phase": "main", + "priority": 100, + "sensitive": False, + "actions": [ + { + "op": "set-attribute", + "args": { + "path": "/properties/cpus", + "value": "{inventory[cpu][count]}", + }, + }, + { + "op": "set-attribute", + "args": { + "path": "/properties/memory_mb", + "value": "{inventory[memory][physical_mb]}", + }, + }, + { + "op": "set-attribute", + "args": { + "path": "/properties/cpu_arch", + "value": "{inventory[cpu][architecture]}", + }, + }, + ], + "conditions": [ + {"op": "is-true", "args": {"value": "{inventory[cpu][count]}"}} + ], + "links": [ + { + "href": "http://10.60.253.180:6385/v1/inspection_rules" + "/783bf33a-a8e3-1e23-a645-1e95a1f95186", + "rel": "self", + }, + { + "href": "http://10.60.253.180:6385/inspection_rules" + "/783bf33a-a8e3-1e23-a645-1e95a1f95186", + "rel": "bookmark", + }, + ], + "updated_at": None, + "uuid": "783bf33a-a8e3-1e23-a645-1e95a1f95186", +} + + +class InspectionRules(base.TestCase): + def test_basic(self): + sot = inspection_rules.InspectionRule() + self.assertIsNone(sot.resource_key) + self.assertEqual('inspection_rules', sot.resources_key) + self.assertEqual('/inspection_rules', sot.base_path) + 
self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = inspection_rules.InspectionRule(**FAKE) + self.assertEqual(FAKE['actions'], sot.actions) + self.assertEqual(FAKE['description'], sot.description) + self.assertEqual(FAKE['conditions'], sot.conditions) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['phase'], sot.phase) + self.assertEqual(FAKE['priority'], sot.priority) + self.assertEqual(FAKE['sensitive'], sot.sensitive) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['uuid'], sot.id) diff --git a/openstack/tests/unit/baremetal/v1/test_node.py b/openstack/tests/unit/baremetal/v1/test_node.py new file mode 100644 index 0000000000..183e27f25b --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_node.py @@ -0,0 +1,1579 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import base64 +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.baremetal.v1 import _common +from openstack.baremetal.v1 import node +from openstack import exceptions +from openstack import resource +from openstack.tests.unit import base +from openstack import utils + +# NOTE: Sample data from api-ref doc +FAKE = { + "automated_clean": False, + "boot_mode": "uefi", + "chassis_uuid": "1", # NOTE: missed in api-ref sample + "clean_step": {}, + "conductor_group": None, + "console_enabled": False, + "created_at": "2016-08-18T22:28:48.643434+00:00", + "description": "A node.", + "driver": "agent_ipmitool", + "driver_info": {"ipmi_password": "******", "ipmi_username": "ADMIN"}, + "driver_internal_info": {}, + "extra": {}, + "firmware_interface": None, + "inspection_finished_at": None, + "inspection_started_at": None, + "instance_info": {}, + "instance_name": "test-instance", + "instance_uuid": None, + "last_error": None, + "lessee": None, + "links": [ + {"href": "http://127.0.0.1:6385/v1/nodes/", "rel": "self"}, + {"href": "http://127.0.0.1:6385/nodes/", "rel": "bookmark"}, + ], + "maintenance": False, + "maintenance_reason": None, + "name": "test_node", + "network_interface": "flat", + "owner": "4b7ed919-e4a6-4017-a081-43205c5b0b73", + "parent_node": None, + "portgroups": [ + { + "href": "http://127.0.0.1:6385/v1/nodes//portgroups", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/nodes//portgroups", + "rel": "bookmark", + }, + ], + "ports": [ + { + "href": "http://127.0.0.1:6385/v1/nodes//ports", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/nodes//ports", + "rel": "bookmark", + }, + ], + "power_state": None, + "properties": {}, + "provision_state": "enroll", + "provision_updated_at": None, + "raid_config": {}, + "reservation": None, + "resource_class": None, + "service_step": {}, + "secure_boot": True, + "shard": "TestShard", + "runbook": None, + "states": [ + { + "href": 
"http://127.0.0.1:6385/v1/nodes//states", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/nodes//states", + "rel": "bookmark", + }, + ], + "target_power_state": None, + "target_provision_state": None, + "target_raid_config": {}, + "updated_at": None, + "uuid": "6d85703a-565d-469a-96ce-30b6de53079d", +} + + +def _fake_assert(self, session, expected, error_message=None): + return expected + + +class TestNode(base.TestCase): + def test_basic(self): + sot = node.Node() + self.assertIsNone(sot.resource_key) + self.assertEqual('nodes', sot.resources_key) + self.assertEqual('/nodes', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = node.Node(**FAKE) + + self.assertEqual(FAKE['uuid'], sot.id) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual( + FAKE['automated_clean'], sot.is_automated_clean_enabled + ) + self.assertEqual(FAKE['boot_mode'], sot.boot_mode) + self.assertEqual(FAKE['chassis_uuid'], sot.chassis_id) + self.assertEqual(FAKE['clean_step'], sot.clean_step) + self.assertEqual(FAKE['conductor_group'], sot.conductor_group) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['description'], sot.description) + self.assertEqual(FAKE['driver'], sot.driver) + self.assertEqual(FAKE['driver_info'], sot.driver_info) + self.assertEqual( + FAKE['driver_internal_info'], sot.driver_internal_info + ) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['firmware_interface'], sot.firmware_interface) + self.assertEqual(FAKE['instance_info'], sot.instance_info) + self.assertEqual(FAKE['instance_name'], sot.instance_name) + self.assertEqual(FAKE['instance_uuid'], sot.instance_id) + self.assertEqual(FAKE['console_enabled'], sot.is_console_enabled) + self.assertEqual(FAKE['maintenance'], 
sot.is_maintenance) + self.assertEqual(FAKE['last_error'], sot.last_error) + self.assertEqual(FAKE['lessee'], sot.lessee) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['maintenance_reason'], sot.maintenance_reason) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['network_interface'], sot.network_interface) + self.assertEqual(FAKE['owner'], sot.owner) + self.assertEqual(FAKE['parent_node'], sot.parent_node) + self.assertEqual(FAKE['ports'], sot.ports) + self.assertEqual(FAKE['portgroups'], sot.port_groups) + self.assertEqual(FAKE['power_state'], sot.power_state) + self.assertEqual(FAKE['properties'], sot.properties) + self.assertEqual(FAKE['provision_state'], sot.provision_state) + self.assertEqual(FAKE['raid_config'], sot.raid_config) + self.assertEqual(FAKE['reservation'], sot.reservation) + self.assertEqual(FAKE['resource_class'], sot.resource_class) + self.assertEqual(FAKE['service_step'], sot.service_step) + self.assertEqual(FAKE['secure_boot'], sot.is_secure_boot) + self.assertEqual(FAKE['runbook'], sot.runbook) + self.assertEqual(FAKE['states'], sot.states) + self.assertEqual( + FAKE['target_provision_state'], sot.target_provision_state + ) + self.assertEqual(FAKE['target_power_state'], sot.target_power_state) + self.assertEqual(FAKE['target_raid_config'], sot.target_raid_config) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + + def test_normalize_provision_state(self): + attrs = dict(FAKE, provision_state=None) + sot = node.Node(**attrs) + self.assertEqual('available', sot.provision_state) + + @mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_list(self): + self.node = node.Node() + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion=None + ) + # Set a default, so we don't try and figure out the microversions + # with additional requests. 
+ self.session.default_microversion = float(self.node._max_microversion) + self.session.get.return_value.json.return_value = {'nodes': []} + + result = list( + self.node.list( + self.session, + details=False, + shard='meow', + allow_unknown_params=True, + ) + ) + self.assertEqual(0, len(result)) + self.session.get.assert_called_once_with( + '/nodes', + headers={'Accept': 'application/json'}, + params={'shard': 'meow'}, + microversion=float(self.node._max_microversion), + ) + + +@mock.patch('time.sleep', lambda _t: None) +@mock.patch.object(node.Node, 'fetch', autospec=True) +class TestNodeWaitForProvisionState(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock() + + def test_success(self, mock_fetch): + def _get_side_effect(_self, session): + self.node.provision_state = 'manageable' + self.assertIs(session, self.session) + + mock_fetch.side_effect = _get_side_effect + + node = self.node.wait_for_provision_state(self.session, 'manageable') + self.assertIs(node, self.node) + + def test_failure(self, mock_fetch): + def _get_side_effect(_self, session): + self.node.provision_state = 'deploy failed' + self.assertIs(session, self.session) + + mock_fetch.side_effect = _get_side_effect + + self.assertRaisesRegex( + exceptions.ResourceFailure, + "failure state 'deploy failed'", + self.node.wait_for_provision_state, + self.session, + 'manageable', + ) + + def test_failure_error(self, mock_fetch): + def _get_side_effect(_self, session): + self.node.provision_state = 'error' + self.assertIs(session, self.session) + + mock_fetch.side_effect = _get_side_effect + + self.assertRaisesRegex( + exceptions.ResourceFailure, + "failure state 'error'", + self.node.wait_for_provision_state, + self.session, + 'manageable', + ) + + def test_enroll_as_failure(self, mock_fetch): + def _get_side_effect(_self, session): + self.node.provision_state = 'enroll' + self.node.last_error = 'power failure' + self.assertIs(session, 
self.session) + + mock_fetch.side_effect = _get_side_effect + + self.assertRaisesRegex( + exceptions.ResourceFailure, + 'failed to verify management credentials', + self.node.wait_for_provision_state, + self.session, + 'manageable', + ) + + def test_timeout(self, mock_fetch): + self.assertRaises( + exceptions.ResourceTimeout, + self.node.wait_for_provision_state, + self.session, + 'manageable', + timeout=0.001, + ) + + def test_not_abort_on_failed_state(self, mock_fetch): + def _get_side_effect(_self, session): + self.node.provision_state = 'deploy failed' + self.assertIs(session, self.session) + + mock_fetch.side_effect = _get_side_effect + + self.assertRaises( + exceptions.ResourceTimeout, + self.node.wait_for_provision_state, + self.session, + 'manageable', + timeout=0.001, + abort_on_failed_state=False, + ) + + +@mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeSetProvisionState(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion=None + ) + + def test_no_arguments(self): + result = self.node.set_provision_state(self.session, 'active') + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'active'}, + headers=mock.ANY, + microversion=None, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_manage(self): + result = self.node.set_provision_state(self.session, 'manage') + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'manage'}, + headers=mock.ANY, + microversion='1.4', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_deploy_with_configdrive(self): + result = 
self.node.set_provision_state( + self.session, 'active', config_drive='abcd' + ) + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'active', 'configdrive': 'abcd'}, + headers=mock.ANY, + microversion=None, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_deploy_with_configdrive_as_bytestring(self): + config_drive = base64.b64encode(b'foo') + result = self.node.set_provision_state( + self.session, 'active', config_drive=config_drive + ) + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'active', 'configdrive': config_drive.decode()}, + headers=mock.ANY, + microversion=None, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_rebuild_with_configdrive(self): + result = self.node.set_provision_state( + self.session, 'rebuild', config_drive='abcd' + ) + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'rebuild', 'configdrive': 'abcd'}, + headers=mock.ANY, + microversion='1.35', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_configdrive_as_dict(self): + for target in ('rebuild', 'active'): + self.session.put.reset_mock() + result = self.node.set_provision_state( + self.session, target, config_drive={'user_data': 'abcd'} + ) + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': target, 'configdrive': {'user_data': 'abcd'}}, + headers=mock.ANY, + microversion='1.56', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_deploy_with_deploy_steps(self): + deploy_steps = [{'interface': 'deploy', 'step': 'upgrade_fw'}] + result = self.node.set_provision_state( + self.session, 'active', deploy_steps=deploy_steps + ) + + 
self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'active', 'deploy_steps': deploy_steps}, + headers=mock.ANY, + microversion='1.69', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_rebuild_with_deploy_steps(self): + deploy_steps = [{'interface': 'deploy', 'step': 'upgrade_fw'}] + result = self.node.set_provision_state( + self.session, 'rebuild', deploy_steps=deploy_steps + ) + + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'rebuild', 'deploy_steps': deploy_steps}, + headers=mock.ANY, + microversion='1.69', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_provision_state_unhold(self): + result = self.node.set_provision_state(self.session, 'unhold') + + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'unhold'}, + headers=mock.ANY, + microversion='1.85', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_provision_state_service(self): + service_steps = [{'interface': 'deploy', 'step': 'hold'}] + result = self.node.set_provision_state( + self.session, 'service', service_steps=service_steps + ) + + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'service', 'service_steps': service_steps}, + headers=mock.ANY, + microversion='1.87', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_provision_state_clean_runbook(self): + runbook = 'CUSTOM_AWESOME' + result = self.node.set_provision_state( + self.session, 'clean', runbook=runbook + ) + + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'clean', 'runbook': runbook}, + 
headers=mock.ANY, + microversion='1.92', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_provision_state_service_runbook(self): + runbook = 'CUSTOM_AWESOME' + result = self.node.set_provision_state( + self.session, 'service', runbook=runbook + ) + + self.assertIs(result, self.node) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/provision', + json={'target': 'service', 'runbook': runbook}, + headers=mock.ANY, + microversion='1.92', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + +@mock.patch.object(node.Node, '_translate_response', mock.Mock()) +@mock.patch.object(node.Node, '_get_session', lambda self, x: x) +@mock.patch.object(node.Node, 'set_provision_state', autospec=True) +class TestNodeCreate(base.TestCase): + def setUp(self): + super().setUp() + self.new_state = None + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.1' + self.node = node.Node(driver=FAKE['driver']) + + def _change_state(*args, **kwargs): + self.node.provision_state = self.new_state + + self.session.post.side_effect = _change_state + + def test_available_old_version(self, mock_prov): + self.node.provision_state = 'available' + result = self.node.create(self.session) + self.assertIs(result, self.node) + self.session.post.assert_called_once_with( + mock.ANY, + json={'driver': FAKE['driver']}, + headers=mock.ANY, + microversion=self.session.default_microversion, + params={}, + ) + self.assertFalse(mock_prov.called) + + def test_available_new_version(self, mock_prov): + self.session.default_microversion = '1.11' + self.node.provision_state = 'available' + + result = self.node.create(self.session) + self.assertIs(result, self.node) + self.session.post.assert_called_once_with( + mock.ANY, + json={'driver': FAKE['driver']}, + headers=mock.ANY, + microversion='1.10', + params={}, + ) + mock_prov.assert_not_called() + + def test_no_enroll_in_old_version(self, mock_prov): + 
self.node.provision_state = 'enroll' + self.assertRaises( + exceptions.NotSupported, self.node.create, self.session + ) + self.assertFalse(self.session.post.called) + self.assertFalse(mock_prov.called) + + def test_enroll_new_version(self, mock_prov): + self.session.default_microversion = '1.11' + self.node.provision_state = 'enroll' + self.new_state = 'enroll' + + result = self.node.create(self.session) + self.assertIs(result, self.node) + self.session.post.assert_called_once_with( + mock.ANY, + json={'driver': FAKE['driver']}, + headers=mock.ANY, + microversion=self.session.default_microversion, + params={}, + ) + self.assertFalse(mock_prov.called) + + def test_no_manageable_in_old_version(self, mock_prov): + self.node.provision_state = 'manageable' + self.assertRaises( + exceptions.NotSupported, self.node.create, self.session + ) + self.assertFalse(self.session.post.called) + self.assertFalse(mock_prov.called) + + def test_manageable_old_version(self, mock_prov): + self.session.default_microversion = '1.4' + self.node.provision_state = 'manageable' + self.new_state = 'available' + + result = self.node.create(self.session) + self.assertIs(result, self.node) + self.session.post.assert_called_once_with( + mock.ANY, + json={'driver': FAKE['driver']}, + headers=mock.ANY, + microversion=self.session.default_microversion, + params={}, + ) + mock_prov.assert_called_once_with( + self.node, self.session, 'manage', wait=True + ) + + def test_manageable_new_version(self, mock_prov): + self.session.default_microversion = '1.11' + self.node.provision_state = 'manageable' + self.new_state = 'enroll' + + result = self.node.create(self.session) + self.assertIs(result, self.node) + self.session.post.assert_called_once_with( + mock.ANY, + json={'driver': FAKE['driver']}, + headers=mock.ANY, + microversion=self.session.default_microversion, + params={}, + ) + mock_prov.assert_called_once_with( + self.node, self.session, 'manage', wait=True + ) + + +@mock.patch.object(exceptions, 
'raise_from_response', mock.Mock()) +@mock.patch.object(node.Node, '_get_session', lambda self, x: x) +class TestNodeVif(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.67' + self.session.log = mock.Mock() + self.node = node.Node( + id='c29db401-b6a7-4530-af8e-20a720dee946', driver=FAKE['driver'] + ) + self.vif_id = '714bdf6d-2386-4b5e-bd0d-bc036f04b1ef' + self.vif_port_uuid = 'port-uuid' + self.vif_portgroup_uuid = 'portgroup-uuid' + + def test_attach_vif(self): + self.assertIsNone(self.node.attach_vif(self.session, self.vif_id)) + self.session.post.assert_called_once_with( + f'nodes/{self.node.id}/vifs', + json={'id': self.vif_id}, + headers=mock.ANY, + microversion='1.67', + retriable_status_codes=[409, 503], + ) + + def test_attach_vif_no_retries(self): + self.assertIsNone( + self.node.attach_vif( + self.session, self.vif_id, retry_on_conflict=False + ) + ) + self.session.post.assert_called_once_with( + f'nodes/{self.node.id}/vifs', + json={'id': self.vif_id}, + headers=mock.ANY, + microversion='1.67', + retriable_status_codes=[503], + ) + + def test_attach_vif_with_port_uuid(self): + self.assertIsNone( + self.node.attach_vif( + self.session, self.vif_id, port_id=self.vif_port_uuid + ) + ) + self.session.post.assert_called_once_with( + f'nodes/{self.node.id}/vifs', + json={'id': self.vif_id, 'port_uuid': self.vif_port_uuid}, + headers=mock.ANY, + microversion='1.67', + retriable_status_codes=[409, 503], + ) + + def test_attach_vif_with_portgroup_uuid(self): + self.assertIsNone( + self.node.attach_vif( + self.session, + self.vif_id, + port_group_id=self.vif_portgroup_uuid, + ) + ) + self.session.post.assert_called_once_with( + f'nodes/{self.node.id}/vifs', + json={ + 'id': self.vif_id, + 'portgroup_uuid': self.vif_portgroup_uuid, + }, + headers=mock.ANY, + microversion='1.67', + retriable_status_codes=[409, 503], + ) + + def 
test_attach_vif_with_port_uuid_and_portgroup_uuid(self): + self.assertRaises( + exceptions.InvalidRequest, + self.node.attach_vif, + self.session, + self.vif_id, + port_id=self.vif_port_uuid, + port_group_id=self.vif_portgroup_uuid, + ) + + def test_detach_vif_existing(self): + self.assertTrue(self.node.detach_vif(self.session, self.vif_id)) + self.session.delete.assert_called_once_with( + f'nodes/{self.node.id}/vifs/{self.vif_id}', + headers=mock.ANY, + microversion='1.67', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_detach_vif_missing(self): + self.session.delete.return_value.status_code = 400 + self.assertFalse(self.node.detach_vif(self.session, self.vif_id)) + self.session.delete.assert_called_once_with( + f'nodes/{self.node.id}/vifs/{self.vif_id}', + headers=mock.ANY, + microversion='1.67', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_list_vifs(self): + self.session.get.return_value.json.return_value = { + 'vifs': [ + {'id': '1234'}, + {'id': '5678'}, + ] + } + res = self.node.list_vifs(self.session) + self.assertEqual(['1234', '5678'], res) + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/vifs', + headers=mock.ANY, + microversion='1.67', + ) + + def test_incompatible_microversion(self): + self.session.default_microversion = '1.1' + self.assertRaises( + exceptions.NotSupported, + self.node.attach_vif, + self.session, + self.vif_id, + ) + self.assertRaises( + exceptions.NotSupported, + self.node.detach_vif, + self.session, + self.vif_id, + ) + self.assertRaises( + exceptions.NotSupported, self.node.list_vifs, self.session + ) + + def test_incompatible_microversion_optional_params(self): + self.session.default_microversion = '1.28' + self.assertRaises( + exceptions.NotSupported, + self.node.attach_vif, + self.session, + self.vif_id, + port_id=self.vif_port_uuid, + ) + self.assertRaises( + exceptions.NotSupported, + self.node.attach_vif, + self.session, + self.vif_id, + 
port_group_id=self.vif_portgroup_uuid, + ) + + +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +@mock.patch.object(node.Node, '_get_session', lambda self, x: x) +class TestNodeVmedia(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.89' + self.session.log = mock.Mock() + self.node = node.Node( + id='c29db401-b6a7-4530-af8e-20a720dee946', driver=FAKE['driver'] + ) + self.device_type = "CDROM" + self.image_url = "http://image" + + def test_attach_vmedia(self): + self.assertIsNone( + self.node.attach_vmedia( + self.session, self.device_type, self.image_url + ) + ) + self.session.post.assert_called_once_with( + f'nodes/{self.node.id}/vmedia', + json={ + 'device_type': self.device_type, + 'image_url': self.image_url, + }, + headers=mock.ANY, + microversion='1.89', + retriable_status_codes=[409, 503], + ) + + def test_attach_vmedia_no_retries(self): + self.assertIsNone( + self.node.attach_vmedia( + self.session, + self.device_type, + self.image_url, + retry_on_conflict=False, + ) + ) + self.session.post.assert_called_once_with( + f'nodes/{self.node.id}/vmedia', + json={ + 'device_type': self.device_type, + 'image_url': self.image_url, + }, + headers=mock.ANY, + microversion='1.89', + retriable_status_codes=[503], + ) + + def test_detach_vmedia_existing(self): + self.assertIsNone(self.node.detach_vmedia(self.session)) + self.session.delete.assert_called_once_with( + f'nodes/{self.node.id}/vmedia', + headers=mock.ANY, + microversion='1.89', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_detach_vmedia_missing(self): + self.session.delete.return_value.status_code = 400 + self.assertIsNone(self.node.detach_vmedia(self.session)) + self.session.delete.assert_called_once_with( + f'nodes/{self.node.id}/vmedia', + headers=mock.ANY, + microversion='1.89', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def 
test_incompatible_microversion(self): + self.session.default_microversion = '1.1' + self.assertRaises( + exceptions.NotSupported, + self.node.attach_vmedia, + self.session, + self.device_type, + self.image_url, + ) + self.assertRaises( + exceptions.NotSupported, + self.node.detach_vmedia, + self.session, + ) + + +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +@mock.patch.object(node.Node, '_get_session', lambda self, x: x) +class TestNodeValidate(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.28' + self.node = node.Node(**FAKE) + + def test_validate_ok(self): + self.session.get.return_value.json.return_value = { + 'boot': {'result': True}, + 'console': {'result': False, 'reason': 'Not configured'}, + 'deploy': {'result': True}, + 'inspect': {'result': None, 'reason': 'Not supported'}, + 'power': {'result': True}, + } + result = self.node.validate(self.session) + for iface in ('boot', 'deploy', 'power'): + self.assertTrue(result[iface].result) + self.assertFalse(result[iface].reason) + for iface in ('console', 'inspect'): + self.assertIsNot(True, result[iface].result) + self.assertTrue(result[iface].reason) + + def test_validate_failed(self): + self.session.get.return_value.json.return_value = { + 'boot': {'result': False}, + 'console': {'result': False, 'reason': 'Not configured'}, + 'deploy': {'result': False, 'reason': 'No deploy for you'}, + 'inspect': {'result': None, 'reason': 'Not supported'}, + 'power': {'result': True}, + } + self.assertRaisesRegex( + exceptions.ValidationException, + 'No deploy for you', + self.node.validate, + self.session, + ) + + def test_validate_no_failure(self): + self.session.get.return_value.json.return_value = { + 'boot': {'result': False}, + 'console': {'result': False, 'reason': 'Not configured'}, + 'deploy': {'result': False, 'reason': 'No deploy for you'}, + 'inspect': {'result': None, 'reason': 'Not 
supported'}, + 'power': {'result': True}, + } + result = self.node.validate(self.session, required=None) + self.assertTrue(result['power'].result) + self.assertFalse(result['power'].reason) + for iface in ('deploy', 'console', 'inspect'): + self.assertIsNot(True, result[iface].result) + self.assertTrue(result[iface].reason) + # Reason can be empty + self.assertFalse(result['boot'].result) + self.assertIsNone(result['boot'].reason) + + +@mock.patch('time.sleep', lambda _t: None) +@mock.patch.object(node.Node, 'fetch', autospec=True) +class TestNodeWaitForReservation(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.6' + self.session.log = mock.Mock() + self.node = node.Node(**FAKE) + + def test_no_reservation(self, mock_fetch): + self.node.reservation = None + node = self.node.wait_for_reservation(None) + self.assertIs(node, self.node) + self.assertFalse(mock_fetch.called) + + def test_reservation(self, mock_fetch): + self.node.reservation = 'example.com' + + def _side_effect(node, session): + if self.node.reservation == 'example.com': + self.node.reservation = 'example2.com' + else: + self.node.reservation = None + + mock_fetch.side_effect = _side_effect + node = self.node.wait_for_reservation(self.session) + self.assertIs(node, self.node) + self.assertEqual(2, mock_fetch.call_count) + + def test_timeout(self, mock_fetch): + self.node.reservation = 'example.com' + + self.assertRaises( + exceptions.ResourceTimeout, + self.node.wait_for_reservation, + self.session, + timeout=0.001, + ) + mock_fetch.assert_called_with(self.node, self.session) + + +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeInjectNMI(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.29' + self.node = node.Node(**FAKE) + + def test_inject_nmi(self): + 
self.node.inject_nmi(self.session) + self.session.put.assert_called_once_with( + 'nodes/{}/management/inject_nmi'.format(FAKE['uuid']), + json={}, + headers=mock.ANY, + microversion='1.29', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_incompatible_microversion(self): + self.session.default_microversion = '1.28' + self.assertRaises( + exceptions.NotSupported, + self.node.inject_nmi, + self.session, + ) + + +@mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeSetPowerState(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion=None + ) + + def test_power_on(self): + self.node.set_power_state(self.session, 'power on') + self.session.put.assert_called_once_with( + 'nodes/{}/states/power'.format(FAKE['uuid']), + json={'target': 'power on'}, + headers=mock.ANY, + microversion=None, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_soft_power_on(self): + self.node.set_power_state(self.session, 'soft power off') + self.session.put.assert_called_once_with( + 'nodes/{}/states/power'.format(FAKE['uuid']), + json={'target': 'soft power off'}, + headers=mock.ANY, + microversion='1.27', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +@mock.patch.object(node.Node, '_translate_response', mock.Mock()) +@mock.patch.object(node.Node, '_get_session', lambda self, x: x) +class TestNodeMaintenance(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node.existing(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, + default_microversion='1.1', + retriable_status_codes=None, + ) + + def test_set(self): + self.node.set_maintenance(self.session) + self.session.put.assert_called_once_with( + 
f'nodes/{self.node.id}/maintenance', + json={'reason': None}, + headers=mock.ANY, + microversion=mock.ANY, + ) + + def test_set_with_reason(self): + self.node.set_maintenance(self.session, 'No work on Monday') + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json={'reason': 'No work on Monday'}, + headers=mock.ANY, + microversion=mock.ANY, + ) + + def test_unset(self): + self.node.unset_maintenance(self.session) + self.session.delete.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json=None, + headers=mock.ANY, + microversion=mock.ANY, + ) + + def test_set_via_update(self): + self.node.is_maintenance = True + self.node.commit(self.session) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json={'reason': None}, + headers=mock.ANY, + microversion=mock.ANY, + ) + + self.assertFalse(self.session.patch.called) + + def test_set_with_reason_via_update(self): + self.node.is_maintenance = True + self.node.maintenance_reason = 'No work on Monday' + self.node.commit(self.session) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json={'reason': 'No work on Monday'}, + headers=mock.ANY, + microversion=mock.ANY, + ) + self.assertFalse(self.session.patch.called) + + def test_set_with_other_fields(self): + self.node.is_maintenance = True + self.node.name = 'lazy-3000' + self.node.commit(self.session) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json={'reason': None}, + headers=mock.ANY, + microversion=mock.ANY, + ) + + self.session.patch.assert_called_once_with( + f'nodes/{self.node.id}', + json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}], + headers=mock.ANY, + microversion=mock.ANY, + ) + + def test_set_with_reason_and_other_fields(self): + self.node.is_maintenance = True + self.node.maintenance_reason = 'No work on Monday' + self.node.name = 'lazy-3000' + self.node.commit(self.session) + 
self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json={'reason': 'No work on Monday'}, + headers=mock.ANY, + microversion=mock.ANY, + ) + + self.session.patch.assert_called_once_with( + f'nodes/{self.node.id}', + json=[{'path': '/name', 'op': 'replace', 'value': 'lazy-3000'}], + headers=mock.ANY, + microversion=mock.ANY, + ) + + def test_no_reason_without_maintenance(self): + self.node.maintenance_reason = 'Can I?' + self.assertRaises(ValueError, self.node.commit, self.session) + self.assertFalse(self.session.put.called) + self.assertFalse(self.session.patch.called) + + def test_set_unset_maintenance(self): + self.node.is_maintenance = True + self.node.maintenance_reason = 'No work on Monday' + self.node.commit(self.session) + + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json={'reason': 'No work on Monday'}, + headers=mock.ANY, + microversion=mock.ANY, + ) + + self.node.is_maintenance = False + self.node.commit(self.session) + self.assertIsNone(self.node.maintenance_reason) + + self.session.delete.assert_called_once_with( + f'nodes/{self.node.id}/maintenance', + json=None, + headers=mock.ANY, + microversion=mock.ANY, + ) + + +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeBootDevice(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion='1.1' + ) + + def test_get_boot_device(self): + self.node.get_boot_device(self.session) + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/management/boot_device', + headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_boot_device(self): + self.node.set_boot_device(self.session, 'pxe', persistent=False) + self.session.put.assert_called_once_with( + 
f'nodes/{self.node.id}/management/boot_device', + json={'boot_device': 'pxe', 'persistent': False}, + headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_get_supported_boot_devices(self): + self.node.get_supported_boot_devices(self.session) + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/management/boot_device/supported', + headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + +@mock.patch.object(utils, 'pick_microversion', lambda session, v: v) +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeSetBootMode(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion='1.1' + ) + + def test_node_set_boot_mode(self): + self.node.set_boot_mode(self.session, 'uefi') + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/boot_mode', + json={'target': 'uefi'}, + headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_node_set_boot_mode_invalid_mode(self): + self.assertRaises( + ValueError, self.node.set_boot_mode, self.session, 'invalid-efi' + ) + + +@mock.patch.object(utils, 'pick_microversion', lambda session, v: v) +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeSetSecureBoot(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion='1.1' + ) + + def test_node_set_secure_boot(self): + self.node.set_secure_boot(self.session, True) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/secure_boot', + json={'target': True}, + 
headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_node_set_secure_boot_invalid_none(self): + self.assertRaises( + ValueError, self.node.set_secure_boot, self.session, None + ) + + +@mock.patch.object(utils, 'pick_microversion', lambda session, v: v) +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeTraits(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion='1.37' + ) + self.session.log = mock.Mock() + + def test_node_add_trait(self): + self.node.add_trait(self.session, 'CUSTOM_FAKE') + self.session.put.assert_called_once_with( + 'nodes/{}/traits/{}'.format(self.node.id, 'CUSTOM_FAKE'), + json=None, + headers=mock.ANY, + microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_remove_trait(self): + self.assertTrue(self.node.remove_trait(self.session, 'CUSTOM_FAKE')) + self.session.delete.assert_called_once_with( + 'nodes/{}/traits/{}'.format(self.node.id, 'CUSTOM_FAKE'), + headers=mock.ANY, + microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_remove_trait_missing(self): + self.session.delete.return_value.status_code = 400 + self.assertFalse( + self.node.remove_trait(self.session, 'CUSTOM_MISSING') + ) + self.session.delete.assert_called_once_with( + 'nodes/{}/traits/{}'.format(self.node.id, 'CUSTOM_MISSING'), + headers=mock.ANY, + microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_traits(self): + traits = ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING'] + self.node.set_traits(self.session, traits) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/traits', + json={'traits': ['CUSTOM_FAKE', 'CUSTOM_REAL', 'CUSTOM_MISSING']}, + headers=mock.ANY, + 
microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + +@mock.patch.object(node.Node, '_assert_microversion_for', _fake_assert) +@mock.patch.object(resource.Resource, 'patch', autospec=True) +class TestNodePatch(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, + default_microversion='1.1', + retriable_status_codes=None, + ) + self.session.log = mock.Mock() + + def test_node_patch(self, mock_patch): + patch = {'path': 'test'} + self.node.patch(self.session, patch=patch) + mock_patch.assert_called_once() + kwargs = mock_patch.call_args[1] + self.assertEqual(kwargs['patch'], {'path': 'test'}) + + @mock.patch.object(resource.Resource, '_prepare_request', autospec=True) + @mock.patch.object(resource.Resource, '_commit', autospec=True) + def test_node_patch_reset_interfaces( + self, mock__commit, mock_prepreq, mock_patch + ): + patch = {'path': 'test'} + self.node.patch( + self.session, + patch=patch, + retry_on_conflict=True, + reset_interfaces=True, + ) + mock_prepreq.assert_called_once() + prepreq_kwargs = mock_prepreq.call_args[1] + self.assertEqual( + prepreq_kwargs['params'], [('reset_interfaces', True)] + ) + mock__commit.assert_called_once() + commit_args = mock__commit.call_args[0] + commit_kwargs = mock__commit.call_args[1] + self.assertIn('1.45', commit_args) + self.assertEqual(commit_kwargs['retry_on_conflict'], True) + mock_patch.assert_not_called() + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + @mock.patch.object(node.Node, '_get_session', lambda self, x: x) + def test_original_body_sync_after_commit(self, mock_patch): + """Test _original_body synchronization after commit().""" + + self.node.name = 'server-1' + self.node.description = 'initial-desc' + + mock_response1 = mock.Mock() + mock_response1.status_code = 200 + mock_response1.headers = {} + mock_response1.json.return_value = { + 'uuid': FAKE['uuid'], + 
'name': 'server-1', + 'driver': FAKE['driver'], + } + self.session.patch.return_value = mock_response1 + + self.node.commit(self.session) + + self.assertEqual(self.node._original_body.get('name'), 'server-1') + self.assertEqual(self.node._body.attributes.get('name'), 'server-1') + + # NOTE(cid): _original_body should have description even though + # it wasn't in the response, because it exists in _body.attributes + self.assertEqual( + self.node._original_body.get('description'), + self.node._body.attributes.get('description'), + '_original_body is not in sync with _body.attributes', + ) + + self.node.description = 'updated-desc' + patch = self.node._prepare_request_body(patch=True, prepend_key=False) + + # Verify patch only contains description change + patch_paths = [op.get('path') for op in patch] + self.assertIn( + '/description', + patch_paths, + 'Patch should include description change', + ) + self.assertNotIn( + '/name', + patch_paths, + 'Patch should NOT include name (already committed)', + ) + + # Verify only one operation in patch + self.assertEqual( + len(patch), 1, f'Patch should only have description, got: {patch}' + ) + self.assertEqual(patch[0]['path'], '/description') + self.assertEqual(patch[0]['op'], 'replace') + self.assertEqual(patch[0]['value'], 'updated-desc') + + +@mock.patch('time.sleep', lambda _t: None) +@mock.patch.object(node.Node, 'fetch', autospec=True) +class TestNodeWaitForPowerState(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock() + + def test_success(self, mock_fetch): + self.node.power_state = 'power on' + + def _get_side_effect(_self, session): + self.node.power_state = 'power off' + self.assertIs(session, self.session) + + mock_fetch.side_effect = _get_side_effect + + node = self.node.wait_for_power_state(self.session, 'power off') + self.assertIs(node, self.node) + + def test_timeout(self, mock_fetch): + self.node.power_state = 'power on' + self.assertRaises( + 
exceptions.ResourceTimeout, + self.node.wait_for_power_state, + self.session, + 'power off', + timeout=0.001, + ) + + +@mock.patch.object(utils, 'pick_microversion', lambda session, v: v) +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodePassthru(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion='1.37' + ) + self.session.log = mock.Mock() + + def test_get_passthru(self): + self.node.call_vendor_passthru(self.session, "GET", "test_method") + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/vendor_passthru?method=test_method', + headers=mock.ANY, + microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_post_passthru(self): + self.node.call_vendor_passthru(self.session, "POST", "test_method") + self.session.post.assert_called_once_with( + f'nodes/{self.node.id}/vendor_passthru?method=test_method', + headers=mock.ANY, + microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_put_passthru(self): + self.node.call_vendor_passthru(self.session, "PUT", "test_method") + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/vendor_passthru?method=test_method', + headers=mock.ANY, + microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_delete_passthru(self): + self.node.call_vendor_passthru(self.session, "DELETE", "test_method") + self.session.delete.assert_called_once_with( + f'nodes/{self.node.id}/vendor_passthru?method=test_method', + headers=mock.ANY, + microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_list_passthru(self): + self.node.list_vendor_passthru(self.session) + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/vendor_passthru/methods', + headers=mock.ANY, + 
microversion='1.37', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeConsole(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, + default_microversion='1.1', + ) + + def test_get_console(self): + self.node.get_console(self.session) + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/states/console', + headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_console_mode(self): + self.node.set_console_mode(self.session, True) + self.session.put.assert_called_once_with( + f'nodes/{self.node.id}/states/console', + json={'enabled': True}, + headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + def test_set_console_mode_invalid_enabled(self): + self.assertRaises( + ValueError, + self.node.set_console_mode, + self.session, + 'true', # not a bool + ) + + +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeInventory(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, + default_microversion='1.81', + ) + + def test_get_inventory(self): + node_inventory = { + 'inventory': { + 'memory': {'physical_mb': 3072}, + 'cpu': { + 'count': 1, + 'model_name': 'qemu64', + 'architecture': 'x86_64', + }, + 'disks': [{'name': 'testvm1.qcow2', 'size': 11811160064}], + 'interfaces': [{'mac_address': '52:54:00:c7:02:45'}], + 'system_vendor': { + 'product_name': 'testvm1', + 'manufacturer': 'Sushy Emulator', + }, + 'boot': {'current_boot_mode': 'uefi'}, + }, + 'plugin_data': {'fake_plugin_data'}, + } + 
self.session.get.return_value.json.return_value = node_inventory + + res = self.node.get_node_inventory(self.session, self.node.id) + self.assertEqual(node_inventory, res) + + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/inventory', + headers=mock.ANY, + microversion='1.81', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) + + +@mock.patch.object(node.Node, 'fetch', lambda self, session: self) +@mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) +class TestNodeFirmware(base.TestCase): + def setUp(self): + super().setUp() + self.node = node.Node(**FAKE) + self.session = mock.Mock( + spec=adapter.Adapter, + default_microversion='1.86', + ) + + def test_list_firmware(self): + node_firmware = { + "firmware": [ + { + "created_at": "2016-08-18T22:28:49.653974+00:00", + "updated_at": "2016-08-18T22:28:49.653974+00:00", + "component": "BMC", + "initial_version": "v1.0.0", + "current_version": "v1.2.0", + "last_version_flashed": "v1.2.0", + }, + { + "created_at": "2016-08-18T22:28:49.653974+00:00", + "updated_at": "2016-08-18T22:28:49.653974+00:00", + "component": "BIOS", + "initial_version": "v1.0.0", + "current_version": "v1.1.5", + "last_version_flashed": "v1.1.5", + }, + ] + } + self.session.get.return_value.json.return_value = node_firmware + + res = self.node.list_firmware(self.session) + self.assertEqual(node_firmware, res) + + self.session.get.assert_called_once_with( + f'nodes/{self.node.id}/firmware', + headers=mock.ANY, + microversion='1.86', + retriable_status_codes=_common.RETRIABLE_STATUS_CODES, + ) diff --git a/openstack/tests/unit/baremetal/v1/test_port.py b/openstack/tests/unit/baremetal/v1/test_port.py new file mode 100644 index 0000000000..3bb24dd48d --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_port.py @@ -0,0 +1,101 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.baremetal.v1 import port +from openstack.tests.unit import base + + +FAKE = { + "address": "11:11:11:11:11:11", + "created_at": "2016-08-18T22:28:49.946416+00:00", + "description": "Physical network", + "extra": {}, + "internal_info": {}, + "is_smartnic": True, + "links": [ + {"href": "http://127.0.0.1:6385/v1/ports/", "rel": "self"}, + {"href": "http://127.0.0.1:6385/ports/", "rel": "bookmark"}, + ], + "local_link_connection": { + "port_id": "Ethernet3/1", + "switch_id": "0a:1b:2c:3d:4e:5f", + "switch_info": "switch1", + }, + "name": "port_name", + "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", + "portgroup_uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", + "pxe_enabled": True, + "updated_at": None, + "uuid": "d2b30520-907d-46c8-bfee-c5586e6fb3a1", +} + + +class TestPort(base.TestCase): + def test_basic(self): + sot = port.Port() + self.assertIsNone(sot.resource_key) + self.assertEqual('ports', sot.resources_key) + self.assertEqual('/ports', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = port.PortDetail(**FAKE) + self.assertEqual(FAKE['uuid'], sot.id) + self.assertEqual(FAKE['address'], sot.address) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['internal_info'], 
sot.internal_info) + self.assertEqual(FAKE['is_smartnic'], sot.is_smartnic) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual( + FAKE['local_link_connection'], sot.local_link_connection + ) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['node_uuid'], sot.node_id) + self.assertEqual(FAKE['portgroup_uuid'], sot.port_group_id) + self.assertEqual(FAKE['pxe_enabled'], sot.is_pxe_enabled) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + + def test_list_conductor_groups(self): + self.port = port.Port() + self.session = mock.Mock( + spec=adapter.Adapter, default_microversion=None + ) + + self.session.default_microversion = float(self.port._max_microversion) + self.session.get.return_value.status_code = 200 + self.session.get.return_value.json.return_value = {'ports': []} + + result = list( + self.port.list( + self.session, + details=False, + conductor_groups=['group1', 'group2'], + allow_unknown_params=True, + ) + ) + self.assertEqual(0, len(result)) + self.session.get.assert_called_once_with( + '/ports', + headers={'Accept': 'application/json'}, + params={'conductor_groups': ['group1', 'group2']}, + microversion=float(self.port._max_microversion), + ) diff --git a/openstack/tests/unit/baremetal/v1/test_port_group.py b/openstack/tests/unit/baremetal/v1/test_port_group.py new file mode 100644 index 0000000000..ed5329b4a5 --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_port_group.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import port_group +from openstack.tests.unit import base + + +FAKE = { + "address": "11:11:11:11:11:11", + "created_at": "2016-08-18T22:28:48.165105+00:00", + "extra": {}, + "internal_info": {}, + "links": [ + {"href": "http://127.0.0.1:6385/v1/portgroups/", "rel": "self"}, + { + "href": "http://127.0.0.1:6385/portgroups/", + "rel": "bookmark", + }, + ], + "name": "test_portgroup", + "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", + "ports": [ + { + "href": "http://127.0.0.1:6385/v1/portgroups//ports", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/portgroups//ports", + "rel": "bookmark", + }, + ], + "standalone_ports_supported": True, + "updated_at": None, + "uuid": "e43c722c-248e-4c6e-8ce8-0d8ff129387a", +} + + +class TestPortGroup(base.TestCase): + def test_basic(self): + sot = port_group.PortGroup() + self.assertIsNone(sot.resource_key) + self.assertEqual('portgroups', sot.resources_key) + self.assertEqual('/portgroups', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = port_group.PortGroup(**FAKE) + self.assertEqual(FAKE['uuid'], sot.id) + self.assertEqual(FAKE['address'], sot.address) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['internal_info'], sot.internal_info) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['node_uuid'], sot.node_id) + self.assertEqual(FAKE['ports'], sot.ports) + self.assertEqual( + FAKE['standalone_ports_supported'], + sot.is_standalone_ports_supported, + ) + self.assertEqual(FAKE['updated_at'], sot.updated_at) diff --git 
a/openstack/tests/unit/baremetal/v1/test_proxy.py b/openstack/tests/unit/baremetal/v1/test_proxy.py new file mode 100644 index 0000000000..0afad7ba77 --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_proxy.py @@ -0,0 +1,559 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.baremetal.v1 import _proxy +from openstack.baremetal.v1 import allocation +from openstack.baremetal.v1 import chassis +from openstack.baremetal.v1 import deploy_templates +from openstack.baremetal.v1 import driver +from openstack.baremetal.v1 import inspection_rules +from openstack.baremetal.v1 import node +from openstack.baremetal.v1 import port +from openstack.baremetal.v1 import port_group +from openstack.baremetal.v1 import volume_connector +from openstack.baremetal.v1 import volume_target +from openstack import exceptions +from openstack.tests.unit import base +from openstack.tests.unit import test_proxy_base + + +_MOCK_METHOD = 'openstack.baremetal.v1._proxy.Proxy._get_with_fields' + + +class TestBaremetalProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestDrivers(TestBaremetalProxy): + def test_drivers(self): + self.verify_list(self.proxy.drivers, driver.Driver) + + def test_get_driver(self): + self.verify_get(self.proxy.get_driver, driver.Driver) + + +class TestChassis(TestBaremetalProxy): + @mock.patch.object(chassis.Chassis, 'list') + def 
test_chassis_detailed(self, mock_list): + result = self.proxy.chassis(details=True, query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=True, query=1) + + @mock.patch.object(chassis.Chassis, 'list') + def test_chassis_not_detailed(self, mock_list): + result = self.proxy.chassis(query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=False, query=1) + + def test_create_chassis(self): + self.verify_create(self.proxy.create_chassis, chassis.Chassis) + + def test_find_chassis(self): + self.verify_find( + self.proxy.find_chassis, + chassis.Chassis, + expected_kwargs={'details': True}, + ) + + def test_get_chassis(self): + self.verify_get( + self.proxy.get_chassis, + chassis.Chassis, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + def test_update_chassis(self): + self.verify_update(self.proxy.update_chassis, chassis.Chassis) + + def test_delete_chassis(self): + self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, False) + + def test_delete_chassis_ignore(self): + self.verify_delete(self.proxy.delete_chassis, chassis.Chassis, True) + + +class TestNode(TestBaremetalProxy): + @mock.patch.object(node.Node, 'list') + def test_nodes_detailed(self, mock_list): + result = self.proxy.nodes(details=True, query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=True, query=1) + + @mock.patch.object(node.Node, 'list') + def test_nodes_not_detailed(self, mock_list): + result = self.proxy.nodes(query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=False, query=1) + + @mock.patch.object(node.Node, 'list') + def test_nodes_sharded(self, mock_list): + kwargs = {"shard": 'meow', "query": 1} + result = self.proxy.nodes(fields=("uuid", "instance_uuid"), **kwargs) + self.assertIs(result, mock_list.return_value) + 
mock_list.assert_called_once_with( + self.proxy, + details=False, + fields=('uuid', 'instance_uuid'), + shard='meow', + query=1, + ) + + def test_create_node(self): + self.verify_create(self.proxy.create_node, node.Node) + + def test_find_node(self): + self.verify_find( + self.proxy.find_node, + node.Node, + expected_kwargs={'details': True}, + ) + + def test_get_node(self): + self.verify_get( + self.proxy.get_node, + node.Node, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + @mock.patch.object(node.Node, 'commit', autospec=True) + def test_update_node(self, mock_commit): + self.proxy.update_node('uuid', instance_id='new value') + mock_commit.assert_called_once_with( + mock.ANY, self.proxy, retry_on_conflict=True + ) + self.assertEqual('new value', mock_commit.call_args[0][0].instance_id) + + @mock.patch.object(node.Node, 'commit', autospec=True) + def test_update_node_no_retries(self, mock_commit): + self.proxy.update_node( + 'uuid', instance_id='new value', retry_on_conflict=False + ) + mock_commit.assert_called_once_with( + mock.ANY, self.proxy, retry_on_conflict=False + ) + self.assertEqual('new value', mock_commit.call_args[0][0].instance_id) + + def test_delete_node(self): + self.verify_delete(self.proxy.delete_node, node.Node, False) + + def test_delete_node_ignore(self): + self.verify_delete(self.proxy.delete_node, node.Node, True) + + +class TestPort(TestBaremetalProxy): + @mock.patch.object(port.Port, 'list') + def test_ports_detailed(self, mock_list): + result = self.proxy.ports(details=True, query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=True, query=1) + + @mock.patch.object(port.Port, 'list') + def test_ports_not_detailed(self, mock_list): + result = self.proxy.ports(query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=False, query=1) + + def test_create_port(self): + 
self.verify_create(self.proxy.create_port, port.Port) + + def test_find_port(self): + self.verify_find( + self.proxy.find_port, + port.Port, + expected_kwargs={'details': True}, + ) + + def test_get_port(self): + self.verify_get( + self.proxy.get_port, + port.Port, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + def test_update_port(self): + self.verify_update(self.proxy.update_port, port.Port) + + def test_delete_port(self): + self.verify_delete(self.proxy.delete_port, port.Port, False) + + def test_delete_port_ignore(self): + self.verify_delete(self.proxy.delete_port, port.Port, True) + + +class TestPortGroups(TestBaremetalProxy): + @mock.patch.object(port_group.PortGroup, 'list') + def test_port_groups_detailed(self, mock_list): + result = self.proxy.port_groups(details=True, query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=True, query=1) + + @mock.patch.object(port_group.PortGroup, 'list') + def test_port_groups_not_detailed(self, mock_list): + result = self.proxy.port_groups(query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=False, query=1) + + def test_get_port_group(self): + self.verify_get( + self.proxy.get_port_group, + port_group.PortGroup, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + +class TestAllocation(TestBaremetalProxy): + def test_create_allocation(self): + self.verify_create(self.proxy.create_allocation, allocation.Allocation) + + def test_get_allocation(self): + self.verify_get( + self.proxy.get_allocation, + allocation.Allocation, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + def test_delete_allocation(self): + self.verify_delete( + self.proxy.delete_allocation, allocation.Allocation, False + ) + + def test_delete_allocation_ignore(self): + self.verify_delete( + self.proxy.delete_allocation, allocation.Allocation, True + ) + + +class 
TestVolumeConnector(TestBaremetalProxy): + def test_create_volume_connector(self): + self.verify_create( + self.proxy.create_volume_connector, + volume_connector.VolumeConnector, + ) + + def test_find_volume_connector(self): + self.verify_find( + self.proxy.find_volume_connector, + volume_connector.VolumeConnector, + expected_kwargs={'details': True}, + ) + + def test_get_volume_connector(self): + self.verify_get( + self.proxy.get_volume_connector, + volume_connector.VolumeConnector, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + def test_delete_volume_connector(self): + self.verify_delete( + self.proxy.delete_volume_connector, + volume_connector.VolumeConnector, + False, + ) + + def test_delete_volume_connector_ignore(self): + self.verify_delete( + self.proxy.delete_volume_connector, + volume_connector.VolumeConnector, + True, + ) + + +class TestVolumeTarget(TestBaremetalProxy): + @mock.patch.object(volume_target.VolumeTarget, 'list') + def test_volume_target_detailed(self, mock_list): + result = self.proxy.volume_targets(details=True, query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, detail=True, query=1) + + @mock.patch.object(volume_target.VolumeTarget, 'list') + def test_volume_target_not_detailed(self, mock_list): + result = self.proxy.volume_targets(query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, query=1) + + def test_create_volume_target(self): + self.verify_create( + self.proxy.create_volume_target, volume_target.VolumeTarget + ) + + def test_find_volume_target(self): + self.verify_find( + self.proxy.find_volume_target, + volume_target.VolumeTarget, + expected_kwargs={'details': True}, + ) + + def test_get_volume_target(self): + self.verify_get( + self.proxy.get_volume_target, + volume_target.VolumeTarget, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + def test_delete_volume_target(self): + 
self.verify_delete( + self.proxy.delete_volume_target, volume_target.VolumeTarget, False + ) + + def test_delete_volume_target_ignore(self): + self.verify_delete( + self.proxy.delete_volume_target, volume_target.VolumeTarget, True + ) + + +class TestDeployTemplate(TestBaremetalProxy): + @mock.patch.object(deploy_templates.DeployTemplate, 'list') + def test_deploy_templates_detailed(self, mock_list): + result = self.proxy.deploy_templates(details=True, query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, detail=True, query=1) + + @mock.patch.object(deploy_templates.DeployTemplate, 'list') + def test_deploy_templates_not_detailed(self, mock_list): + result = self.proxy.deploy_templates(query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, query=1) + + def test_create_deploy_template(self): + self.verify_create( + self.proxy.create_deploy_template, + deploy_templates.DeployTemplate, + ) + + def test_find_deploy_template(self): + self.verify_find( + self.proxy.find_deploy_template, + deploy_templates.DeployTemplate, + expected_kwargs={'details': True}, + ) + + def test_get_deploy_template(self): + self.verify_get( + self.proxy.get_deploy_template, + deploy_templates.DeployTemplate, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + def test_delete_deploy_template(self): + self.verify_delete( + self.proxy.delete_deploy_template, + deploy_templates.DeployTemplate, + False, + ) + + def test_delete_deploy_template_ignore(self): + self.verify_delete( + self.proxy.delete_deploy_template, + deploy_templates.DeployTemplate, + True, + ) + + +class TestMisc(TestBaremetalProxy): + @mock.patch.object(node.Node, 'fetch', autospec=True) + def test__get_with_fields_none(self, mock_fetch): + result = self.proxy._get_with_fields(node.Node, 'value') + self.assertIs(result, mock_fetch.return_value) + mock_fetch.assert_called_once_with( + mock.ANY, self.proxy, 
error_message=mock.ANY + ) + + @mock.patch.object(node.Node, 'fetch', autospec=True) + def test__get_with_fields_node(self, mock_fetch): + result = self.proxy._get_with_fields( + # Mix of server-side and client-side fields + node.Node, + 'value', + fields=['maintenance', 'id', 'instance_id'], + ) + self.assertIs(result, mock_fetch.return_value) + mock_fetch.assert_called_once_with( + mock.ANY, + self.proxy, + error_message=mock.ANY, + # instance_id converted to server-side instance_uuid + fields='maintenance,uuid,instance_uuid', + ) + + @mock.patch.object(port.Port, 'fetch', autospec=True) + def test__get_with_fields_port(self, mock_fetch): + result = self.proxy._get_with_fields( + port.Port, 'value', fields=['address', 'id', 'node_id'] + ) + self.assertIs(result, mock_fetch.return_value) + mock_fetch.assert_called_once_with( + mock.ANY, + self.proxy, + error_message=mock.ANY, + # node_id converted to server-side node_uuid + fields='address,uuid,node_uuid', + ) + + +@mock.patch('time.sleep', lambda _sec: None) +@mock.patch.object(_proxy.Proxy, 'get_node', autospec=True) +class TestWaitForNodesProvisionState(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock() + self.proxy = _proxy.Proxy(self.session) + + def test_success(self, mock_get): + # two attempts, one node succeeds after the 1st + nodes = [mock.Mock(spec=node.Node, id=str(i)) for i in range(3)] + for i, n in enumerate(nodes): + # 1st attempt on 1st node, 2nd attempt on 2nd node + n._check_state_reached.return_value = not (i % 2) + mock_get.side_effect = nodes + + result = self.proxy.wait_for_nodes_provision_state( + ['abcd', node.Node(id='1234')], 'fake state' + ) + self.assertEqual([nodes[0], nodes[2]], result) + + for n in nodes: + n._check_state_reached.assert_called_once_with( + self.proxy, 'fake state', True + ) + + def test_success_no_fail(self, mock_get): + # two attempts, one node succeeds after the 1st + nodes = [mock.Mock(spec=node.Node, id=str(i)) for i in range(3)] + 
for i, n in enumerate(nodes): + # 1st attempt on 1st node, 2nd attempt on 2nd node + n._check_state_reached.return_value = not (i % 2) + mock_get.side_effect = nodes + + result = self.proxy.wait_for_nodes_provision_state( + ['abcd', node.Node(id='1234')], 'fake state', fail=False + ) + self.assertEqual([nodes[0], nodes[2]], result.success) + self.assertEqual([], result.failure) + self.assertEqual([], result.timeout) + + for n in nodes: + n._check_state_reached.assert_called_once_with( + self.proxy, 'fake state', True + ) + + def test_timeout(self, mock_get): + mock_get.return_value._check_state_reached.return_value = False + mock_get.return_value.id = '1234' + + self.assertRaises( + exceptions.ResourceTimeout, + self.proxy.wait_for_nodes_provision_state, + ['abcd', node.Node(id='1234')], + 'fake state', + timeout=0.001, + ) + mock_get.return_value._check_state_reached.assert_called_with( + self.proxy, 'fake state', True + ) + + def test_timeout_no_fail(self, mock_get): + mock_get.return_value._check_state_reached.return_value = False + mock_get.return_value.id = '1234' + + result = self.proxy.wait_for_nodes_provision_state( + ['abcd'], 'fake state', timeout=0.001, fail=False + ) + mock_get.return_value._check_state_reached.assert_called_with( + self.proxy, 'fake state', True + ) + + self.assertEqual([], result.success) + self.assertEqual([mock_get.return_value], result.timeout) + self.assertEqual([], result.failure) + + def test_timeout_and_failures_not_fail(self, mock_get): + def _fake_get(_self, node): + result = mock.Mock() + result.id = getattr(node, 'id', node) + if result.id == '1': + result._check_state_reached.return_value = True + elif result.id == '2': + result._check_state_reached.side_effect = ( + exceptions.ResourceFailure("boom") + ) + else: + result._check_state_reached.return_value = False + return result + + mock_get.side_effect = _fake_get + + result = self.proxy.wait_for_nodes_provision_state( + ['1', '2', '3'], 'fake state', timeout=0.001, 
fail=False + ) + + self.assertEqual(['1'], [x.id for x in result.success]) + self.assertEqual(['3'], [x.id for x in result.timeout]) + self.assertEqual(['2'], [x.id for x in result.failure]) + + +class TestInspectionRules(TestBaremetalProxy): + @mock.patch.object(inspection_rules.InspectionRule, 'list') + def test_inspection_rules_detailed(self, mock_list): + result = self.proxy.inspection_rules(details=True, query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, details=True, query=1) + + @mock.patch.object(inspection_rules.InspectionRule, 'list') + def test_inspection_rules_not_detailed(self, mock_list): + result = self.proxy.inspection_rules(query=1) + self.assertIs(result, mock_list.return_value) + mock_list.assert_called_once_with(self.proxy, query=1) + + def test_create_inspection_rule(self): + self.verify_create( + self.proxy.create_inspection_rule, inspection_rules.InspectionRule + ) + + def test_get_inspection_rule(self): + self.verify_get( + self.proxy.get_inspection_rule, + inspection_rules.InspectionRule, + mock_method=_MOCK_METHOD, + expected_kwargs={'fields': None}, + ) + + def test_update_inspection_rule(self): + self.verify_update( + self.proxy.update_inspection_rule, inspection_rules.InspectionRule + ) + + def test_delete_inspection_rule(self): + self.verify_delete( + self.proxy.delete_inspection_rule, + inspection_rules.InspectionRule, + False, + ) + + def test_delete_inspection_rule_ignore(self): + self.verify_delete( + self.proxy.delete_inspection_rule, + inspection_rules.InspectionRule, + True, + ) diff --git a/openstack/tests/unit/baremetal/v1/test_runbooks.py b/openstack/tests/unit/baremetal/v1/test_runbooks.py new file mode 100644 index 0000000000..eed9e73beb --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_runbooks.py @@ -0,0 +1,73 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import runbooks +from openstack.tests.unit import base + + +FAKE = { + "created_at": "2024-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": """http://10.60.253.180:6385/v1/runbooks + /bbb45f41-d4bc-4307-8d1d-32f95ce1e920""", + "rel": "self", + }, + { + "href": """http://10.60.253.180:6385/runbooks + /bbb45f41-d4bc-4307-8d1d-32f95ce1e920""", + "rel": "bookmark", + }, + ], + "name": "CUSTOM_AWESOME", + "public": False, + "owner": "blah", + "steps": [ + { + "args": { + "settings": [{"name": "LogicalProc", "value": "Enabled"}] + }, + "interface": "bios", + "order": 1, + "step": "apply_configuration", + } + ], + "updated_at": None, + "uuid": "32f95ce1-4307-d4bc-8d1d-e920bbb45f41", +} + + +class Runbooks(base.TestCase): + def test_basic(self): + sot = runbooks.Runbook() + self.assertIsNone(sot.resource_key) + self.assertEqual('runbooks', sot.resources_key) + self.assertEqual('/runbooks', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = runbooks.Runbook(**FAKE) + self.assertEqual(FAKE['steps'], sot.steps) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['public'], sot.public) + self.assertEqual(FAKE['owner'], sot.owner) + 
self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['uuid'], sot.id) diff --git a/openstack/tests/unit/baremetal/v1/test_volume_connector.py b/openstack/tests/unit/baremetal/v1/test_volume_connector.py new file mode 100644 index 0000000000..c148cc8261 --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_volume_connector.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal.v1 import volume_connector +from openstack.tests.unit import base + + +FAKE = { + "connector_id": "iqn.2017-07.org.openstack:01:d9a51732c3f", + "created_at": "2016-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": "http://127.0.0.1:6385/v1/volume/connector/", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/volume/connector/", + "rel": "bookmark", + }, + ], + "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", + "type": "iqn", + "updated_at": None, + "uuid": "9bf93e01-d728-47a3-ad4b-5e66a835037c", +} + + +class TestVolumeconnector(base.TestCase): + def test_basic(self): + sot = volume_connector.VolumeConnector() + self.assertIsNone(sot.resource_key) + self.assertEqual('connectors', sot.resources_key) + self.assertEqual('/volume/connectors', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def 
test_instantiate(self): + sot = volume_connector.VolumeConnector(**FAKE) + self.assertEqual(FAKE['connector_id'], sot.connector_id) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['node_uuid'], sot.node_id) + self.assertEqual(FAKE['type'], sot.type) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['uuid'], sot.id) diff --git a/openstack/tests/unit/baremetal/v1/test_volume_target.py b/openstack/tests/unit/baremetal/v1/test_volume_target.py new file mode 100644 index 0000000000..057d7414b1 --- /dev/null +++ b/openstack/tests/unit/baremetal/v1/test_volume_target.py @@ -0,0 +1,64 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.baremetal.v1 import volume_target +from openstack.tests.unit import base + + +FAKE = { + "boot_index": 0, + "created_at": "2016-08-18T22:28:48.643434+11:11", + "extra": {}, + "links": [ + { + "href": "http://127.0.0.1:6385/v1/volume/targets/", + "rel": "self", + }, + { + "href": "http://127.0.0.1:6385/volume/targets/", + "rel": "bookmark", + }, + ], + "node_uuid": "6d85703a-565d-469a-96ce-30b6de53079d", + "properties": {}, + "updated_at": None, + "uuid": "bd4d008c-7d31-463d-abf9-6c23d9d55f7f", + "volume_id": "04452bed-5367-4202-8bf5-de4335ac56d2", + "volume_type": "iscsi", +} + + +class TestVolumeTarget(base.TestCase): + def test_basic(self): + sot = volume_target.VolumeTarget() + self.assertIsNone(sot.resource_key) + self.assertEqual('targets', sot.resources_key) + self.assertEqual('/volume/targets', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = volume_target.VolumeTarget(**FAKE) + self.assertEqual(FAKE['boot_index'], sot.boot_index) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['extra'], sot.extra) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['node_uuid'], sot.node_id) + self.assertEqual(FAKE['properties'], sot.properties) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['uuid'], sot.id) + self.assertEqual(FAKE['volume_id'], sot.volume_id) + self.assertEqual(FAKE['volume_type'], sot.volume_type) diff --git a/openstack/tests/unit/baremetal_introspection/__init__.py b/openstack/tests/unit/baremetal_introspection/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/baremetal_introspection/v1/__init__.py b/openstack/tests/unit/baremetal_introspection/v1/__init__.py new file mode 100644 index 
0000000000..e69de29bb2 diff --git a/openstack/tests/unit/baremetal_introspection/v1/test_introspection_rule.py b/openstack/tests/unit/baremetal_introspection/v1/test_introspection_rule.py new file mode 100644 index 0000000000..5e059f80a4 --- /dev/null +++ b/openstack/tests/unit/baremetal_introspection/v1/test_introspection_rule.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.baremetal_introspection.v1 import introspection_rule +from openstack.tests.unit import base + +FAKE = { + "actions": [ + { + "action": "set-attribute", + "path": "driver_info/deploy_kernel", + "value": "8fd65-c97b-4d00-aa8b-7ed166a60971", + }, + { + "action": "set-attribute", + "path": "driver_info/deploy_ramdisk", + "value": "09e5420c-6932-4199-996e-9485c56b3394", + }, + ], + "conditions": [ + { + "field": "node://driver_info.deploy_ramdisk", + "invert": False, + "multiple": "any", + "op": "is-empty", + }, + { + "field": "node://driver_info.deploy_kernel", + "invert": False, + "multiple": "any", + "op": "is-empty", + }, + ], + "description": "Set deploy info if not already set on node", + "links": [ + { + "href": "/v1/rules/7459bf7c-9ff9-43a8-ba9f-48542ecda66c", + "rel": "self", + } + ], + "uuid": "7459bf7c-9ff9-43a8-ba9f-48542ecda66c", + "scope": "", +} + + +class TestIntrospectionRule(base.TestCase): + def test_basic(self): + sot = introspection_rule.IntrospectionRule() + self.assertIsNone(sot.resource_key) + self.assertEqual('rules', sot.resources_key) + 
self.assertEqual('/rules', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('POST', sot.create_method) + + def test_instantiate(self): + sot = introspection_rule.IntrospectionRule(**FAKE) + self.assertEqual(FAKE['conditions'], sot.conditions) + self.assertEqual(FAKE['actions'], sot.actions) + self.assertEqual(FAKE['description'], sot.description) + self.assertEqual(FAKE['uuid'], sot.id) + self.assertEqual(FAKE['scope'], sot.scope) diff --git a/openstack/tests/unit/baremetal_introspection/v1/test_proxy.py b/openstack/tests/unit/baremetal_introspection/v1/test_proxy.py new file mode 100644 index 0000000000..3419ce6fd2 --- /dev/null +++ b/openstack/tests/unit/baremetal_introspection/v1/test_proxy.py @@ -0,0 +1,229 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.baremetal.v1 import node as _node +from openstack.baremetal_introspection.v1 import _proxy +from openstack.baremetal_introspection.v1 import introspection +from openstack.baremetal_introspection.v1 import introspection_rule +from openstack import exceptions +from openstack.tests.unit import base +from openstack.tests.unit import test_proxy_base + + +@mock.patch.object(introspection.Introspection, 'create', autospec=True) +class TestStartIntrospection(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.proxy = _proxy.Proxy(self.session) + + def test_create_introspection(self, mock_create): + self.proxy.start_introspection('abcd') + mock_create.assert_called_once_with(mock.ANY, self.proxy) + introspect = mock_create.call_args[0][0] + self.assertEqual('abcd', introspect.id) + + def test_create_introspection_with_node(self, mock_create): + self.proxy.start_introspection(_node.Node(id='abcd')) + mock_create.assert_called_once_with(mock.ANY, self.proxy) + introspect = mock_create.call_args[0][0] + self.assertEqual('abcd', introspect.id) + + def test_create_introspection_manage_boot(self, mock_create): + self.proxy.start_introspection('abcd', manage_boot=False) + mock_create.assert_called_once_with( + mock.ANY, self.proxy, manage_boot=False + ) + introspect = mock_create.call_args[0][0] + self.assertEqual('abcd', introspect.id) + + +class TestBaremetalIntrospectionProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_get_introspection(self): + self.verify_get( + self.proxy.get_introspection, introspection.Introspection + ) + + +@mock.patch('time.sleep', lambda _sec: None) +@mock.patch.object(introspection.Introspection, 'fetch', autospec=True) +class TestWaitForIntrospection(base.TestCase): + def setUp(self): + super().setUp() + self.session = 
mock.Mock(spec=adapter.Adapter) + self.proxy = _proxy.Proxy(self.session) + self.fake = {'state': 'waiting', 'error': None, 'finished': False} + self.introspection = introspection.Introspection(**self.fake) + + def test_already_finished(self, mock_fetch): + self.introspection.is_finished = True + self.introspection.state = 'finished' + result = self.proxy.wait_for_introspection(self.introspection) + self.assertIs(result, self.introspection) + self.assertFalse(mock_fetch.called) + + def test_wait(self, mock_fetch): + marker = [False] # mutable object to modify in the closure + + def _side_effect(allocation, session): + if marker[0]: + self.introspection.state = 'finished' + self.introspection.is_finished = True + else: + self.introspection.state = 'processing' + marker[0] = True + + mock_fetch.side_effect = _side_effect + result = self.proxy.wait_for_introspection(self.introspection) + self.assertIs(result, self.introspection) + self.assertEqual(2, mock_fetch.call_count) + + def test_timeout(self, mock_fetch): + self.assertRaises( + exceptions.ResourceTimeout, + self.proxy.wait_for_introspection, + self.introspection, + timeout=0.001, + ) + mock_fetch.assert_called_with(self.introspection, self.proxy) + + def test_failure(self, mock_fetch): + def _side_effect(allocation, session): + self.introspection.state = 'error' + self.introspection.is_finished = True + self.introspection.error = 'boom' + + mock_fetch.side_effect = _side_effect + self.assertRaisesRegex( + exceptions.ResourceFailure, + 'boom', + self.proxy.wait_for_introspection, + self.introspection, + ) + mock_fetch.assert_called_once_with(self.introspection, self.proxy) + + def test_failure_ignored(self, mock_fetch): + def _side_effect(allocation, session): + self.introspection.state = 'error' + self.introspection.is_finished = True + self.introspection.error = 'boom' + + mock_fetch.side_effect = _side_effect + result = self.proxy.wait_for_introspection( + self.introspection, ignore_error=True + ) + 
self.assertIs(result, self.introspection) + mock_fetch.assert_called_once_with(self.introspection, self.proxy) + + +@mock.patch.object(_proxy.Proxy, 'request', autospec=True) +class TestAbortIntrospection(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.proxy = _proxy.Proxy(self.session) + self.fake = {'id': '1234', 'finished': False} + self.introspection = introspection.Introspection(**self.fake) + + def test_abort(self, mock_request): + mock_request.return_value.status_code = 202 + self.proxy.abort_introspection(self.introspection) + mock_request.assert_called_once_with( + self.proxy, + 'introspection/1234/abort', + 'POST', + headers=mock.ANY, + microversion=mock.ANY, + retriable_status_codes=[409, 503], + ) + + +@mock.patch.object(_proxy.Proxy, 'request', autospec=True) +class TestGetData(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + self.proxy = _proxy.Proxy(self.session) + self.fake = {'id': '1234', 'finished': False} + self.introspection = introspection.Introspection(**self.fake) + + def test_get_data(self, mock_request): + mock_request.return_value.status_code = 200 + data = self.proxy.get_introspection_data(self.introspection) + mock_request.assert_called_once_with( + self.proxy, + 'introspection/1234/data', + 'GET', + headers=mock.ANY, + microversion=mock.ANY, + ) + self.assertIs(data, mock_request.return_value.json.return_value) + + def test_get_unprocessed_data(self, mock_request): + mock_request.return_value.status_code = 200 + data = self.proxy.get_introspection_data( + self.introspection, processed=False + ) + mock_request.assert_called_once_with( + self.proxy, + 'introspection/1234/data/unprocessed', + 'GET', + headers=mock.ANY, + microversion='1.17', + ) + self.assertIs(data, mock_request.return_value.json.return_value) + + +class TestIntrospectionRule(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy 
= _proxy.Proxy(self.session) + + def test_introspection_rule_create(self): + self.verify_create( + self.proxy.create_introspection_rule, + introspection_rule.IntrospectionRule, + ) + + def test_introspection_rule_delete(self): + self.verify_delete( + self.proxy.delete_introspection_rule, + introspection_rule.IntrospectionRule, + False, + ) + + def test_introspection_rule_delete_ignore(self): + self.verify_delete( + self.proxy.delete_introspection_rule, + introspection_rule.IntrospectionRule, + True, + ) + + def test_introspection_rule_get(self): + self.verify_get( + self.proxy.get_introspection_rule, + introspection_rule.IntrospectionRule, + ) + + def test_introspection_rules(self): + self.verify_list( + self.proxy.introspection_rules, + introspection_rule.IntrospectionRule, + ) diff --git a/openstack/tests/unit/base.py b/openstack/tests/unit/base.py index 0b5b1f4460..41ff6373ac 100644 --- a/openstack/tests/unit/base.py +++ b/openstack/tests/unit/base.py @@ -1,5 +1,3 @@ -# -*- coding: utf-8 -*- - # Copyright 2010-2011 OpenStack Foundation # Copyright (c) 2013 Hewlett-Packard Development Company, L.P. # @@ -15,39 +13,958 @@ # License for the specific language governing permissions and limitations # under the License. 
+import collections import os +import tempfile +import time +import urllib +import uuid import fixtures -import testtools +from keystoneauth1 import loading as ks_loading +from oslo_config import cfg +from requests import structures +from requests_mock.contrib import fixture as rm_fixture -_TRUE_VALUES = ('true', '1', 'yes') +import openstack.cloud +import openstack.config as occ +import openstack.connection +from openstack.fixture import connection as os_fixture +from openstack.tests import base +from openstack.tests import fakes -class TestCase(testtools.TestCase): +_ProjectData = collections.namedtuple( + 'ProjectData', + 'project_id, project_name, enabled, domain_id, description, ' + 'parent_id, json_response, json_request', +) - """Test case base class for all unit tests.""" - def setUp(self): +_UserData = collections.namedtuple( + 'UserData', + 'user_id, password, name, email, description, domain_id, enabled, ' + 'json_response, json_request', +) + + +_GroupData = collections.namedtuple( + 'GroupData', + 'group_id, group_name, domain_id, description, json_response, ' + 'json_request', +) + + +_DomainData = collections.namedtuple( + 'DomainData', + 'domain_id, domain_name, description, json_response, json_request', +) + + +_ServiceData = collections.namedtuple( + 'Servicedata', + 'service_id, service_name, service_type, description, enabled, ' + 'json_response_v3, json_response_v2, json_request', +) + + +_EndpointDataV3 = collections.namedtuple( + 'EndpointData', + 'endpoint_id, service_id, interface, region_id, url, enabled, ' + 'json_response, json_request', +) + + +# NOTE(notmorgan): Shade does not support domain-specific roles +# This should eventually be fixed if it becomes a main-stream feature. 
+_RoleData = collections.namedtuple( + 'RoleData', 'role_id, role_name, json_response, json_request' +) + + +class TestCase(base.TestCase): + strict_cloud = False + + def setUp(self, cloud_config_fixture='clouds.yaml'): """Run before each test method to initialize test environment.""" - super(TestCase, self).setUp() - test_timeout = os.environ.get('OS_TEST_TIMEOUT', 0) - try: - test_timeout = int(test_timeout) - except ValueError: - # If timeout value is invalid do not set a timeout. - test_timeout = 0 - if test_timeout > 0: - self.useFixture(fixtures.Timeout(test_timeout, gentle=True)) - - self.useFixture(fixtures.NestedTempfile()) - self.useFixture(fixtures.TempHomeDir()) - - if os.environ.get('OS_STDOUT_CAPTURE') in _TRUE_VALUES: - stdout = self.useFixture(fixtures.StringStream('stdout')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stdout', stdout)) - if os.environ.get('OS_STDERR_CAPTURE') in _TRUE_VALUES: - stderr = self.useFixture(fixtures.StringStream('stderr')).stream - self.useFixture(fixtures.MonkeyPatch('sys.stderr', stderr)) - - self.log_fixture = self.useFixture(fixtures.FakeLogger()) + super().setUp() + + # Sleeps are for real testing, but unit tests shouldn't need them + realsleep = time.sleep + + def _nosleep(seconds): + return realsleep(seconds * 0.0001) + + self.sleep_fixture = self.useFixture( + fixtures.MonkeyPatch('time.sleep', _nosleep) + ) + self.fixtures_directory = 'openstack/tests/unit/fixtures' + self.os_fixture = self.useFixture( + os_fixture.ConnectionFixture(project_id=fakes.PROJECT_ID) + ) + + # Isolate openstack.config from test environment + self.os_cloud_fixture = self.useFixture( + fixtures.EnvironmentVariable('OS_CLOUD'), + ) + config = tempfile.NamedTemporaryFile(delete=False) + cloud_path = os.path.join( + self.fixtures_directory, + 'clouds', + cloud_config_fixture, + ) + with open(cloud_path, 'rb') as f: + content = f.read() + config.write(content) + config.close() + + vendor = tempfile.NamedTemporaryFile(delete=False) + 
vendor.write(b'{}') + vendor.close() + + self.config = occ.OpenStackConfig( + config_files=[config.name], + vendor_files=[vendor.name], + secure_files=['non-existant'], + ) + + self.oslo_config_dict = { + # All defaults for nova + 'nova': {}, + # monasca-api not in the service catalog + 'monasca-api': {}, + # Overrides for heat + 'heat': { + 'region_name': 'SpecialRegion', + 'interface': 'internal', + 'endpoint_override': 'https://example.org:8888/heat/v2', + }, + # test a service with dashes + 'ironic_inspector': { + 'endpoint_override': 'https://example.org:5050', + }, + } + + # FIXME(notmorgan): Convert the uri_registry, discovery.json, and + # use of keystone_v3/v2 to a proper fixtures.Fixture. For now this + # is acceptable, but eventually this should become it's own fixture + # that encapsulates the registry, registering the URIs, and + # assert_calls (and calling assert_calls every test case that uses + # it on cleanup). Subclassing here could be 100% eliminated in the + # future allowing any class to simply + # self.useFixture(openstack.cloud.RequestsMockFixture) and get all + # the benefits. + + # NOTE(notmorgan): use an ordered dict here to ensure we preserve the + # order in which items are added to the uri_registry. This makes + # the behavior more consistent when dealing with ensuring the + # requests_mock uri/query_string matchers are ordered and parse the + # request in the correct orders. 
+ self._uri_registry = collections.OrderedDict() + self.discovery_json = os.path.join( + self.fixtures_directory, 'discovery.json' + ) + self.use_keystone_v3() + self.__register_uris_called = False + + def _load_ks_cfg_opts(self): + conf = cfg.ConfigOpts() + for group, opts in self.oslo_config_dict.items(): + conf.register_group(cfg.OptGroup(group)) + if opts is not None: + ks_loading.register_adapter_conf_options(conf, group) + for name, val in opts.items(): + conf.set_override(name, val, group=group) + return conf + + # TODO(shade) Update this to handle service type aliases + def get_mock_url( + self, + service_type, + interface='public', + resource=None, + append=None, + base_url_append=None, + qs_elements=None, + ): + endpoint_url = self.cloud.endpoint_for( + service_type=service_type, interface=interface + ) + # Strip trailing slashes, so as not to produce double-slashes below + if endpoint_url.endswith('/'): + endpoint_url = endpoint_url[:-1] + to_join = [endpoint_url] + qs = '' + if base_url_append: + to_join.append(base_url_append) + if resource: + to_join.append(resource) + if append: + to_join.extend([urllib.parse.quote(i) for i in append]) + if qs_elements is not None: + qs = '?{}'.format('&'.join(qs_elements)) + return '{uri}{qs}'.format(uri='/'.join(to_join), qs=qs) + + def mock_for_keystone_projects( + self, + project=None, + v3=True, + list_get=False, + id_get=False, + project_list=None, + project_count=None, + ): + if project: + assert not (project_list or project_count) + elif project_list: + assert not (project or project_count) + elif project_count: + assert not (project or project_list) + else: + raise Exception( + 'Must specify a project, project_list, or project_count' + ) + assert list_get or id_get + + base_url_append = 'v3' if v3 else None + if project: + project_list = [project] + elif project_count: + # Generate multiple projects + project_list = [ + self._get_project_data(v3=v3) for c in range(0, project_count) + ] + uri_mock_list = [] + 
if list_get: + uri_mock_list.append( + dict( + method='GET', + uri=self.get_mock_url( + service_type='identity', + interface='admin', + resource='projects', + base_url_append=base_url_append, + ), + status_code=200, + json={ + 'projects': [ + p.json_response['project'] for p in project_list + ] + }, + ) + ) + if id_get: + for p in project_list: + uri_mock_list.append( + dict( + method='GET', + uri=self.get_mock_url( + service_type='identity', + interface='admin', + resource='projects', + append=[p.project_id], + base_url_append=base_url_append, + ), + status_code=200, + json=p.json_response, + ) + ) + self.__do_register_uris(uri_mock_list) + return project_list + + def _get_project_data( + self, + project_name=None, + enabled=None, + domain_id=None, + description=None, + v3=True, + project_id=None, + parent_id=None, + ): + project_name = project_name or self.getUniqueString('projectName') + project_id = uuid.UUID(project_id or uuid.uuid4().hex).hex + if parent_id: + parent_id = uuid.UUID(parent_id).hex + response = {'id': project_id, 'name': project_name} + request = {'name': project_name} + domain_id = (domain_id or uuid.uuid4().hex) if v3 else None + if domain_id: + request['domain_id'] = domain_id + response['domain_id'] = domain_id + if enabled is not None: + enabled = bool(enabled) + response['enabled'] = enabled + request['enabled'] = enabled + if parent_id: + request['parent_id'] = parent_id + response['parent_id'] = parent_id + response.setdefault('enabled', True) + request.setdefault('enabled', True) + if description: + response['description'] = description + request['description'] = description + request.setdefault('description', None) + return _ProjectData( + project_id, + project_name, + enabled, + domain_id, + description, + parent_id, + {'project': response}, + {'project': request}, + ) + + def _get_group_data(self, name=None, domain_id=None, description=None): + group_id = uuid.uuid4().hex + name = name or self.getUniqueString('groupname') + 
domain_id = uuid.UUID(domain_id or uuid.uuid4().hex).hex + response = {'id': group_id, 'name': name, 'domain_id': domain_id} + request = {'name': name, 'domain_id': domain_id} + if description is not None: + response['description'] = description + request['description'] = description + + return _GroupData( + group_id, + name, + domain_id, + description, + {'group': response}, + {'group': request}, + ) + + def _get_user_data(self, name=None, password=None, **kwargs): + name = name or self.getUniqueString('username') + password = password or self.getUniqueString('user_password') + user_id = uuid.uuid4().hex + + response = {'name': name, 'id': user_id} + request = {'name': name, 'password': password} + + if kwargs.get('domain_id'): + kwargs['domain_id'] = uuid.UUID(kwargs['domain_id']).hex + response['domain_id'] = kwargs.pop('domain_id') + request['domain_id'] = response['domain_id'] + + response['email'] = kwargs.pop('email', None) + request['email'] = response['email'] + + response['enabled'] = kwargs.pop('enabled', True) + request['enabled'] = response['enabled'] + + response['description'] = kwargs.pop('description', None) + if response['description']: + request['description'] = response['description'] + + self.assertIs( + 0, + len(kwargs), + message='extra key-word args received on _get_user_data', + ) + + return _UserData( + user_id, + password, + name, + response['email'], + response['description'], + response.get('domain_id'), + response.get('enabled'), + {'user': response}, + {'user': request}, + ) + + def _get_domain_data( + self, domain_name=None, description=None, enabled=None + ): + domain_id = uuid.uuid4().hex + domain_name = domain_name or self.getUniqueString('domainName') + response = {'id': domain_id, 'name': domain_name} + request = {'name': domain_name} + if enabled is not None: + request['enabled'] = bool(enabled) + response['enabled'] = bool(enabled) + if description: + response['description'] = description + request['description'] = description 
+ response.setdefault('enabled', True) + return _DomainData( + domain_id, + domain_name, + description, + {'domain': response}, + {'domain': request}, + ) + + def _get_service_data( + self, type=None, name=None, description=None, enabled=True + ): + service_id = uuid.uuid4().hex + name = name or f'name-{uuid.uuid4().hex}' + type = type or uuid.uuid4().hex + + response = { + 'id': service_id, + 'name': name, + 'type': type, + 'enabled': enabled, + } + if description is not None: + response['description'] = description + request = response.copy() + request.pop('id') + return _ServiceData( + service_id, + name, + type, + description, + enabled, + {'service': response}, + {'OS-KSADM:service': response}, + request, + ) + + def _get_endpoint_v3_data( + self, + service_id=None, + region=None, + url=None, + interface=None, + enabled=True, + ): + endpoint_id = uuid.uuid4().hex + service_id = service_id or uuid.uuid4().hex + region = region or uuid.uuid4().hex + url = url or 'https://example.com/' + interface = interface or uuid.uuid4().hex + + response = { + 'id': endpoint_id, + 'service_id': service_id, + 'region_id': region, + 'interface': interface, + 'url': url, + 'enabled': enabled, + } + request = response.copy() + request.pop('id') + return _EndpointDataV3( + endpoint_id, + service_id, + interface, + region, + url, + enabled, + {'endpoint': response}, + {'endpoint': request}, + ) + + def _get_role_data(self, role_name=None): + role_id = uuid.uuid4().hex + role_name = role_name or uuid.uuid4().hex + request = {'name': role_name} + response = request.copy() + response['id'] = role_id + return _RoleData( + role_id, role_name, {'role': response}, {'role': request} + ) + + def use_broken_keystone(self): + self.adapter = self.useFixture(rm_fixture.Fixture()) + self.calls = [] + self._uri_registry.clear() + self.__do_register_uris( + [ + dict( + method='GET', + uri='https://identity.example.com/', + text=open(self.discovery_json).read(), + ), + dict( + method='POST', + 
uri='https://identity.example.com/v3/auth/tokens', + status_code=400, + ), + ] + ) + self._make_test_cloud(identity_api_version='3') + + def use_nothing(self): + self.calls = [] + self._uri_registry.clear() + + def get_keystone_v3_token( + self, + project_name='admin', + ): + return dict( + method='POST', + uri='https://identity.example.com/v3/auth/tokens', + headers={'X-Subject-Token': self.getUniqueString('KeystoneToken')}, + json=self.os_fixture.v3_token, + validate=dict( + json={ + 'auth': { + 'identity': { + 'methods': ['password'], + 'password': { + 'user': { + 'domain': { + 'name': 'default', + }, + 'name': 'admin', + 'password': 'password', + } + }, + }, + 'scope': { + 'project': { + 'domain': {'name': 'default'}, + 'name': project_name, + } + }, + } + } + ), + ) + + def get_keystone_discovery(self): + with open(self.discovery_json) as discovery_file: + return dict( + method='GET', + uri='https://identity.example.com/', + text=discovery_file.read(), + ) + + def use_keystone_v3(self): + self.adapter = self.useFixture(rm_fixture.Fixture()) + self.calls = [] + self._uri_registry.clear() + self.__do_register_uris( + [ + self.get_keystone_discovery(), + self.get_keystone_v3_token(), + ] + ) + self._make_test_cloud(identity_api_version='3') + + def use_keystone_v2(self): + self.adapter = self.useFixture(rm_fixture.Fixture()) + self.calls = [] + self._uri_registry.clear() + + self.__do_register_uris( + [ + self.get_keystone_discovery(), + dict( + method='POST', + uri='https://identity.example.com/v2.0/tokens', + json=self.os_fixture.v2_token, + ), + ] + ) + + self._make_test_cloud( + cloud_name='_test_cloud_v2_', identity_api_version='2.0' + ) + + def _make_test_cloud(self, cloud_name='_test_cloud_', **kwargs): + test_cloud = os.environ.get('OPENSTACKSDK_OS_CLOUD', cloud_name) + self.cloud_config = self.config.get_one( + cloud=test_cloud, validate=True, **kwargs + ) + self.cloud = openstack.connection.Connection( + config=self.cloud_config, 
strict=self.strict_cloud + ) + + def get_cinder_discovery_mock_dict( + self, + block_storage_version_json='block-storage-version.json', + block_storage_discovery_url='https://block-storage.example.com/', + ): + discovery_fixture = os.path.join( + self.fixtures_directory, block_storage_version_json + ) + return dict( + method='GET', + uri=block_storage_discovery_url, + text=open(discovery_fixture).read(), + ) + + def get_glance_discovery_mock_dict( + self, + image_version_json='image-version.json', + image_discovery_url='https://image.example.com/', + ): + discovery_fixture = os.path.join( + self.fixtures_directory, image_version_json + ) + return dict( + method='GET', + uri=image_discovery_url, + status_code=300, + text=open(discovery_fixture).read(), + ) + + def get_nova_discovery_mock_dict( + self, + compute_version_json='compute-version.json', + compute_discovery_url='https://compute.example.com/v2.1/', + ): + discovery_fixture = os.path.join( + self.fixtures_directory, compute_version_json + ) + return dict( + method='GET', + uri=compute_discovery_url, + text=open(discovery_fixture).read(), + ) + + def get_placement_discovery_mock_dict( + self, discovery_fixture='placement.json' + ): + discovery_fixture = os.path.join( + self.fixtures_directory, discovery_fixture + ) + return dict( + method='GET', + uri="https://placement.example.com/", + text=open(discovery_fixture).read(), + ) + + def get_designate_discovery_mock_dict(self): + discovery_fixture = os.path.join(self.fixtures_directory, "dns.json") + return dict( + method='GET', + uri="https://dns.example.com/", + text=open(discovery_fixture).read(), + ) + + def get_ironic_discovery_mock_dict(self): + discovery_fixture = os.path.join( + self.fixtures_directory, "baremetal.json" + ) + return dict( + method='GET', + uri="https://baremetal.example.com/", + text=open(discovery_fixture).read(), + ) + + def get_senlin_discovery_mock_dict(self): + discovery_fixture = os.path.join( + self.fixtures_directory, 
"clustering.json" + ) + return dict( + method='GET', + uri="https://clustering.example.com/", + text=open(discovery_fixture).read(), + ) + + def use_compute_discovery( + self, + compute_version_json='compute-version.json', + compute_discovery_url='https://compute.example.com/v2.1/', + ): + self.__do_register_uris( + [ + self.get_nova_discovery_mock_dict( + compute_version_json, compute_discovery_url + ), + ] + ) + + def get_cyborg_discovery_mock_dict(self): + discovery_fixture = os.path.join( + self.fixtures_directory, "accelerator.json" + ) + return dict( + method='GET', + uri="https://accelerator.example.com/", + text=open(discovery_fixture).read(), + ) + + def get_manila_discovery_mock_dict(self): + discovery_fixture = os.path.join( + self.fixtures_directory, "shared-file-system.json" + ) + return dict( + method='GET', + uri="https://shared-file-system.example.com/", + text=open(discovery_fixture).read(), + ) + + def use_glance( + self, + image_version_json='image-version.json', + image_discovery_url='https://image.example.com/', + ): + # NOTE(notmorgan): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_glance is meant to be used during an + # actual test case, use .get_glance_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris( + [ + self.get_glance_discovery_mock_dict( + image_version_json, image_discovery_url + ) + ] + ) + + def use_cinder(self): + self.__do_register_uris([self.get_cinder_discovery_mock_dict()]) + + def use_placement(self, **kwargs): + self.__do_register_uris( + [self.get_placement_discovery_mock_dict(**kwargs)] + ) + + def use_designate(self): + # NOTE(slaweq): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_designate is meant to be used during an + # actual test 
case, use .get_designate_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([self.get_designate_discovery_mock_dict()]) + + def use_ironic(self): + # NOTE(TheJulia): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_ironic is meant to be used during an + # actual test case, use .get_ironic_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([self.get_ironic_discovery_mock_dict()]) + + def use_senlin(self): + # NOTE(elachance): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_senlin is meant to be used during an + # actual test case, use .get_senlin_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([self.get_senlin_discovery_mock_dict()]) + + def use_cyborg(self): + # NOTE(s_shogo): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_cyborg is meant to be used during an + # actual test case, use .get_cyborg_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([self.get_cyborg_discovery_mock_dict()]) + + def use_manila(self): + # NOTE(gouthamr): This method is only meant to be used in "setUp" + # where the ordering of the url being registered is tightly controlled + # if the functionality of .use_manila is meant to be used during an + # actual test case, use .get_manila_discovery_mock and apply to the + # right location in the mock_uris when calling .register_uris + self.__do_register_uris([self.get_manila_discovery_mock_dict()]) + + def register_uris(self, uri_mock_list=None): + 
"""Mock a list of URIs and responses via requests mock. + + This method may be called only once per test-case to avoid odd + and difficult to debug interactions. Discovery and Auth request mocking + happens separately from this method. + + :param uri_mock_list: List of dictionaries that template out what is + passed to requests_mock fixture's `register_uri`. + Format is: + {'method': , + 'uri': , + ... + } + + Common keys to pass in the dictionary: + * json: the json response (dict) + * status_code: the HTTP status (int) + * validate: The request body (dict) to + validate with assert_calls + all key-word arguments that are valid to send to + requests_mock are supported. + + This list should be in the order in which calls + are made. When `assert_calls` is executed, order + here will be validated. Duplicate URIs and + Methods are allowed and will be collapsed into a + single matcher. Each response will be returned + in order as the URI+Method is hit. + :type uri_mock_list: list + :return: None + """ + assert not self.__register_uris_called + self.__do_register_uris(uri_mock_list or []) + self.__register_uris_called = True + + def __do_register_uris(self, uri_mock_list=None): + for to_mock in uri_mock_list: + kw_params = { + k: to_mock.pop(k) + for k in ('request_headers', 'complete_qs', '_real_http') + if k in to_mock + } + + method = to_mock.pop('method') + uri = to_mock.pop('uri') + # NOTE(notmorgan): make sure the delimiter is non-url-safe, in this + # case "|" is used so that the split can be a bit easier on + # maintainers of this code. 
+ key = f'{method}|{uri}|{kw_params}' + validate = to_mock.pop('validate', {}) + valid_keys = {'json', 'headers', 'params', 'data'} + invalid_keys = set(validate.keys()) - valid_keys + if invalid_keys: + raise TypeError( + f"Invalid values passed to validate: {invalid_keys}" + ) + headers = structures.CaseInsensitiveDict( + to_mock.pop('headers', {}) + ) + if 'content-type' not in headers: + headers['content-type'] = 'application/json' + + if 'exc' not in to_mock: + to_mock['headers'] = headers + + self.calls += [dict(method=method, url=uri, **validate)] + self._uri_registry.setdefault( + key, {'response_list': [], 'kw_params': kw_params} + ) + if self._uri_registry[key]['kw_params'] != kw_params: + raise AssertionError( + 'PROGRAMMING ERROR: key-word-params ' + 'should be part of the uri_key and cannot change, ' + 'it will affect the matcher in requests_mock. ' + '{old!r} != {new!r}'.format( + old=self._uri_registry[key]['kw_params'], + new=kw_params, + ) + ) + self._uri_registry[key]['response_list'].append(to_mock) + + for mocked, params in self._uri_registry.items(): + mock_method, mock_uri, _ignored = mocked.split('|', 2) + self.adapter.register_uri( + mock_method, + mock_uri, + params['response_list'], + **params['kw_params'], + ) + + def assert_no_calls(self): + # TODO(mordred) For now, creating the adapter for connections is + # triggering catalog lookups. Make sure no_calls is only 2. + # When we can make that on-demand through a descriptor object, + # drop this to 0. 
+ self.assertEqual(2, len(self.adapter.request_history)) + + def assert_calls(self, stop_after=None, do_count=True): + for x, (call, history) in enumerate( + zip(self.calls, self.adapter.request_history) + ): + if stop_after and x > stop_after: + break + + call_uri_parts = urllib.parse.urlparse(call['url']) + history_uri_parts = urllib.parse.urlparse(history.url) + self.assertEqual( + ( + call['method'], + call_uri_parts.scheme, + call_uri_parts.netloc, + call_uri_parts.path, + call_uri_parts.params, + urllib.parse.parse_qs(call_uri_parts.query), + ), + ( + history.method, + history_uri_parts.scheme, + history_uri_parts.netloc, + history_uri_parts.path, + history_uri_parts.params, + urllib.parse.parse_qs(history_uri_parts.query), + ), + ( + f'REST mismatch on call {x}. ' + f'Expected {call["method"]} {call["url"]}. ' + f'Got {history.method} {history.url}. ' + 'NOTE: query string order differences wont cause mismatch' + ), + ) + if 'json' in call: + self.assertEqual( + call['json'], + history.json(), + f'json content mismatch in call {x}', + ) + # headers in a call isn't exhaustive - it's checking to make sure + # a specific header or headers are there, not that they are the + # only headers + if 'headers' in call: + for key, value in call['headers'].items(): + self.assertEqual( + value, + history.headers[key], + f'header mismatch in call {x}', + ) + if do_count: + self.assertEqual( + len(self.calls), + len(self.adapter.request_history), + "Expected:\n{}'\nGot:\n{}".format( + '\n'.join( + [f'{c["method"]} {c["url"]}' for c in self.calls] + ), + '\n'.join( + [ + f'{h.method} {h.url}' + for h in self.adapter.request_history + ] + ), + ), + ) + + def assertResourceEqual(self, actual, expected, resource_type): + """Helper for the assertEqual which compares Resource object against + dictionary representing expected state. + + :param Resource actual: actual object. + :param dict expected: dictionary representing expected object. 
+ :param class resource_type: class type to be applied for the expected + resource. + """ + return self.assertEqual( + resource_type(**expected).to_dict(computed=False), + actual.to_dict(computed=False), + ) + + def assertResourceListEqual(self, actual, expected, resource_type): + """Helper for the assertEqual which compares Resource lists object + against dictionary representing expected state. + + :param list actual: List of actual objects. + :param listexpected: List of dictionaries representing expected + objects. + :param class resource_type: class type to be applied for the expected + resource. + """ + self.assertEqual( + [resource_type(**f).to_dict(computed=False) for f in expected], + [f.to_dict(computed=False) for f in actual], + ) + + +class IronicTestCase(TestCase): + def setUp(self): + super().setUp() + self.use_ironic() + self.uuid = str(uuid.uuid4()) + self.name = self.getUniqueString('name') + + def get_mock_url(self, **kwargs): + kwargs.setdefault('service_type', 'baremetal') + kwargs.setdefault('interface', 'public') + kwargs.setdefault('base_url_append', 'v1') + return super().get_mock_url(**kwargs) diff --git a/openstack/tests/unit/block_storage/__init__.py b/openstack/tests/unit/block_storage/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/block_storage/v2/__init__.py b/openstack/tests/unit/block_storage/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/block_storage/v2/test_backup.py b/openstack/tests/unit/block_storage/v2/test_backup.py new file mode 100644 index 0000000000..760f46ecbc --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_backup.py @@ -0,0 +1,217 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v2 import backup +from openstack import exceptions +from openstack.tests.unit import base + + +FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" + +BACKUP = { + "availability_zone": "az1", + "container": "volumebackups", + "created_at": "2018-04-02T10:35:27.000000", + "updated_at": "2018-04-03T10:35:27.000000", + "description": 'description', + "fail_reason": 'fail reason', + "id": FAKE_ID, + "name": "backup001", + "object_count": 22, + "size": 1, + "status": "available", + "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", + "is_incremental": True, + "has_dependent_backups": False, +} + + +class TestBackup(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.get = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + + def test_basic(self): + sot = backup.Backup(BACKUP) + self.assertEqual("backup", sot.resource_key) + self.assertEqual("backups", sot.resources_key) + self.assertEqual("/backups", sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_fetch) + + self.assertDictEqual( + { + "all_tenants": "all_tenants", + "limit": "limit", + "marker": "marker", + "name": "name", + "project_id": "project_id", + "sort_dir": "sort_dir", + 
"sort_key": "sort_key", + "status": "status", + "volume_id": "volume_id", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = backup.Backup(**BACKUP) + self.assertEqual(BACKUP["id"], sot.id) + self.assertEqual(BACKUP["name"], sot.name) + self.assertEqual(BACKUP["status"], sot.status) + self.assertEqual(BACKUP["container"], sot.container) + self.assertEqual(BACKUP["availability_zone"], sot.availability_zone) + self.assertEqual(BACKUP["created_at"], sot.created_at) + self.assertEqual(BACKUP["updated_at"], sot.updated_at) + self.assertEqual(BACKUP["description"], sot.description) + self.assertEqual(BACKUP["fail_reason"], sot.fail_reason) + self.assertEqual(BACKUP["volume_id"], sot.volume_id) + self.assertEqual(BACKUP["object_count"], sot.object_count) + self.assertEqual(BACKUP["is_incremental"], sot.is_incremental) + self.assertEqual(BACKUP["size"], sot.size) + self.assertEqual( + BACKUP["has_dependent_backups"], sot.has_dependent_backups + ) + + def test_create_incremental(self): + sot = backup.Backup(is_incremental=True) + sot2 = backup.Backup(is_incremental=False) + + create_response = mock.Mock() + create_response.status_code = 200 + create_response.json.return_value = {} + create_response.headers = {} + self.sess.post.return_value = create_response + + sot.create(self.sess) + self.sess.post.assert_called_with( + '/backups', + headers={}, + json={ + 'backup': { + 'incremental': True, + } + }, + params={}, + ) + + sot2.create(self.sess) + self.sess.post.assert_called_with( + '/backups', + headers={}, + json={ + 'backup': { + 'incremental': False, + } + }, + params={}, + ) + + def test_restore(self): + sot = backup.Backup(**BACKUP) + + restore_response = mock.Mock() + restore_response.status_code = 202 + restore_response.json.return_value = { + "restore": { + "backup_id": "back", + "volume_id": "vol", + "volume_name": "name", + } + } + restore_response.headers = {} + self.sess.post.return_value = restore_response + + self.assertEqual(sot, 
sot.restore(self.sess, 'vol', 'name')) + + url = f'backups/{FAKE_ID}/restore' + body = {"restore": {"volume_id": "vol", "name": "name"}} + self.sess.post.assert_called_with(url, json=body) + + def test_restore_name(self): + sot = backup.Backup(**BACKUP) + + restore_response = mock.Mock() + restore_response.status_code = 202 + restore_response.json.return_value = { + "restore": { + "backup_id": "back", + "volume_id": "vol", + "volume_name": "name", + } + } + restore_response.headers = {} + self.sess.post.return_value = restore_response + + self.assertEqual(sot, sot.restore(self.sess, name='name')) + + url = f'backups/{FAKE_ID}/restore' + body = {"restore": {"name": "name"}} + self.sess.post.assert_called_with(url, json=body) + + def test_restore_vol_id(self): + sot = backup.Backup(**BACKUP) + + restore_response = mock.Mock() + restore_response.status_code = 202 + restore_response.json.return_value = { + "restore": { + "backup_id": "back", + "volume_id": "vol", + "volume_name": "name", + } + } + restore_response.headers = {} + self.sess.post.return_value = restore_response + + self.assertEqual(sot, sot.restore(self.sess, volume_id='vol')) + + url = f'backups/{FAKE_ID}/restore' + body = {"restore": {"volume_id": "vol"}} + self.sess.post.assert_called_with(url, json=body) + + def test_restore_no_params(self): + sot = backup.Backup(**BACKUP) + + self.assertRaises(exceptions.SDKException, sot.restore, self.sess) + + def test_force_delete(self): + sot = backup.Backup(**BACKUP) + + self.assertIsNone(sot.force_delete(self.sess)) + + url = f'backups/{FAKE_ID}/action' + body = {'os-force_delete': None} + self.sess.post.assert_called_with(url, json=body) + + def test_reset_status(self): + sot = backup.Backup(**BACKUP) + + self.assertIsNone(sot.reset_status(self.sess, 'new_status')) + + url = f'backups/{FAKE_ID}/action' + body = {'os-reset_status': {'status': 'new_status'}} + self.sess.post.assert_called_with(url, json=body) diff --git 
a/openstack/tests/unit/block_storage/v2/test_capabilities.py b/openstack/tests/unit/block_storage/v2/test_capabilities.py new file mode 100644 index 0000000000..907a43661b --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_capabilities.py @@ -0,0 +1,102 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v2 import capabilities +from openstack.tests.unit import base + +CAPABILITIES = { + "namespace": "OS::Storage::Capabilities::fake", + "vendor_name": "OpenStack", + "volume_backend_name": "lvmdriver-1", + "pool_name": "pool", + "driver_version": "2.0.0", + "storage_protocol": "iSCSI", + "display_name": "Capabilities of Cinder LVM driver", + "description": "These are volume type options", + "visibility": "public", + "replication_targets": [], + "properties": { + "compression": { + "title": "Compression", + "description": "Enables compression.", + "type": "boolean", + }, + "qos": { + "title": "QoS", + "description": "Enables QoS.", + "type": "boolean", + }, + "replication": { + "title": "Replication", + "description": "Enables replication.", + "type": "boolean", + }, + "thin_provisioning": { + "title": "Thin Provisioning", + "description": "Sets thin provisioning.", + "type": "boolean", + }, + }, +} + + +class TestCapabilites(base.TestCase): + def test_basic(self): + capabilities_resource = capabilities.Capabilities() + self.assertEqual(None, capabilities_resource.resource_key) + self.assertEqual(None, 
capabilities_resource.resources_key) + self.assertEqual("/capabilities", capabilities_resource.base_path) + self.assertTrue(capabilities_resource.allow_fetch) + self.assertFalse(capabilities_resource.allow_create) + self.assertFalse(capabilities_resource.allow_commit) + self.assertFalse(capabilities_resource.allow_delete) + self.assertFalse(capabilities_resource.allow_list) + + def test_make_capabilities(self): + capabilities_resource = capabilities.Capabilities(**CAPABILITIES) + self.assertEqual( + CAPABILITIES["description"], capabilities_resource.description + ) + self.assertEqual( + CAPABILITIES["display_name"], capabilities_resource.display_name + ) + self.assertEqual( + CAPABILITIES["driver_version"], + capabilities_resource.driver_version, + ) + self.assertEqual( + CAPABILITIES["namespace"], capabilities_resource.namespace + ) + self.assertEqual( + CAPABILITIES["pool_name"], capabilities_resource.pool_name + ) + self.assertEqual( + CAPABILITIES["properties"], capabilities_resource.properties + ) + self.assertEqual( + CAPABILITIES["replication_targets"], + capabilities_resource.replication_targets, + ) + self.assertEqual( + CAPABILITIES["storage_protocol"], + capabilities_resource.storage_protocol, + ) + self.assertEqual( + CAPABILITIES["vendor_name"], capabilities_resource.vendor_name + ) + self.assertEqual( + CAPABILITIES["visibility"], capabilities_resource.visibility + ) + self.assertEqual( + CAPABILITIES["volume_backend_name"], + capabilities_resource.volume_backend_name, + ) diff --git a/openstack/tests/unit/block_storage/v2/test_extension.py b/openstack/tests/unit/block_storage/v2/test_extension.py new file mode 100644 index 0000000000..b2ec046adc --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_extension.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v2 import extension +from openstack.tests.unit import base + +EXTENSION = { + "alias": "os-hosts", + "description": "Admin-only host administration.", + "links": [], + "name": "Hosts", + "namespace": "https://docs.openstack.org/volume/ext/hosts/api/v1.1", + "updated": "2011-06-29T00:00:00+00:00", +} + + +class TestExtension(base.TestCase): + def test_basic(self): + extension_resource = extension.Extension() + self.assertEqual('extensions', extension_resource.resources_key) + self.assertEqual('/extensions', extension_resource.base_path) + self.assertFalse(extension_resource.allow_create) + self.assertFalse(extension_resource.allow_fetch) + self.assertFalse(extension_resource.allow_commit) + self.assertFalse(extension_resource.allow_delete) + self.assertTrue(extension_resource.allow_list) + + def test_make_extension(self): + extension_resource = extension.Extension(**EXTENSION) + self.assertEqual(EXTENSION['alias'], extension_resource.alias) + self.assertEqual( + EXTENSION['description'], extension_resource.description + ) + self.assertEqual(EXTENSION['links'], extension_resource.links) + self.assertEqual(EXTENSION['name'], extension_resource.name) + self.assertEqual(EXTENSION['namespace'], extension_resource.namespace) + self.assertEqual(EXTENSION['updated'], extension_resource.updated_at) diff --git a/openstack/tests/unit/block_storage/v2/test_limits.py b/openstack/tests/unit/block_storage/v2/test_limits.py new file mode 100644 index 0000000000..bb27c970fe --- /dev/null +++ 
b/openstack/tests/unit/block_storage/v2/test_limits.py @@ -0,0 +1,206 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v2 import limits +from openstack.tests.unit import base + +ABSOLUTE_LIMIT = { + "totalSnapshotsUsed": 1, + "maxTotalBackups": 10, + "maxTotalVolumeGigabytes": 1000, + "maxTotalSnapshots": 10, + "maxTotalBackupGigabytes": 1000, + "totalBackupGigabytesUsed": 1, + "maxTotalVolumes": 10, + "totalVolumesUsed": 2, + "totalBackupsUsed": 3, + "totalGigabytesUsed": 2, +} + +RATE_LIMIT = { + "verb": "POST", + "value": 80, + "remaining": 80, + "unit": "MINUTE", + "next-available": "2021-02-23T22:08:00Z", +} + +RATE_LIMITS = {"regex": ".*", "uri": "*", "limit": [RATE_LIMIT]} + +LIMIT = {"rate": [RATE_LIMITS], "absolute": ABSOLUTE_LIMIT} + + +class TestAbsoluteLimit(base.TestCase): + def test_basic(self): + limit_resource = limits.AbsoluteLimit() + self.assertIsNone(limit_resource.resource_key) + self.assertIsNone(limit_resource.resources_key) + self.assertEqual('', limit_resource.base_path) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_list) + + def test_make_absolute_limit(self): + limit_resource = limits.AbsoluteLimit(**ABSOLUTE_LIMIT) + self.assertEqual( + ABSOLUTE_LIMIT['totalSnapshotsUsed'], + limit_resource.total_snapshots_used, + ) + 
self.assertEqual( + ABSOLUTE_LIMIT['maxTotalBackups'], limit_resource.max_total_backups + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalVolumeGigabytes'], + limit_resource.max_total_volume_gigabytes, + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalSnapshots'], + limit_resource.max_total_snapshots, + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalBackupGigabytes'], + limit_resource.max_total_backup_gigabytes, + ) + self.assertEqual( + ABSOLUTE_LIMIT['totalBackupGigabytesUsed'], + limit_resource.total_backup_gigabytes_used, + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalVolumes'], limit_resource.max_total_volumes + ) + self.assertEqual( + ABSOLUTE_LIMIT['totalVolumesUsed'], + limit_resource.total_volumes_used, + ) + self.assertEqual( + ABSOLUTE_LIMIT['totalBackupsUsed'], + limit_resource.total_backups_used, + ) + self.assertEqual( + ABSOLUTE_LIMIT['totalGigabytesUsed'], + limit_resource.total_gigabytes_used, + ) + + +class TestRateLimit(base.TestCase): + def test_basic(self): + limit_resource = limits.RateLimit() + self.assertIsNone(limit_resource.resource_key) + self.assertIsNone(limit_resource.resources_key) + self.assertEqual('', limit_resource.base_path) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_list) + + def test_make_rate_limit(self): + limit_resource = limits.RateLimit(**RATE_LIMIT) + self.assertEqual(RATE_LIMIT['verb'], limit_resource.verb) + self.assertEqual(RATE_LIMIT['value'], limit_resource.value) + self.assertEqual(RATE_LIMIT['remaining'], limit_resource.remaining) + self.assertEqual(RATE_LIMIT['unit'], limit_resource.unit) + self.assertEqual( + RATE_LIMIT['next-available'], limit_resource.next_available + ) + + +class TestRateLimits(base.TestCase): + def test_basic(self): + limit_resource = limits.RateLimits() + self.assertIsNone(limit_resource.resource_key) + 
self.assertIsNone(limit_resource.resources_key) + self.assertEqual('', limit_resource.base_path) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_list) + + def _test_rate_limit(self, expected, actual): + self.assertEqual(expected[0]['verb'], actual[0].verb) + self.assertEqual(expected[0]['value'], actual[0].value) + self.assertEqual(expected[0]['remaining'], actual[0].remaining) + self.assertEqual(expected[0]['unit'], actual[0].unit) + self.assertEqual( + expected[0]['next-available'], actual[0].next_available + ) + + def test_make_rate_limits(self): + limit_resource = limits.RateLimits(**RATE_LIMITS) + self.assertEqual(RATE_LIMITS['regex'], limit_resource.regex) + self.assertEqual(RATE_LIMITS['uri'], limit_resource.uri) + self._test_rate_limit(RATE_LIMITS['limit'], limit_resource.limits) + + +class TestLimit(base.TestCase): + def test_basic(self): + limit_resource = limits.Limits() + self.assertEqual('limits', limit_resource.resource_key) + self.assertEqual('/limits', limit_resource.base_path) + self.assertTrue(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_list) + + def _test_absolute_limit(self, expected, actual): + self.assertEqual( + expected['totalSnapshotsUsed'], actual.total_snapshots_used + ) + self.assertEqual(expected['maxTotalBackups'], actual.max_total_backups) + self.assertEqual( + expected['maxTotalVolumeGigabytes'], + actual.max_total_volume_gigabytes, + ) + self.assertEqual( + expected['maxTotalSnapshots'], actual.max_total_snapshots + ) + self.assertEqual( + expected['maxTotalBackupGigabytes'], + actual.max_total_backup_gigabytes, + ) + self.assertEqual( + expected['totalBackupGigabytesUsed'], + 
actual.total_backup_gigabytes_used, + ) + self.assertEqual(expected['maxTotalVolumes'], actual.max_total_volumes) + self.assertEqual( + expected['totalVolumesUsed'], actual.total_volumes_used + ) + self.assertEqual( + expected['totalBackupsUsed'], actual.total_backups_used + ) + self.assertEqual( + expected['totalGigabytesUsed'], actual.total_gigabytes_used + ) + + def _test_rate_limit(self, expected, actual): + self.assertEqual(expected[0]['verb'], actual[0].verb) + self.assertEqual(expected[0]['value'], actual[0].value) + self.assertEqual(expected[0]['remaining'], actual[0].remaining) + self.assertEqual(expected[0]['unit'], actual[0].unit) + self.assertEqual( + expected[0]['next-available'], actual[0].next_available + ) + + def _test_rate_limits(self, expected, actual): + self.assertEqual(expected[0]['regex'], actual[0].regex) + self.assertEqual(expected[0]['uri'], actual[0].uri) + self._test_rate_limit(expected[0]['limit'], actual[0].limits) + + def test_make_limit(self): + limit_resource = limits.Limits(**LIMIT) + self._test_rate_limits(LIMIT['rate'], limit_resource.rate) + self._test_absolute_limit(LIMIT['absolute'], limit_resource.absolute) diff --git a/openstack/tests/unit/block_storage/v2/test_proxy.py b/openstack/tests/unit/block_storage/v2/test_proxy.py new file mode 100644 index 0000000000..c97dd7044d --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_proxy.py @@ -0,0 +1,697 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock +import warnings + +from openstack.block_storage.v2 import _proxy +from openstack.block_storage.v2 import backup +from openstack.block_storage.v2 import capabilities +from openstack.block_storage.v2 import limits +from openstack.block_storage.v2 import quota_class_set +from openstack.block_storage.v2 import quota_set +from openstack.block_storage.v2 import service +from openstack.block_storage.v2 import snapshot +from openstack.block_storage.v2 import stats +from openstack.block_storage.v2 import type +from openstack.block_storage.v2 import volume +from openstack.identity.v3 import project +from openstack import proxy as proxy_base +from openstack.tests.unit import test_proxy_base +from openstack import warnings as os_warnings + + +class TestVolumeProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestVolume(TestVolumeProxy): + def test_volume_get(self): + self.verify_get(self.proxy.get_volume, volume.Volume) + + def test_volume_find(self): + self.verify_find( + self.proxy.find_volume, + volume.Volume, + method_kwargs={'all_projects': True}, + expected_kwargs={ + 'list_base_path': '/volumes/detail', + 'all_projects': True, + }, + ) + + def test_volumes_detailed(self): + self.verify_list( + self.proxy.volumes, + volume.Volume, + method_kwargs={"details": True, "all_projects": True}, + expected_kwargs={ + "base_path": "/volumes/detail", + "all_projects": True, + }, + ) + + def test_volumes_not_detailed(self): + self.verify_list( + self.proxy.volumes, + volume.Volume, + method_kwargs={"details": False, "all_projects": True}, + expected_kwargs={"all_projects": True}, + ) + + def test_volume_create_attrs(self): + self.verify_create(self.proxy.create_volume, volume.Volume) + + def test_volume_delete(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.delete", + self.proxy.delete_volume, + method_args=["value"], + expected_args=[self.proxy], + 
expected_kwargs={"params": {"cascade": False}}, + ) + + def test_volume_delete_force(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.force_delete", + self.proxy.delete_volume, + method_args=["value"], + method_kwargs={"force": True}, + expected_args=[self.proxy], + ) + + def test_get_volume_metadata(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.fetch_metadata", + self.proxy.get_volume_metadata, + method_args=["value"], + expected_args=[self.proxy], + expected_result=volume.Volume(id="value", metadata={}), + ) + + def test_set_volume_metadata(self): + kwargs = {"a": "1", "b": "2"} + id = "an_id" + self._verify( + "openstack.block_storage.v2.volume.Volume.set_metadata", + self.proxy.set_volume_metadata, + method_args=[id], + method_kwargs=kwargs, + method_result=volume.Volume.existing(id=id, metadata=kwargs), + expected_args=[self.proxy], + expected_kwargs={'metadata': kwargs}, + expected_result=volume.Volume.existing(id=id, metadata=kwargs), + ) + + def test_delete_volume_metadata(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.delete_metadata_item", + self.proxy.delete_volume_metadata, + expected_result=None, + method_args=["value", ["key"]], + expected_args=[self.proxy, "key"], + ) + + def test_backend_pools(self): + self.verify_list(self.proxy.backend_pools, stats.Pools) + + def test_volume_wait_for(self): + value = volume.Volume(id='1234') + self.verify_wait_for_status( + self.proxy.wait_for_status, + method_args=[value], + expected_args=[ + self.proxy, + value, + 'available', + ['error'], + 2, + None, + 'status', + None, + ], + ) + + +class TestVolumeActions(TestVolumeProxy): + def test_volume_extend(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.extend", + self.proxy.extend_volume, + method_args=["value", "new-size"], + expected_args=[self.proxy, "new-size"], + ) + + def test_volume_set_readonly_no_argument(self): + self._verify( + 
"openstack.block_storage.v2.volume.Volume.set_readonly", + self.proxy.set_volume_readonly, + method_args=["value"], + expected_args=[self.proxy, True], + ) + + def test_volume_set_readonly_false(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.set_readonly", + self.proxy.set_volume_readonly, + method_args=["value", False], + expected_args=[self.proxy, False], + ) + + def test_volume_set_bootable(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.set_bootable_status", + self.proxy.set_volume_bootable_status, + method_args=["value", True], + expected_args=[self.proxy, True], + ) + + def test_volume_reset_status(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.reset_status", + self.proxy.reset_volume_status, + method_args=["value", '1', '2', '3'], + expected_args=[self.proxy, '1', '2', '3'], + ) + + def test_set_volume_image_metadata(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.set_image_metadata", + self.proxy.set_volume_image_metadata, + method_args=["value"], + method_kwargs={'foo': 'bar'}, + expected_args=[self.proxy], + expected_kwargs={'metadata': {'foo': 'bar'}}, + ) + + def test_delete_volume_image_metadata(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.delete_image_metadata", + self.proxy.delete_volume_image_metadata, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_delete_volume_image_metadata__with_keys(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.delete_image_metadata_item", + self.proxy.delete_volume_image_metadata, + method_args=["value", ['foo']], + expected_args=[self.proxy, 'foo'], + ) + + def test_attach_instance(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.attach", + self.proxy.attach_volume, + method_args=["value", '1'], + method_kwargs={'instance': '2'}, + expected_args=[self.proxy, '1', '2', None], + ) + + def test_attach_host(self): + self._verify( + 
"openstack.block_storage.v2.volume.Volume.attach", + self.proxy.attach_volume, + method_args=["value", '1'], + method_kwargs={'host_name': '3'}, + expected_args=[self.proxy, '1', None, '3'], + ) + + def test_detach_defaults(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.detach", + self.proxy.detach_volume, + method_args=["value", '1'], + expected_args=[self.proxy, '1', False, None], + ) + + def test_detach_force(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.detach", + self.proxy.detach_volume, + method_args=["value", '1', True, {'a': 'b'}], + expected_args=[self.proxy, '1', True, {'a': 'b'}], + ) + + def test_unmanage(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.unmanage", + self.proxy.unmanage_volume, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_migrate_default(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.migrate", + self.proxy.migrate_volume, + method_args=["value", '1'], + expected_args=[self.proxy, '1', False, False], + ) + + def test_migrate_nondefault(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.migrate", + self.proxy.migrate_volume, + method_args=["value", '1', True, True], + expected_args=[self.proxy, '1', True, True], + ) + + def test_complete_migration(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.complete_migration", + self.proxy.complete_volume_migration, + method_args=["value", '1'], + expected_args=[self.proxy, "1", False], + ) + + def test_complete_migration_error(self): + self._verify( + "openstack.block_storage.v2.volume.Volume.complete_migration", + self.proxy.complete_volume_migration, + method_args=["value", "1", True], + expected_args=[self.proxy, "1", True], + ) + + +class TestBackup(TestVolumeProxy): + def test_backups_detailed(self): + self.verify_list( + self.proxy.backups, + backup.Backup, + method_kwargs={"details": True, "query": 1}, + expected_kwargs={"query": 1, "base_path": 
"/backups/detail"}, + ) + + def test_backups_not_detailed(self): + self.verify_list( + self.proxy.backups, + backup.Backup, + method_kwargs={"details": False, "query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_backup_get(self): + self.verify_get(self.proxy.get_backup, backup.Backup) + + def test_backup_find(self): + self.verify_find( + self.proxy.find_backup, + backup.Backup, + expected_kwargs={'list_base_path': '/backups/detail'}, + ) + + def test_backup_delete(self): + self.verify_delete(self.proxy.delete_backup, backup.Backup, False) + + def test_backup_delete_ignore(self): + self.verify_delete(self.proxy.delete_backup, backup.Backup, True) + + def test_backup_delete_force(self): + self._verify( + "openstack.block_storage.v2.backup.Backup.force_delete", + self.proxy.delete_backup, + method_args=["value"], + method_kwargs={"force": True}, + expected_args=[self.proxy], + ) + + def test_backup_create_attrs(self): + self.verify_create(self.proxy.create_backup, backup.Backup) + + def test_backup_restore(self): + self._verify( + 'openstack.block_storage.v2.backup.Backup.restore', + self.proxy.restore_backup, + method_args=['volume_id'], + method_kwargs={'volume_id': 'vol_id', 'name': 'name'}, + expected_args=[self.proxy], + expected_kwargs={'volume_id': 'vol_id', 'name': 'name'}, + ) + + def test_backup_reset_status(self): + self._verify( + "openstack.block_storage.v2.backup.Backup.reset_status", + self.proxy.reset_backup_status, + method_args=["value", "new_status"], + expected_args=[self.proxy, "new_status"], + ) + + +class TestLimit(TestVolumeProxy): + def test_limits_get(self): + self.verify_get( + self.proxy.get_limits, + limits.Limits, + method_args=[], + expected_kwargs={'requires_id': False}, + ) + + +class TestCapabilities(TestVolumeProxy): + def test_capabilites_get(self): + self.verify_get(self.proxy.get_capabilities, capabilities.Capabilities) + + +class TestSnapshot(TestVolumeProxy): + def test_snapshot_get(self): + 
self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot) + + def test_snapshot_find(self): + self.verify_find( + self.proxy.find_snapshot, + snapshot.Snapshot, + method_kwargs={'all_projects': True}, + expected_kwargs={ + 'list_base_path': '/snapshots/detail', + 'all_projects': True, + }, + ) + + def test_snapshots_detailed(self): + self.verify_list( + self.proxy.snapshots, + snapshot.SnapshotDetail, + method_kwargs={"details": True, "all_projects": True}, + expected_kwargs={"all_projects": True}, + ) + + def test_snapshots_not_detailed(self): + self.verify_list( + self.proxy.snapshots, + snapshot.Snapshot, + method_kwargs={"details": False, "all_projects": True}, + expected_kwargs={"all_projects": True}, + ) + + def test_snapshot_create_attrs(self): + self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot) + + def test_snapshot_update(self): + self.verify_update(self.proxy.update_snapshot, snapshot.Snapshot) + + def test_snapshot_delete(self): + self.verify_delete( + self.proxy.delete_snapshot, snapshot.Snapshot, False + ) + + def test_snapshot_delete_ignore(self): + self.verify_delete(self.proxy.delete_snapshot, snapshot.Snapshot, True) + + def test_snapshot_reset_status(self): + self._verify( + "openstack.block_storage.v2.snapshot.Snapshot.reset_status", + self.proxy.reset_snapshot_status, + method_args=["value", "new_status"], + expected_args=[self.proxy, "new_status"], + ) + + def test_snapshot_manage(self): + kwargs = { + "volume_id": "fake_id", + "remote_source": "fake_volume", + "snapshot_name": "fake_snap", + "description": "test_snap", + "property": {"k": "v"}, + } + self._verify( + "openstack.block_storage.v2.snapshot.Snapshot.manage", + self.proxy.manage_snapshot, + method_kwargs=kwargs, + method_result=snapshot.Snapshot(id="fake_id"), + expected_args=[self.proxy], + expected_kwargs=kwargs, + expected_result=snapshot.Snapshot(id="fake_id"), + ) + + def test_snapshot_unmanage(self): + self._verify( + 
"openstack.block_storage.v2.snapshot.Snapshot.unmanage", + self.proxy.unmanage_snapshot, + method_args=["value"], + expected_args=[self.proxy], + expected_result=None, + ) + + def test_get_snapshot_metadata(self): + self._verify( + "openstack.block_storage.v2.snapshot.Snapshot.fetch_metadata", + self.proxy.get_snapshot_metadata, + method_args=["value"], + expected_args=[self.proxy], + expected_result=snapshot.Snapshot(id="value", metadata={}), + ) + + def test_set_snapshot_metadata(self): + kwargs = {"a": "1", "b": "2"} + id = "an_id" + self._verify( + "openstack.block_storage.v2.snapshot.Snapshot.set_metadata", + self.proxy.set_snapshot_metadata, + method_args=[id], + method_kwargs=kwargs, + method_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), + expected_args=[self.proxy], + expected_kwargs={'metadata': kwargs}, + expected_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), + ) + + def test_delete_snapshot_metadata(self): + self._verify( + "openstack.block_storage.v2.snapshot.Snapshot." 
+ "delete_metadata_item", + self.proxy.delete_snapshot_metadata, + expected_result=None, + method_args=["value", ["key"]], + expected_args=[self.proxy, "key"], + ) + + +class TestType(TestVolumeProxy): + def test_type_get(self): + self.verify_get(self.proxy.get_type, type.Type) + + def test_type_find(self): + self.verify_find(self.proxy.find_type, type.Type) + + def test_types(self): + self.verify_list(self.proxy.types, type.Type) + + def test_type_create_attrs(self): + self.verify_create(self.proxy.create_type, type.Type) + + def test_type_delete(self): + self.verify_delete(self.proxy.delete_type, type.Type, False) + + def test_type_delete_ignore(self): + self.verify_delete(self.proxy.delete_type, type.Type, True) + + def test_type_get_private_access(self): + self._verify( + "openstack.block_storage.v2.type.Type.get_private_access", + self.proxy.get_type_access, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_type_add_private_access(self): + self._verify( + "openstack.block_storage.v2.type.Type.add_private_access", + self.proxy.add_type_access, + method_args=["value", "a"], + expected_args=[self.proxy, "a"], + ) + + def test_type_remove_private_access(self): + self._verify( + "openstack.block_storage.v2.type.Type.remove_private_access", + self.proxy.remove_type_access, + method_args=["value", "a"], + expected_args=[self.proxy, "a"], + ) + + +class TestQuotaClassSet(TestVolumeProxy): + def test_quota_class_set_get(self): + self.verify_get( + self.proxy.get_quota_class_set, quota_class_set.QuotaClassSet + ) + + def test_quota_class_set_update(self): + self.verify_update( + self.proxy.update_quota_class_set, + quota_class_set.QuotaClassSet, + False, + ) + + +class TestQuotaSet(TestVolumeProxy): + def test_quota_set_get(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set, + method_args=['prj'], + expected_args=[ + self.proxy, + False, + None, + None, + False, + ], + expected_kwargs={ + 'microversion': None, 
+ 'resource_response_key': None, + 'usage': False, + }, + method_result=quota_set.QuotaSet(), + expected_result=quota_set.QuotaSet(), + ) + + def test_quota_set_get_query(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set, + method_args=['prj'], + method_kwargs={'usage': True, 'user_id': 'uid'}, + expected_args=[ + self.proxy, + False, + None, + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + 'usage': True, + 'user_id': 'uid', + }, + ) + + def test_quota_set_get_defaults(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set_defaults, + method_args=['prj'], + expected_args=[ + self.proxy, + False, + '/os-quota-sets/defaults', + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + }, + ) + + def test_quota_set_reset(self): + self._verify( + 'openstack.resource.Resource.delete', + self.proxy.revert_quota_set, + method_args=['prj'], + method_kwargs={'user_id': 'uid'}, + expected_args=[self.proxy], + expected_kwargs={'user_id': 'uid'}, + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_quota_set_update(self, mock_get): + fake_project = project.Project(id='prj') + mock_get.side_effect = [fake_project] + + self._verify( + 'openstack.proxy.Proxy._update', + self.proxy.update_quota_set, + method_args=['prj'], + method_kwargs={'volumes': 123}, + expected_args=[quota_set.QuotaSet, None], + expected_kwargs={'project_id': 'prj', 'volumes': 123}, + ) + mock_get.assert_called_once_with(project.Project, 'prj') + + @mock.patch.object(proxy_base.Proxy, "_get_resource") + def test_quota_set_update__legacy(self, mock_get): + fake_quota_set = quota_set.QuotaSet(project_id='prj') + mock_get.side_effect = [fake_quota_set] + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + self._verify( + 'openstack.resource.Resource.commit', + self.proxy.update_quota_set, + 
method_args=[fake_quota_set], + method_kwargs={'ram': 123}, + expected_args=[self.proxy], + expected_kwargs={}, + ) + + self.assertEqual(1, len(w)) + self.assertEqual( + os_warnings.RemovedInSDK50Warning, + w[-1].category, + ) + self.assertIn( + "The signature of 'update_quota_set' has changed ", + str(w[-1]), + ) + + +class TestService(TestVolumeProxy): + def test_services(self): + self.verify_list(self.proxy.services, service.Service) + + def test_enable_service(self): + self._verify( + 'openstack.block_storage.v2.service.Service.enable', + self.proxy.enable_service, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_disable_service(self): + self._verify( + 'openstack.block_storage.v2.service.Service.disable', + self.proxy.disable_service, + method_args=["value"], + expected_kwargs={"reason": None}, + expected_args=[self.proxy], + ) + + def test_thaw_service(self): + self._verify( + 'openstack.block_storage.v2.service.Service.thaw', + self.proxy.thaw_service, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_freeze_service(self): + self._verify( + 'openstack.block_storage.v2.service.Service.freeze', + self.proxy.freeze_service, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_failover_service(self): + self._verify( + 'openstack.block_storage.v2.service.Service.failover', + self.proxy.failover_service, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={"backend_id": None}, + ) diff --git a/openstack/tests/unit/block_storage/v2/test_service.py b/openstack/tests/unit/block_storage/v2/test_service.py new file mode 100644 index 0000000000..dc9dc2eb7a --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_service.py @@ -0,0 +1,140 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.block_storage.v2 import service +from openstack.tests.unit import base + +EXAMPLE = { + "binary": "cinder-scheduler", + "disabled_reason": None, + "host": "devstack", + "state": "up", + "status": "enabled", + "updated_at": "2017-06-29T05:50:35.000000", + "zone": "nova", +} + + +class TestService(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None # nothing uses this + self.resp.json = mock.Mock(return_value={'service': {}}) + self.resp.status_code = 200 + self.resp.headers = {} + self.sess = mock.Mock() + self.sess.put = mock.Mock(return_value=self.resp) + + def test_basic(self): + sot = service.Service() + self.assertIsNone(sot.resource_key) + self.assertEqual('services', sot.resources_key) + self.assertEqual('/os-services', sot.base_path) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_delete) + + self.assertDictEqual( + { + 'binary': 'binary', + 'host': 'host', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = service.Service(**EXAMPLE) + self.assertEqual(EXAMPLE['binary'], sot.binary) + self.assertEqual(EXAMPLE['binary'], sot.name) + self.assertEqual(EXAMPLE['disabled_reason'], sot.disabled_reason) + self.assertEqual(EXAMPLE['host'], sot.host) + self.assertEqual(EXAMPLE['state'], sot.state) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['zone'], sot.availability_zone) + + 
def test_enable(self): + sot = service.Service(**EXAMPLE) + + res = sot.enable(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/enable' + body = { + 'binary': 'cinder-scheduler', + 'host': 'devstack', + } + self.sess.put.assert_called_with(url, json=body) + + def test_disable(self): + sot = service.Service(**EXAMPLE) + + res = sot.disable(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/disable' + body = { + 'binary': 'cinder-scheduler', + 'host': 'devstack', + } + self.sess.put.assert_called_with(url, json=body) + + def test_disable__with_reason(self): + sot = service.Service(**EXAMPLE) + reason = 'fencing' + + res = sot.disable(self.sess, reason=reason) + + self.assertIsNotNone(res) + + url = 'os-services/disable-log-reason' + body = { + 'binary': 'cinder-scheduler', + 'host': 'devstack', + 'disabled_reason': reason, + } + self.sess.put.assert_called_with(url, json=body) + + def test_thaw(self): + sot = service.Service(**EXAMPLE) + + res = sot.thaw(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/thaw' + body = {'host': 'devstack'} + self.sess.put.assert_called_with(url, json=body) + + def test_freeze(self): + sot = service.Service(**EXAMPLE) + + res = sot.freeze(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/freeze' + body = {'host': 'devstack'} + self.sess.put.assert_called_with(url, json=body) + + def test_failover(self): + sot = service.Service(**EXAMPLE) + + res = sot.failover(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/failover_host' + body = {'host': 'devstack'} + self.sess.put.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/block_storage/v2/test_snapshot.py b/openstack/tests/unit/block_storage/v2/test_snapshot.py new file mode 100644 index 0000000000..590c7d38be --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_snapshot.py @@ -0,0 +1,140 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in 
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v2 import snapshot +from openstack.tests.unit import base + + +FAKE_ID = "ffa9bc5e-1172-4021-acaf-cdcd78a9584d" +FAKE_VOLUME_ID = "5aa119a8-d25b-45a7-8d1b-88e127885635" + +SNAPSHOT = { + "status": "creating", + "description": "Daily backup", + "created_at": "2015-03-09T12:14:57.233772", + "updated_at": None, + "metadata": {}, + "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", + "size": 1, + "id": FAKE_ID, + "name": "snap-001", + "force": "true", +} + +DETAILS = { + "os-extended-snapshot-attributes:progress": "100%", + "os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde", # noqa: E501 +} + +DETAILED_SNAPSHOT = SNAPSHOT.copy() +DETAILED_SNAPSHOT.update(**DETAILS) + + +class TestSnapshot(base.TestCase): + def test_basic(self): + sot = snapshot.Snapshot(SNAPSHOT) + self.assertEqual("snapshot", sot.resource_key) + self.assertEqual("snapshots", sot.resources_key) + self.assertEqual("/snapshots", sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + "name": "name", + "status": "status", + "all_projects": "all_tenants", + "volume_id": "volume_id", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_create_basic(self): + sot = snapshot.Snapshot(**SNAPSHOT) + 
self.assertEqual(SNAPSHOT["id"], sot.id) + self.assertEqual(SNAPSHOT["status"], sot.status) + self.assertEqual(SNAPSHOT["created_at"], sot.created_at) + self.assertEqual(SNAPSHOT["updated_at"], sot.updated_at) + self.assertEqual(SNAPSHOT["metadata"], sot.metadata) + self.assertEqual(SNAPSHOT["volume_id"], sot.volume_id) + self.assertEqual(SNAPSHOT["size"], sot.size) + self.assertEqual(SNAPSHOT["name"], sot.name) + self.assertTrue(sot.is_forced) + + +class TestSnapshotActions(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.get = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + + def test_reset_status(self): + sot = snapshot.Snapshot(**SNAPSHOT) + + self.assertIsNone(sot.reset_status(self.sess, 'new_status')) + + url = f'snapshots/{FAKE_ID}/action' + body = {'os-reset_status': {'status': 'new_status'}} + self.sess.post.assert_called_with(url, json=body) + + def test_manage(self): + resp = mock.Mock() + resp.body = {'snapshot': copy.deepcopy(SNAPSHOT)} + resp.json = mock.Mock(return_value=resp.body) + resp.headers = {} + resp.status_code = 202 + + self.sess.post = mock.Mock(return_value=resp) + + sot = snapshot.Snapshot.manage( + self.sess, volume_id=FAKE_VOLUME_ID, ref=FAKE_ID + ) + + self.assertIsNotNone(sot) + + url = '/os-snapshot-manage' + body = { + 'snapshot': { + 'volume_id': FAKE_VOLUME_ID, + 'ref': FAKE_ID, + 'name': None, + 'description': None, + 'metadata': None, + } + } + self.sess.post.assert_called_with(url, json=body) + + def test_unmanage(self): + sot = snapshot.Snapshot(**SNAPSHOT) + + self.assertIsNone(sot.unmanage(self.sess)) + + url = f'snapshots/{FAKE_ID}/action' + body = {'os-unmanage': None} + self.sess.post.assert_called_with(url, json=body) diff --git 
a/openstack/tests/unit/block_storage/v2/test_stats.py b/openstack/tests/unit/block_storage/v2/test_stats.py new file mode 100644 index 0000000000..0de7970154 --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_stats.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v2 import stats +from openstack.tests.unit import base + + +POOLS = { + "name": "pool1", + "capabilities": { + "updated": "2014-10-28T00=00=00-00=00", + "total_capacity": 1024, + "free_capacity": 100, + "volume_backend_name": "pool1", + "reserved_percentage": "0", + "driver_version": "1.0.0", + "storage_protocol": "iSCSI", + "QoS_support": "false", + }, +} + + +class TestBackendPools(base.TestCase): + def setUp(self): + super().setUp() + + def test_basic(self): + sot = stats.Pools(POOLS) + self.assertEqual("", sot.resource_key) + self.assertEqual("pools", sot.resources_key) + self.assertEqual( + "/scheduler-stats/get_pools?detail=True", sot.base_path + ) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_commit) diff --git a/openstack/tests/unit/block_storage/v2/test_transfer.py b/openstack/tests/unit/block_storage/v2/test_transfer.py new file mode 100644 index 0000000000..01f603b7de --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_transfer.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v2 import transfer +from openstack import resource +from openstack.tests.unit import base + + +FAKE_ID = "09d18b36-9e8d-4438-a4da-3f5eff5e1130" +FAKE_VOL_ID = "390de1bc-19d1-41e7-ba67-c492bb36cae5" +FAKE_VOL_NAME = "test-volume" +FAKE_TRANSFER = "7d048960-7c3f-4bf0-952f-4312fdea1dec" +FAKE_AUTH_KEY = "95bc670c0068821d" + +TRANSFER = { + "auth_key": FAKE_AUTH_KEY, + "created_at": "2023-06-27T08:47:23.035010", + "id": FAKE_ID, + "name": FAKE_VOL_NAME, + "volume_id": FAKE_VOL_ID, +} + + +class TestTransfer(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None # nothing uses this + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = "3.55" + + def test_basic(self): + sot = transfer.Transfer() + self.assertEqual("transfer", sot.resource_key) + self.assertEqual("transfers", sot.resources_key) + self.assertEqual("/os-volume-transfer", sot.base_path) + self.assertTrue(sot.allow_create) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = transfer.Transfer(**TRANSFER) + self.assertEqual(TRANSFER["auth_key"], sot.auth_key) + 
self.assertEqual(TRANSFER["created_at"], sot.created_at) + self.assertEqual(TRANSFER["id"], sot.id) + self.assertEqual(TRANSFER["name"], sot.name) + self.assertEqual(TRANSFER["volume_id"], sot.volume_id) + + @mock.patch.object(resource.Resource, '_translate_response') + def test_accept(self, mock_translate): + sot = transfer.Transfer() + sot.id = FAKE_TRANSFER + + sot.accept(self.sess, auth_key=FAKE_AUTH_KEY) + self.sess.post.assert_called_with( + f'os-volume-transfer/{FAKE_TRANSFER}/accept', + json={ + 'accept': { + 'auth_key': FAKE_AUTH_KEY, + } + }, + ) diff --git a/openstack/tests/unit/block_storage/v2/test_type.py b/openstack/tests/unit/block_storage/v2/test_type.py new file mode 100644 index 0000000000..5271f2c359 --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_type.py @@ -0,0 +1,94 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from unittest import mock + +from keystoneauth1 import adapter + + +from openstack.block_storage.v2 import type +from openstack.tests.unit import base + + +FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" +TYPE = {"extra_specs": {"capabilities": "gpu"}, "id": FAKE_ID, "name": "SSD"} + + +class TestType(base.TestCase): + def setUp(self): + super().setUp() + self.extra_specs_result = {"extra_specs": {"go": "cubs", "boo": "sox"}} + self.resp = mock.Mock() + self.resp.body = None + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_basic(self): + sot = type.Type(**TYPE) + self.assertEqual("volume_type", sot.resource_key) + self.assertEqual("volume_types", sot.resources_key) + self.assertEqual("/types", sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_commit) + + def test_new(self): + sot = type.Type.new(id=FAKE_ID) + self.assertEqual(FAKE_ID, sot.id) + + def test_create(self): + sot = type.Type(**TYPE) + self.assertEqual(TYPE["id"], sot.id) + self.assertEqual(TYPE["extra_specs"], sot.extra_specs) + self.assertEqual(TYPE["name"], sot.name) + + def test_get_private_access(self): + sot = type.Type(**TYPE) + + response = mock.Mock() + response.status_code = 200 + response.body = { + "volume_type_access": [{"project_id": "a", "volume_type_id": "b"}] + } + response.json = mock.Mock(return_value=response.body) + self.sess.get = mock.Mock(return_value=response) + + self.assertEqual( + response.body["volume_type_access"], + sot.get_private_access(self.sess), + ) + + self.sess.get.assert_called_with( + f"types/{sot.id}/os-volume-type-access" + ) + + def test_add_private_access(self): + sot = type.Type(**TYPE) + + 
self.assertIsNone(sot.add_private_access(self.sess, "a")) + + url = f"types/{sot.id}/action" + body = {"addProjectAccess": {"project": "a"}} + self.sess.post.assert_called_with(url, json=body) + + def test_remove_private_access(self): + sot = type.Type(**TYPE) + + self.assertIsNone(sot.remove_private_access(self.sess, "a")) + + url = f"types/{sot.id}/action" + body = {"removeProjectAccess": {"project": "a"}} + self.sess.post.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/block_storage/v2/test_volume.py b/openstack/tests/unit/block_storage/v2/test_volume.py new file mode 100644 index 0000000000..2672ebf366 --- /dev/null +++ b/openstack/tests/unit/block_storage/v2/test_volume.py @@ -0,0 +1,374 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v2 import volume +from openstack.tests.unit import base + +FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" +IMAGE_METADATA = { + 'container_format': 'bare', + 'min_ram': '64', + 'disk_format': 'qcow2', + 'image_name': 'TestVM', + 'image_id': '625d4f2c-cf67-4af3-afb6-c7220f766947', + 'checksum': '64d7c1cd2b6f60c92c14662941cb7913', + 'min_disk': '0', + 'size': '13167616', +} + +VOLUME = { + "status": "creating", + "name": "my_volume", + "attachments": [], + "availability_zone": "nova", + "bootable": "false", + "created_at": "2015-03-09T12:14:57.233772", + "updated_at": None, + "description": "something", + "volume_type": "some_type", + "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", + "source_volid": None, + "imageRef": "some_image", + "metadata": {}, + "volume_image_metadata": IMAGE_METADATA, + "id": FAKE_ID, + "size": 10, + "os-vol-host-attr:host": "127.0.0.1", + "os-vol-tenant-attr:tenant_id": "some tenant", + "os-vol-mig-status-attr:migstat": "done", + "os-vol-mig-status-attr:name_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", + "replication_status": "nah", + "os-volume-replication:extended_status": "really nah", + "consistencygroup_id": "123asf-asdf123", + "os-volume-replication:driver_data": "ahasadfasdfasdfasdfsdf", + "encrypted": "false", + "OS-SCH-HNT:scheduler_hints": { + "same_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + ] + }, +} + + +class TestVolume(base.TestCase): + def test_basic(self): + sot = volume.Volume(VOLUME) + self.assertEqual("volume", sot.resource_key) + self.assertEqual("volumes", sot.resources_key) + self.assertEqual("/volumes", sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + "name": "name", + "status": 
"status", + "all_projects": "all_tenants", + "project_id": "project_id", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = volume.Volume(**VOLUME) + self.assertEqual(VOLUME["id"], sot.id) + self.assertEqual(VOLUME["status"], sot.status) + self.assertEqual(VOLUME["attachments"], sot.attachments) + self.assertEqual(VOLUME["availability_zone"], sot.availability_zone) + self.assertFalse(sot.is_bootable) + self.assertEqual(VOLUME["created_at"], sot.created_at) + self.assertEqual(VOLUME["updated_at"], sot.updated_at) + self.assertEqual(VOLUME["description"], sot.description) + self.assertEqual(VOLUME["volume_type"], sot.volume_type) + self.assertEqual(VOLUME["snapshot_id"], sot.snapshot_id) + self.assertEqual(VOLUME["source_volid"], sot.source_volume_id) + self.assertEqual(VOLUME["metadata"], sot.metadata) + self.assertEqual( + VOLUME["volume_image_metadata"], sot.volume_image_metadata + ) + self.assertEqual(VOLUME["size"], sot.size) + self.assertEqual(VOLUME["imageRef"], sot.image_id) + self.assertEqual(VOLUME["os-vol-host-attr:host"], sot.host) + self.assertEqual( + VOLUME["os-vol-tenant-attr:tenant_id"], sot.project_id + ) + self.assertEqual( + VOLUME["os-vol-mig-status-attr:migstat"], sot.migration_status + ) + self.assertEqual( + VOLUME["os-vol-mig-status-attr:name_id"], sot.migration_id + ) + self.assertEqual(VOLUME["replication_status"], sot.replication_status) + self.assertEqual( + VOLUME["os-volume-replication:extended_status"], + sot.extended_replication_status, + ) + self.assertEqual( + VOLUME["consistencygroup_id"], sot.consistency_group_id + ) + self.assertEqual( + VOLUME["os-volume-replication:driver_data"], + sot.replication_driver_data, + ) + self.assertDictEqual( + VOLUME["OS-SCH-HNT:scheduler_hints"], sot.scheduler_hints + ) + self.assertFalse(sot.is_encrypted) + + +class TestVolumeActions(TestVolume): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None 
+ self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_extend(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.extend(self.sess, '20')) + + url = f'volumes/{FAKE_ID}/action' + body = {"os-extend": {"new_size": "20"}} + self.sess.post.assert_called_with(url, json=body) + + def test_set_volume_readonly(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_readonly(self.sess, True)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-update_readonly_flag': {'readonly': True}} + self.sess.post.assert_called_with(url, json=body) + + def test_set_volume_readonly_false(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_readonly(self.sess, False)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-update_readonly_flag': {'readonly': False}} + self.sess.post.assert_called_with(url, json=body) + + def test_set_volume_bootable(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_bootable_status(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-set_bootable': {'bootable': True}} + self.sess.post.assert_called_with(url, json=body) + + def test_set_volume_bootable_false(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_bootable_status(self.sess, False)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-set_bootable': {'bootable': False}} + self.sess.post.assert_called_with(url, json=body) + + def test_set_image_metadata(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'})) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-set_image_metadata': {'metadata': {'foo': 'bar'}}} + self.sess.post.assert_called_with(url, json=body) + + def test_delete_image_metadata(self): + _volume = copy.deepcopy(VOLUME) + 
_volume['metadata'] = { + 'foo': 'bar', + 'baz': 'wow', + } + sot = volume.Volume(**_volume) + + self.assertIsNone(sot.delete_image_metadata(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body_a = {'os-unset_image_metadata': 'foo'} + body_b = {'os-unset_image_metadata': 'baz'} + self.sess.post.assert_has_calls( + [ + mock.call(url, json=body_a), + mock.call(url, json=body_b), + ] + ) + + def test_delete_image_metadata_item(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-unset_image_metadata': 'foo'} + self.sess.post.assert_called_with(url, json=body) + + def test_reset_status(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3')) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-reset_status': { + 'status': '1', + 'attach_status': '2', + 'migration_status': '3', + } + } + self.sess.post.assert_called_with(url, json=body) + + def test_reset_status__single_option(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.reset_status(self.sess, status='1')) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-reset_status': { + 'status': '1', + } + } + self.sess.post.assert_called_with(url, json=body) + + def test_attach_instance(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.attach(self.sess, '1', '2')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}} + self.sess.post.assert_called_with(url, json=body) + + def test_detach(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.detach(self.sess, '1')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-detach': {'attachment_id': '1'}} + self.sess.post.assert_called_with(url, json=body) + + def test_detach_force(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.detach(self.sess, '1', force=True)) + + url = f'volumes/{FAKE_ID}/action' + body = 
{'os-force_detach': {'attachment_id': '1'}} + self.sess.post.assert_called_with(url, json=body) + + def test_unmanage(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.unmanage(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-unmanage': None} + self.sess.post.assert_called_with(url, json=body) + + def test_retype(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.retype(self.sess, '1')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-retype': {'new_type': '1'}} + self.sess.post.assert_called_with(url, json=body) + + def test_retype_mp(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}} + self.sess.post.assert_called_with(url, json=body) + + def test_migrate(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.migrate(self.sess, host='1')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-migrate_volume': {'host': '1'}} + self.sess.post.assert_called_with(url, json=body) + + def test_migrate_flags(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone( + sot.migrate( + self.sess, host='1', force_host_copy=True, lock_volume=True + ) + ) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-migrate_volume': { + 'host': '1', + 'force_host_copy': True, + 'lock_volume': True, + } + } + self.sess.post.assert_called_with(url, json=body) + + def test_complete_migration(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1')) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-migrate_volume_completion': {'new_volume': '1', 'error': False} + } + self.sess.post.assert_called_with(url, json=body) + + def test_complete_migration_error(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone( + sot.complete_migration(self.sess, new_volume_id='1', error=True) + ) + + url = 
f'volumes/{FAKE_ID}/action' + body = { + 'os-migrate_volume_completion': {'new_volume': '1', 'error': True} + } + self.sess.post.assert_called_with(url, json=body) + + def test_force_delete(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.force_delete(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-force_delete': None} + self.sess.post.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/block_storage/v3/__init__.py b/openstack/tests/unit/block_storage/v3/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/block_storage/v3/test_attachment.py b/openstack/tests/unit/block_storage/v3/test_attachment.py new file mode 100644 index 0000000000..2f6639c99e --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_attachment.py @@ -0,0 +1,193 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import attachment +from openstack import resource +from openstack.tests.unit import base + + +FAKE_ID = "92dc3671-d0ab-4370-8058-c88a71661ec5" +FAKE_VOL_ID = "138e4a2e-85ef-4f96-a0d0-9f3ef9f32987" +FAKE_INSTANCE_UUID = "ee9ae89e-d4fc-4c95-93ad-d9e80f240cae" + +CONNECTION_INFO = { + "access_mode": "rw", + "attachment_id": "92dc3671-d0ab-4370-8058-c88a71661ec5", + "auth_enabled": True, + "auth_username": "cinder", + "cacheable": False, + "cluster_name": "ceph", + "discard": True, + "driver_volume_type": "rbd", + "encrypted": False, + "hosts": ["127.0.0.1"], + "name": "volumes/volume-138e4a2e-85ef-4f96-a0d0-9f3ef9f32987", + "ports": ["6789"], + "secret_type": "ceph", + "secret_uuid": "e5d27872-64ab-4d8c-8c25-4dbdc522fbbf", + "volume_id": "138e4a2e-85ef-4f96-a0d0-9f3ef9f32987", +} + +CONNECTOR = { + "do_local_attach": False, + "host": "devstack-VirtualBox", + "initiator": "iqn.2005-03.org.open-iscsi:1f6474a01f9a", + "ip": "127.0.0.1", + "multipath": False, + "nqn": "nqn.2014-08.org.nvmexpress:uuid:4dfe457e-6206-4a61-b547-5a9d0e2fa557", # noqa: E501 + "nvme_native_multipath": False, + "os_type": "linux", + "platform": "x86_64", + "system_uuid": "2f4d1bf2-8a9e-864f-80ec-d265222bf145", + "uuid": "87c73a20-e7f9-4370-ad85-5829b54675d7", +} + +ATTACHMENT = { + "id": FAKE_ID, + "status": "attached", + "instance": FAKE_INSTANCE_UUID, + "volume_id": FAKE_VOL_ID, + "attached_at": "2023-07-07T10:30:40.000000", + "detached_at": None, + "attach_mode": "rw", + "connection_info": CONNECTION_INFO, +} + + +class TestAttachment(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.get = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.put = 
mock.Mock(return_value=self.resp) + self.sess.default_microversion = "3.54" + + def test_basic(self): + sot = attachment.Attachment(ATTACHMENT) + self.assertEqual("attachment", sot.resource_key) + self.assertEqual("attachments", sot.resources_key) + self.assertEqual("/attachments", sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_commit) + self.assertIsNotNone(sot._max_microversion) + + self.assertDictEqual( + { + "id": "id", + "status": "status", + "project_id": "project_id", + "volume_id": "volume_id", + "instance_id": "instance_id", + "all_projects": "all_tenants", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_create_resource(self): + sot = attachment.Attachment(**ATTACHMENT) + self.assertEqual(ATTACHMENT["id"], sot.id) + self.assertEqual(ATTACHMENT["status"], sot.status) + self.assertEqual(ATTACHMENT["instance"], sot.instance) + self.assertEqual(ATTACHMENT["volume_id"], sot.volume_id) + self.assertEqual(ATTACHMENT["attached_at"], sot.attached_at) + self.assertEqual(ATTACHMENT["detached_at"], sot.detached_at) + self.assertEqual(ATTACHMENT["attach_mode"], sot.attach_mode) + self.assertEqual(ATTACHMENT["connection_info"], sot.connection_info) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + @mock.patch.object(resource.Resource, '_translate_response') + def test_create_no_mode_no_instance_id(self, mock_translate, mock_mv): + self.sess.default_microversion = "3.27" + mock_mv.return_value = False + sot = attachment.Attachment() + FAKE_MODE = "rw" + sot.create( + self.sess, + volume_id=FAKE_VOL_ID, + connector=CONNECTOR, + instance=None, + mode=FAKE_MODE, + ) + self.sess.post.assert_called_with( + '/attachments', + json={'attachment': {}}, + headers={}, + microversion="3.27", + params={ + 'volume_id': FAKE_VOL_ID, + 'connector': CONNECTOR, + 'instance': None, + 
'mode': 'rw', + }, + ) + self.sess.default_microversion = "3.71" + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + @mock.patch.object(resource.Resource, '_translate_response') + def test_create_with_mode_with_instance_id(self, mock_translate, mock_mv): + sot = attachment.Attachment() + FAKE_MODE = "rw" + sot.create( + self.sess, + volume_id=FAKE_VOL_ID, + connector=CONNECTOR, + instance=FAKE_INSTANCE_UUID, + mode=FAKE_MODE, + ) + self.sess.post.assert_called_with( + '/attachments', + json={'attachment': {}}, + headers={}, + microversion="3.54", + params={ + 'volume_id': FAKE_VOL_ID, + 'connector': CONNECTOR, + 'instance': FAKE_INSTANCE_UUID, + 'mode': FAKE_MODE, + }, + ) + + @mock.patch.object(resource.Resource, '_translate_response') + def test_complete(self, mock_translate): + sot = attachment.Attachment() + sot.id = FAKE_ID + sot.complete(self.sess) + self.sess.post.assert_called_with( + f'/attachments/{FAKE_ID}/action', + json={ + 'os-complete': '92dc3671-d0ab-4370-8058-c88a71661ec5', + }, + microversion="3.54", + ) diff --git a/openstack/tests/unit/block_storage/v3/test_availability_zone.py b/openstack/tests/unit/block_storage/v3/test_availability_zone.py new file mode 100644 index 0000000000..f7431729c1 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_availability_zone.py @@ -0,0 +1,36 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v3 import availability_zone as az +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "id": IDENTIFIER, + "zoneState": {"available": True}, + "zoneName": "zone1", +} + + +class TestAvailabilityZone(base.TestCase): + def test_basic(self): + sot = az.AvailabilityZone() + self.assertEqual('availabilityZoneInfo', sot.resources_key) + self.assertEqual('/os-availability-zone', sot.base_path) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = az.AvailabilityZone(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['zoneState'], sot.state) + self.assertEqual(EXAMPLE['zoneName'], sot.name) diff --git a/openstack/tests/unit/block_storage/v3/test_backup.py b/openstack/tests/unit/block_storage/v3/test_backup.py new file mode 100644 index 0000000000..cff2e17428 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_backup.py @@ -0,0 +1,250 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import backup +from openstack import exceptions +from openstack.tests.unit import base + + +FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" + +BACKUP = { + "availability_zone": "az1", + "container": "volumebackups", + "created_at": "2018-04-02T10:35:27.000000", + "updated_at": "2018-04-03T10:35:27.000000", + "description": 'description', + "encryption_key_id": "fake_encry_id", + "fail_reason": 'fail reason', + "id": FAKE_ID, + "name": "backup001", + "object_count": 22, + "size": 1, + "status": "available", + "volume_id": "e5185058-943a-4cb4-96d9-72c184c337d6", + "is_incremental": True, + "has_dependent_backups": False, + "os-backup-project-attr:project_id": "2c67a14be9314c5dae2ee6c4ec90cf0b", + "user_id": "515ba0dd59f84f25a6a084a45d8d93b2", + "metadata": {"key": "value"}, +} + + +class TestBackup(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.get = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = "3.64" + + def test_basic(self): + sot = backup.Backup(BACKUP) + self.assertEqual("backup", sot.resource_key) + self.assertEqual("backups", sot.resources_key) + self.assertEqual("/backups", sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertIsNotNone(sot._max_microversion) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "offset": "offset", + "project_id": "project_id", + "name": "name", + "status": "status", + "volume_id": "volume_id", + "sort_dir": "sort_dir", + "sort_key": "sort_key", + "sort": "sort", + "all_projects": 
"all_tenants", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = backup.Backup(**BACKUP) + self.assertEqual(BACKUP["id"], sot.id) + self.assertEqual(BACKUP["name"], sot.name) + self.assertEqual(BACKUP["status"], sot.status) + self.assertEqual(BACKUP["container"], sot.container) + self.assertEqual(BACKUP["availability_zone"], sot.availability_zone) + self.assertEqual(BACKUP["created_at"], sot.created_at) + self.assertEqual(BACKUP["updated_at"], sot.updated_at) + self.assertEqual(BACKUP["description"], sot.description) + self.assertEqual(BACKUP["fail_reason"], sot.fail_reason) + self.assertEqual(BACKUP["volume_id"], sot.volume_id) + self.assertEqual(BACKUP["object_count"], sot.object_count) + self.assertEqual(BACKUP["is_incremental"], sot.is_incremental) + self.assertEqual(BACKUP["size"], sot.size) + self.assertEqual( + BACKUP["has_dependent_backups"], sot.has_dependent_backups + ) + self.assertEqual( + BACKUP['os-backup-project-attr:project_id'], sot.project_id + ) + self.assertEqual(BACKUP['metadata'], sot.metadata) + self.assertEqual(BACKUP['user_id'], sot.user_id) + self.assertEqual(BACKUP['encryption_key_id'], sot.encryption_key_id) + + def test_create_incremental(self): + sot = backup.Backup(is_incremental=True) + sot2 = backup.Backup(is_incremental=False) + + create_response = mock.Mock() + create_response.status_code = 200 + create_response.json.return_value = {} + create_response.headers = {} + self.sess.post.return_value = create_response + + sot.create(self.sess) + self.sess.post.assert_called_with( + '/backups', + headers={}, + json={ + 'backup': { + 'incremental': True, + } + }, + microversion="3.64", + params={}, + ) + + sot2.create(self.sess) + self.sess.post.assert_called_with( + '/backups', + headers={}, + json={ + 'backup': { + 'incremental': False, + } + }, + microversion="3.64", + params={}, + ) + + def test_export(self): + sot = backup.Backup(**BACKUP) + + create_response = mock.Mock() + create_response.status_code = 200 
+ create_response.json.return_value = {} + create_response.headers = {} + self.sess.get.return_value = create_response + url = f'backups/{FAKE_ID}/export_record' + sot.export(self.sess) + self.sess.get.assert_called_with(url) + + def test_restore(self): + sot = backup.Backup(**BACKUP) + + restore_response = mock.Mock() + restore_response.status_code = 202 + restore_response.json.return_value = { + "restore": { + "backup_id": "back", + "volume_id": "vol", + "volume_name": "name", + } + } + restore_response.headers = {} + self.sess.post.return_value = restore_response + + self.assertEqual(sot, sot.restore(self.sess, 'vol', 'name')) + + url = f'backups/{FAKE_ID}/restore' + body = {"restore": {"volume_id": "vol", "name": "name"}} + self.sess.post.assert_called_with(url, json=body) + + def test_restore_name(self): + sot = backup.Backup(**BACKUP) + + restore_response = mock.Mock() + restore_response.status_code = 202 + restore_response.json.return_value = { + "restore": { + "backup_id": "back", + "volume_id": "vol", + "volume_name": "name", + } + } + restore_response.headers = {} + self.sess.post.return_value = restore_response + + self.assertEqual(sot, sot.restore(self.sess, name='name')) + + url = f'backups/{FAKE_ID}/restore' + body = {"restore": {"name": "name"}} + self.sess.post.assert_called_with(url, json=body) + + def test_restore_vol_id(self): + sot = backup.Backup(**BACKUP) + + restore_response = mock.Mock() + restore_response.status_code = 202 + restore_response.json.return_value = { + "restore": { + "backup_id": "back", + "volume_id": "vol", + "volume_name": "name", + } + } + restore_response.headers = {} + self.sess.post.return_value = restore_response + + self.assertEqual(sot, sot.restore(self.sess, volume_id='vol')) + + url = f'backups/{FAKE_ID}/restore' + body = {"restore": {"volume_id": "vol"}} + self.sess.post.assert_called_with(url, json=body) + + def test_restore_no_params(self): + sot = backup.Backup(**BACKUP) + + 
self.assertRaises(exceptions.SDKException, sot.restore, self.sess) + + def test_force_delete(self): + sot = backup.Backup(**BACKUP) + + self.assertIsNone(sot.force_delete(self.sess)) + + url = f'backups/{FAKE_ID}/action' + body = {'os-force_delete': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_reset_status(self): + sot = backup.Backup(**BACKUP) + + self.assertIsNone(sot.reset_status(self.sess, 'new_status')) + + url = f'backups/{FAKE_ID}/action' + body = {'os-reset_status': {'status': 'new_status'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) diff --git a/openstack/tests/unit/block_storage/v3/test_block_storage_summary.py b/openstack/tests/unit/block_storage/v3/test_block_storage_summary.py new file mode 100644 index 0000000000..32b00689fa --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_block_storage_summary.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from openstack.block_storage.v3 import block_storage_summary as summary +from openstack.tests.unit import base + + +BLOCK_STORAGE_SUMMARY_312 = { + "total_size": "4", + "total_count": "2", + "metadata": {"key1": "value1"}, +} + + +BLOCK_STORAGE_SUMMARY_326 = copy.deepcopy(BLOCK_STORAGE_SUMMARY_312) +BLOCK_STORAGE_SUMMARY_326['metadata'] = {"key1": "value1"} + + +class TestBlockStorageSummary(base.TestCase): + def test_basic(self): + summary_resource = summary.BlockStorageSummary() + self.assertEqual(None, summary_resource.resource_key) + self.assertEqual(None, summary_resource.resources_key) + self.assertEqual("/volumes/summary", summary_resource.base_path) + self.assertTrue(summary_resource.allow_fetch) + self.assertFalse(summary_resource.allow_create) + self.assertFalse(summary_resource.allow_commit) + self.assertFalse(summary_resource.allow_delete) + self.assertFalse(summary_resource.allow_list) + + def test_get_summary_312(self): + summary_resource = summary.BlockStorageSummary( + **BLOCK_STORAGE_SUMMARY_312 + ) + self.assertEqual( + BLOCK_STORAGE_SUMMARY_312["total_size"], + summary_resource.total_size, + ) + self.assertEqual( + BLOCK_STORAGE_SUMMARY_312["total_count"], + summary_resource.total_count, + ) + + def test_get_summary_326(self): + summary_resource = summary.BlockStorageSummary( + **BLOCK_STORAGE_SUMMARY_326 + ) + self.assertEqual( + BLOCK_STORAGE_SUMMARY_326["total_size"], + summary_resource.total_size, + ) + self.assertEqual( + BLOCK_STORAGE_SUMMARY_326["total_count"], + summary_resource.total_count, + ) + self.assertEqual( + BLOCK_STORAGE_SUMMARY_326["metadata"], summary_resource.metadata + ) diff --git a/openstack/tests/unit/block_storage/v3/test_capabilities.py b/openstack/tests/unit/block_storage/v3/test_capabilities.py new file mode 100644 index 0000000000..0eaef69abd --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_capabilities.py @@ -0,0 +1,102 @@ +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import capabilities +from openstack.tests.unit import base + +CAPABILITIES = { + "namespace": "OS::Storage::Capabilities::fake", + "vendor_name": "OpenStack", + "volume_backend_name": "lvmdriver-1", + "pool_name": "pool", + "driver_version": "2.0.0", + "storage_protocol": "iSCSI", + "display_name": "Capabilities of Cinder LVM driver", + "description": "These are volume type options", + "visibility": "public", + "replication_targets": [], + "properties": { + "compression": { + "title": "Compression", + "description": "Enables compression.", + "type": "boolean", + }, + "qos": { + "title": "QoS", + "description": "Enables QoS.", + "type": "boolean", + }, + "replication": { + "title": "Replication", + "description": "Enables replication.", + "type": "boolean", + }, + "thin_provisioning": { + "title": "Thin Provisioning", + "description": "Sets thin provisioning.", + "type": "boolean", + }, + }, +} + + +class TestCapabilites(base.TestCase): + def test_basic(self): + capabilities_resource = capabilities.Capabilities() + self.assertEqual(None, capabilities_resource.resource_key) + self.assertEqual(None, capabilities_resource.resources_key) + self.assertEqual("/capabilities", capabilities_resource.base_path) + self.assertTrue(capabilities_resource.allow_fetch) + self.assertFalse(capabilities_resource.allow_create) + self.assertFalse(capabilities_resource.allow_commit) + self.assertFalse(capabilities_resource.allow_delete) + 
self.assertFalse(capabilities_resource.allow_list) + + def test_make_capabilities(self): + capabilities_resource = capabilities.Capabilities(**CAPABILITIES) + self.assertEqual( + CAPABILITIES["description"], capabilities_resource.description + ) + self.assertEqual( + CAPABILITIES["display_name"], capabilities_resource.display_name + ) + self.assertEqual( + CAPABILITIES["driver_version"], + capabilities_resource.driver_version, + ) + self.assertEqual( + CAPABILITIES["namespace"], capabilities_resource.namespace + ) + self.assertEqual( + CAPABILITIES["pool_name"], capabilities_resource.pool_name + ) + self.assertEqual( + CAPABILITIES["properties"], capabilities_resource.properties + ) + self.assertEqual( + CAPABILITIES["replication_targets"], + capabilities_resource.replication_targets, + ) + self.assertEqual( + CAPABILITIES["storage_protocol"], + capabilities_resource.storage_protocol, + ) + self.assertEqual( + CAPABILITIES["vendor_name"], capabilities_resource.vendor_name + ) + self.assertEqual( + CAPABILITIES["visibility"], capabilities_resource.visibility + ) + self.assertEqual( + CAPABILITIES["volume_backend_name"], + capabilities_resource.volume_backend_name, + ) diff --git a/openstack/tests/unit/block_storage/v3/test_default_type.py b/openstack/tests/unit/block_storage/v3/test_default_type.py new file mode 100644 index 0000000000..447eaac942 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_default_type.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import default_type +from openstack.tests.unit import base + + +PROJECT_ID = 'd5e678b5-f88b-411c-876b-f6ec2ba999bf' +VOLUME_TYPE_ID = 'adef1cf8-736e-4b62-a2db-f8b6b6c1d953' + +DEFAULT_TYPE = { + 'project_id': PROJECT_ID, + 'volume_type_id': VOLUME_TYPE_ID, +} + + +class TestDefaultType(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = '3.67' + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_basic(self): + sot = default_type.DefaultType(**DEFAULT_TYPE) + self.assertEqual("default_type", sot.resource_key) + self.assertEqual("default_types", sot.resources_key) + self.assertEqual("/default-types", sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_create(self): + sot = default_type.DefaultType(**DEFAULT_TYPE) + self.assertEqual(DEFAULT_TYPE["project_id"], sot.project_id) + self.assertEqual(DEFAULT_TYPE["volume_type_id"], sot.volume_type_id) diff --git a/openstack/tests/unit/block_storage/v3/test_extension.py b/openstack/tests/unit/block_storage/v3/test_extension.py new file mode 100644 index 0000000000..5c092b70e2 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_extension.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import extension +from openstack.tests.unit import base + +EXTENSION = { + "alias": "os-hosts", + "description": "Admin-only host administration.", + "links": [], + "name": "Hosts", + "updated": "2011-06-29T00:00:00+00:00", +} + + +class TestExtension(base.TestCase): + def test_basic(self): + extension_resource = extension.Extension() + self.assertEqual('extensions', extension_resource.resources_key) + self.assertEqual('/extensions', extension_resource.base_path) + self.assertFalse(extension_resource.allow_create) + self.assertFalse(extension_resource.allow_fetch) + self.assertFalse(extension_resource.allow_commit) + self.assertFalse(extension_resource.allow_delete) + self.assertTrue(extension_resource.allow_list) + + def test_make_extension(self): + extension_resource = extension.Extension(**EXTENSION) + self.assertEqual(EXTENSION['alias'], extension_resource.alias) + self.assertEqual( + EXTENSION['description'], extension_resource.description + ) + self.assertEqual(EXTENSION['links'], extension_resource.links) + self.assertEqual(EXTENSION['name'], extension_resource.name) + self.assertEqual(EXTENSION['updated'], extension_resource.updated_at) diff --git a/openstack/tests/unit/block_storage/v3/test_group.py b/openstack/tests/unit/block_storage/v3/test_group.py new file mode 100644 index 0000000000..3925d15e99 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_group.py @@ -0,0 +1,219 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the
License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import group +from openstack.tests.unit import base + +GROUP_ID = "6f519a48-3183-46cf-a32f-41815f813986" + +GROUP = { + "id": GROUP_ID, + "status": "available", + "availability_zone": "az1", + "created_at": "2015-09-16T09:28:52.000000", + "name": "first_group", + "description": "my first group", + "group_type": "29514915-5208-46ab-9ece-1cc4688ad0c1", + "volume_types": ["c4daaf47-c530-4901-b28e-f5f0a359c4e6"], + "volumes": ["a2cdf1ad-5497-4e57-bd7d-f573768f3d03"], + "group_snapshot_id": None, + "source_group_id": None, + "project_id": "7ccf4863071f44aeb8f141f65780c51b", +} + + +class TestGroup(base.TestCase): + def test_basic(self): + resource = group.Group() + self.assertEqual("group", resource.resource_key) + self.assertEqual("groups", resource.resources_key) + self.assertEqual("/groups", resource.base_path) + self.assertTrue(resource.allow_create) + self.assertTrue(resource.allow_fetch) + self.assertTrue(resource.allow_delete) + self.assertTrue(resource.allow_commit) + self.assertTrue(resource.allow_list) + + def test_make_resource(self): + resource = group.Group(**GROUP) + self.assertEqual(GROUP["id"], resource.id) + self.assertEqual(GROUP["status"], resource.status) + self.assertEqual( + GROUP["availability_zone"], resource.availability_zone + ) + self.assertEqual(GROUP["created_at"], resource.created_at) + self.assertEqual(GROUP["name"], resource.name) + self.assertEqual(GROUP["description"], resource.description) + 
self.assertEqual(GROUP["group_type"], resource.group_type) + self.assertEqual(GROUP["volume_types"], resource.volume_types) + self.assertEqual(GROUP["volumes"], resource.volumes) + self.assertEqual( + GROUP["group_snapshot_id"], resource.group_snapshot_id + ) + self.assertEqual(GROUP["source_group_id"], resource.source_group_id) + self.assertEqual(GROUP["project_id"], resource.project_id) + + +class TestGroupAction(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.get = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = '3.38' + + def test_delete(self): + sot = group.Group(**GROUP) + + self.assertIsNone(sot.delete(self.sess)) + + url = f'groups/{GROUP_ID}/action' + body = {'delete': {'delete-volumes': False}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_enable_replication(self): + sot = group.Group(**GROUP) + + ret = sot.enable_replication(self.sess) + self.assertIsNone(ret) + + url = f'groups/{GROUP_ID}/action' + body = {'enable_replication': None} + self.sess.post.assert_called_with( + url, + json=body, + microversion=sot._max_microversion, + ) + + def test_disable_replication(self): + sot = group.Group(**GROUP) + + ret = sot.disable_replication(self.sess) + self.assertIsNone(ret) + + url = f'groups/{GROUP_ID}/action' + body = {'disable_replication': None} + self.sess.post.assert_called_with( + url, + json=body, + microversion=sot._max_microversion, + ) + + def test_failover_replication(self): + sot = group.Group(**GROUP) + + ret = sot.failover_replication( + self.sess, allowed_attached_volume=True, secondary_backend_id=None + ) + self.assertIsNone(ret) + + url = f'groups/{GROUP_ID}/action' + body = { + 'modify_body_for_action': { + 
'allow_attached_volume': True, + 'secondary_backend_id': None, + } + } + self.sess.post.assert_called_with( + url, + json=body, + microversion=sot._max_microversion, + ) + + def test_fetch_replication_targets(self): + resp = mock.Mock() + resp.links = {} + resp.json = mock.Mock( + return_value={ + 'replication_targets': [ + { + 'backend_id': 'vendor-id-1', + 'unique_key': 'val1', + }, + ], + } + ) + resp.status_code = 200 + + self.sess.post = mock.Mock(return_value=resp) + + sot = group.Group(**GROUP) + result = sot.fetch_replication_targets(self.sess) + self.assertEqual( + [ + { + 'backend_id': 'vendor-id-1', + 'unique_key': 'val1', + }, + ], + sot.replication_targets, + ) + self.assertEqual(sot, result) + + def test_reset_status(self): + sot = group.Group(**GROUP) + + self.assertIsNone(sot.reset_status(self.sess, 'new_status')) + + url = f'groups/{GROUP_ID}/action' + body = {'reset_status': {'status': 'new_status'}} + self.sess.post.assert_called_with( + url, + json=body, + microversion=sot._max_microversion, + ) + + def test_create_from_source(self): + resp = mock.Mock() + resp.body = {'group': copy.deepcopy(GROUP)} + resp.json = mock.Mock(return_value=resp.body) + resp.headers = {} + resp.status_code = 202 + + self.sess.post = mock.Mock(return_value=resp) + + sot = group.Group.create_from_source( + self.sess, + group_snapshot_id='9a591346-e595-4bc1-94e7-08f264406b63', + source_group_id='6c5259f6-42ed-4e41-8ffe-e1c667ae9dff', + name='group_from_source', + description='a group from source', + ) + self.assertIsNotNone(sot) + + url = 'groups/action' + body = { + 'create-from-src': { + 'name': 'group_from_source', + 'description': 'a group from source', + 'group_snapshot_id': '9a591346-e595-4bc1-94e7-08f264406b63', + 'source_group_id': '6c5259f6-42ed-4e41-8ffe-e1c667ae9dff', + }, + } + self.sess.post.assert_called_with( + url, + json=body, + microversion=sot._max_microversion, + ) diff --git a/openstack/tests/unit/block_storage/v3/test_group_snapshot.py 
b/openstack/tests/unit/block_storage/v3/test_group_snapshot.py new file mode 100644 index 0000000000..0db81196ea --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_group_snapshot.py @@ -0,0 +1,84 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import group_snapshot +from openstack.tests.unit import base + +GROUP_SNAPSHOT = { + "id": "6f519a48-3183-46cf-a32f-41815f813986", + "group_id": "6f519a48-3183-46cf-a32f-41815f814444", + "status": "available", + "created_at": "2015-09-16T09:28:52.000000", + "name": "my_group_snapshot1", + "description": "my first group snapshot", + "group_type_id": "7270c56e-6354-4528-8e8b-f54dee2232c8", + "project_id": "7ccf4863071f44aeb8f141f65780c51b", +} + + +class TestGroupSnapshot(base.TestCase): + def test_basic(self): + resource = group_snapshot.GroupSnapshot() + self.assertEqual("group_snapshot", resource.resource_key) + self.assertEqual("group_snapshots", resource.resources_key) + self.assertEqual("/group_snapshots", resource.base_path) + self.assertTrue(resource.allow_create) + self.assertTrue(resource.allow_fetch) + self.assertTrue(resource.allow_delete) + self.assertTrue(resource.allow_list) + self.assertFalse(resource.allow_commit) + + def test_make_resource(self): + resource = group_snapshot.GroupSnapshot(**GROUP_SNAPSHOT) + self.assertEqual(GROUP_SNAPSHOT["created_at"], resource.created_at) + 
self.assertEqual(GROUP_SNAPSHOT["description"], resource.description) + self.assertEqual(GROUP_SNAPSHOT["group_id"], resource.group_id) + self.assertEqual( + GROUP_SNAPSHOT["group_type_id"], resource.group_type_id + ) + self.assertEqual(GROUP_SNAPSHOT["id"], resource.id) + self.assertEqual(GROUP_SNAPSHOT["name"], resource.name) + self.assertEqual(GROUP_SNAPSHOT["project_id"], resource.project_id) + self.assertEqual(GROUP_SNAPSHOT["status"], resource.status) + + +class TestGroupSnapshotActions(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.get = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = '3.0' + + def test_reset_status(self): + resource = group_snapshot.GroupSnapshot(**GROUP_SNAPSHOT) + + self.assertIsNotNone(resource.reset_status(self.sess, 'new_status')) + + url = f'group_snapshots/{GROUP_SNAPSHOT["id"]}/action' + body = {'reset_status': {'status': 'new_status'}} + self.sess.post.assert_called_with( + url, + json=body, + headers={'Accept': ''}, + microversion='3.0', + ) diff --git a/openstack/tests/unit/block_storage/v3/test_group_type.py b/openstack/tests/unit/block_storage/v3/test_group_type.py new file mode 100644 index 0000000000..842e4de434 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_group_type.py @@ -0,0 +1,143 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import group_type +from openstack.tests.unit import base + +GROUP_TYPE = { + "id": "6685584b-1eac-4da6-b5c3-555430cf68ff", + "name": "grp-type-001", + "description": "group type 001", + "is_public": True, + "group_specs": {"consistent_group_snapshot_enabled": " False"}, +} + + +class TestGroupType(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = 1 + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_basic(self): + resource = group_type.GroupType() + self.assertEqual("group_type", resource.resource_key) + self.assertEqual("group_types", resource.resources_key) + self.assertEqual("/group_types", resource.base_path) + self.assertTrue(resource.allow_create) + self.assertTrue(resource.allow_fetch) + self.assertTrue(resource.allow_delete) + self.assertTrue(resource.allow_commit) + self.assertTrue(resource.allow_list) + + def test_make_resource(self): + resource = group_type.GroupType(**GROUP_TYPE) + self.assertEqual(GROUP_TYPE["id"], resource.id) + self.assertEqual(GROUP_TYPE["name"], resource.name) + self.assertEqual(GROUP_TYPE["description"], resource.description) + self.assertEqual(GROUP_TYPE["is_public"], resource.is_public) + self.assertEqual(GROUP_TYPE["group_specs"], resource.group_specs) + + def test_fetch_group_specs(self): + sot = group_type.GroupType(**GROUP_TYPE) + resp = mock.Mock() + resp.body = {'group_specs': {'a': 'b', 'c': 'd'}} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.get = mock.Mock(return_value=resp) + + rsp = sot.fetch_group_specs(self.sess) + + self.sess.get.assert_called_with( + f"group_types/{GROUP_TYPE['id']}/group_specs", + microversion=self.sess.default_microversion, + ) + + 
self.assertEqual(resp.body['group_specs'], rsp.group_specs) + self.assertIsInstance(rsp, group_type.GroupType) + + def test_create_group_specs(self): + sot = group_type.GroupType(**GROUP_TYPE) + specs = {'a': 'b', 'c': 'd'} + resp = mock.Mock() + resp.body = {'group_specs': specs} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.post = mock.Mock(return_value=resp) + + rsp = sot.create_group_specs(self.sess, specs) + + self.sess.post.assert_called_with( + f"group_types/{GROUP_TYPE['id']}/group_specs", + json={'group_specs': specs}, + microversion=self.sess.default_microversion, + ) + + self.assertEqual(resp.body['group_specs'], rsp.group_specs) + self.assertIsInstance(rsp, group_type.GroupType) + + def test_get_group_specs_property(self): + sot = group_type.GroupType(**GROUP_TYPE) + resp = mock.Mock() + resp.body = {'a': 'b'} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.get = mock.Mock(return_value=resp) + + rsp = sot.get_group_specs_property(self.sess, 'a') + + self.sess.get.assert_called_with( + f"group_types/{GROUP_TYPE['id']}/group_specs/a", + microversion=self.sess.default_microversion, + ) + + self.assertEqual('b', rsp) + + def test_update_group_specs_property(self): + sot = group_type.GroupType(**GROUP_TYPE) + resp = mock.Mock() + resp.body = {'a': 'b'} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.put = mock.Mock(return_value=resp) + + rsp = sot.update_group_specs_property(self.sess, 'a', 'b') + + self.sess.put.assert_called_with( + f"group_types/{GROUP_TYPE['id']}/group_specs/a", + json={'a': 'b'}, + microversion=self.sess.default_microversion, + ) + + self.assertEqual('b', rsp) + + def test_delete_group_specs_property(self): + sot = group_type.GroupType(**GROUP_TYPE) + resp = mock.Mock() + resp.body = None + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.delete = mock.Mock(return_value=resp) + + rsp = 
sot.delete_group_specs_property(self.sess, 'a') + + self.sess.delete.assert_called_with( + f"group_types/{GROUP_TYPE['id']}/group_specs/a", + microversion=self.sess.default_microversion, + ) + + self.assertIsNone(rsp) diff --git a/openstack/tests/unit/block_storage/v3/test_limits.py b/openstack/tests/unit/block_storage/v3/test_limits.py new file mode 100644 index 0000000000..b0638cb627 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_limits.py @@ -0,0 +1,206 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v3 import limits +from openstack.tests.unit import base + +ABSOLUTE_LIMIT = { + "totalSnapshotsUsed": 1, + "maxTotalBackups": 10, + "maxTotalVolumeGigabytes": 1000, + "maxTotalSnapshots": 10, + "maxTotalBackupGigabytes": 1000, + "totalBackupGigabytesUsed": 1, + "maxTotalVolumes": 10, + "totalVolumesUsed": 2, + "totalBackupsUsed": 3, + "totalGigabytesUsed": 2, +} + +RATE_LIMIT = { + "verb": "POST", + "value": 80, + "remaining": 80, + "unit": "MINUTE", + "next-available": "2021-02-23T22:08:00Z", +} + +RATE_LIMITS = {"regex": ".*", "uri": "*", "limit": [RATE_LIMIT]} + +LIMIT = {"rate": [RATE_LIMITS], "absolute": ABSOLUTE_LIMIT} + + +class TestAbsoluteLimit(base.TestCase): + def test_basic(self): + limit_resource = limits.AbsoluteLimit() + self.assertIsNone(limit_resource.resource_key) + self.assertIsNone(limit_resource.resources_key) + self.assertEqual('', limit_resource.base_path) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_list) + + def test_make_absolute_limit(self): + limit_resource = limits.AbsoluteLimit(**ABSOLUTE_LIMIT) + self.assertEqual( + ABSOLUTE_LIMIT['totalSnapshotsUsed'], + limit_resource.total_snapshots_used, + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalBackups'], limit_resource.max_total_backups + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalVolumeGigabytes'], + limit_resource.max_total_volume_gigabytes, + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalSnapshots'], + limit_resource.max_total_snapshots, + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalBackupGigabytes'], + limit_resource.max_total_backup_gigabytes, + ) + self.assertEqual( + ABSOLUTE_LIMIT['totalBackupGigabytesUsed'], + limit_resource.total_backup_gigabytes_used, + ) + self.assertEqual( + ABSOLUTE_LIMIT['maxTotalVolumes'], limit_resource.max_total_volumes + ) + 
self.assertEqual( + ABSOLUTE_LIMIT['totalVolumesUsed'], + limit_resource.total_volumes_used, + ) + self.assertEqual( + ABSOLUTE_LIMIT['totalBackupsUsed'], + limit_resource.total_backups_used, + ) + self.assertEqual( + ABSOLUTE_LIMIT['totalGigabytesUsed'], + limit_resource.total_gigabytes_used, + ) + + +class TestRateLimit(base.TestCase): + def test_basic(self): + limit_resource = limits.RateLimit() + self.assertIsNone(limit_resource.resource_key) + self.assertIsNone(limit_resource.resources_key) + self.assertEqual('', limit_resource.base_path) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_list) + + def test_make_rate_limit(self): + limit_resource = limits.RateLimit(**RATE_LIMIT) + self.assertEqual(RATE_LIMIT['verb'], limit_resource.verb) + self.assertEqual(RATE_LIMIT['value'], limit_resource.value) + self.assertEqual(RATE_LIMIT['remaining'], limit_resource.remaining) + self.assertEqual(RATE_LIMIT['unit'], limit_resource.unit) + self.assertEqual( + RATE_LIMIT['next-available'], limit_resource.next_available + ) + + +class TestRateLimits(base.TestCase): + def test_basic(self): + limit_resource = limits.RateLimits() + self.assertIsNone(limit_resource.resource_key) + self.assertIsNone(limit_resource.resources_key) + self.assertEqual('', limit_resource.base_path) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_list) + + def _test_rate_limit(self, expected, actual): + self.assertEqual(expected[0]['verb'], actual[0].verb) + self.assertEqual(expected[0]['value'], actual[0].value) + self.assertEqual(expected[0]['remaining'], actual[0].remaining) + self.assertEqual(expected[0]['unit'], actual[0].unit) + 
self.assertEqual( + expected[0]['next-available'], actual[0].next_available + ) + + def test_make_rate_limits(self): + limit_resource = limits.RateLimits(**RATE_LIMITS) + self.assertEqual(RATE_LIMITS['regex'], limit_resource.regex) + self.assertEqual(RATE_LIMITS['uri'], limit_resource.uri) + self._test_rate_limit(RATE_LIMITS['limit'], limit_resource.limits) + + +class TestLimit(base.TestCase): + def test_basic(self): + limit_resource = limits.Limits() + self.assertEqual('limits', limit_resource.resource_key) + self.assertEqual('/limits', limit_resource.base_path) + self.assertTrue(limit_resource.allow_fetch) + self.assertFalse(limit_resource.allow_create) + self.assertFalse(limit_resource.allow_commit) + self.assertFalse(limit_resource.allow_delete) + self.assertFalse(limit_resource.allow_list) + + def _test_absolute_limit(self, expected, actual): + self.assertEqual( + expected['totalSnapshotsUsed'], actual.total_snapshots_used + ) + self.assertEqual(expected['maxTotalBackups'], actual.max_total_backups) + self.assertEqual( + expected['maxTotalVolumeGigabytes'], + actual.max_total_volume_gigabytes, + ) + self.assertEqual( + expected['maxTotalSnapshots'], actual.max_total_snapshots + ) + self.assertEqual( + expected['maxTotalBackupGigabytes'], + actual.max_total_backup_gigabytes, + ) + self.assertEqual( + expected['totalBackupGigabytesUsed'], + actual.total_backup_gigabytes_used, + ) + self.assertEqual(expected['maxTotalVolumes'], actual.max_total_volumes) + self.assertEqual( + expected['totalVolumesUsed'], actual.total_volumes_used + ) + self.assertEqual( + expected['totalBackupsUsed'], actual.total_backups_used + ) + self.assertEqual( + expected['totalGigabytesUsed'], actual.total_gigabytes_used + ) + + def _test_rate_limit(self, expected, actual): + self.assertEqual(expected[0]['verb'], actual[0].verb) + self.assertEqual(expected[0]['value'], actual[0].value) + self.assertEqual(expected[0]['remaining'], actual[0].remaining) + self.assertEqual(expected[0]['unit'], 
actual[0].unit) + self.assertEqual( + expected[0]['next-available'], actual[0].next_available + ) + + def _test_rate_limits(self, expected, actual): + self.assertEqual(expected[0]['regex'], actual[0].regex) + self.assertEqual(expected[0]['uri'], actual[0].uri) + self._test_rate_limit(expected[0]['limit'], actual[0].limits) + + def test_make_limit(self): + limit_resource = limits.Limits(**LIMIT) + self._test_rate_limits(LIMIT['rate'], limit_resource.rate) + self._test_absolute_limit(LIMIT['absolute'], limit_resource.absolute) diff --git a/openstack/tests/unit/block_storage/v3/test_proxy.py b/openstack/tests/unit/block_storage/v3/test_proxy.py new file mode 100644 index 0000000000..669a93d822 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_proxy.py @@ -0,0 +1,1274 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock +import warnings + +from openstack.block_storage.v3 import _proxy +from openstack.block_storage.v3 import backup +from openstack.block_storage.v3 import capabilities +from openstack.block_storage.v3 import extension +from openstack.block_storage.v3 import group +from openstack.block_storage.v3 import group_snapshot +from openstack.block_storage.v3 import group_type +from openstack.block_storage.v3 import quota_class_set +from openstack.block_storage.v3 import quota_set +from openstack.block_storage.v3 import resource_filter +from openstack.block_storage.v3 import service +from openstack.block_storage.v3 import snapshot +from openstack.block_storage.v3 import stats +from openstack.block_storage.v3 import transfer +from openstack.block_storage.v3 import type +from openstack.block_storage.v3 import volume +from openstack.identity.v3 import project +from openstack import proxy as proxy_base +from openstack.tests.unit import test_proxy_base +from openstack import warnings as os_warnings + + +class TestVolumeProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestVolume(TestVolumeProxy): + def test_volume_get(self): + self.verify_get(self.proxy.get_volume, volume.Volume) + + def test_volume_find(self): + self.verify_find( + self.proxy.find_volume, + volume.Volume, + method_kwargs={'all_projects': True}, + expected_kwargs={ + "list_base_path": "/volumes/detail", + "all_projects": True, + }, + ) + + def test_volumes_detailed(self): + self.verify_list( + self.proxy.volumes, + volume.Volume, + method_kwargs={"details": True, "query": 1}, + expected_kwargs={"query": 1, "base_path": "/volumes/detail"}, + ) + + def test_volumes_not_detailed(self): + self.verify_list( + self.proxy.volumes, + volume.Volume, + method_kwargs={"details": False, "query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_volume_create_attrs(self): + self.verify_create(self.proxy.create_volume, 
volume.Volume) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_volume_delete(self, mock_mv): + self._verify( + "openstack.block_storage.v3.volume.Volume.delete", + self.proxy.delete_volume, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={"params": {"cascade": False}}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_volume_delete_force(self, mock_mv): + self._verify( + "openstack.block_storage.v3.volume.Volume.force_delete", + self.proxy.delete_volume, + method_args=["value"], + method_kwargs={"force": True}, + expected_args=[self.proxy], + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_volume_delete_force_v323(self, mock_mv): + self._verify( + "openstack.block_storage.v3.volume.Volume.delete", + self.proxy.delete_volume, + method_args=["value"], + method_kwargs={"force": True}, + expected_args=[self.proxy], + expected_kwargs={"params": {"cascade": False, "force": True}}, + ) + + def test_volume_update(self): + self.verify_update(self.proxy.update_volume, volume.Volume) + + def test_get_volume_metadata(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.fetch_metadata", + self.proxy.get_volume_metadata, + method_args=["value"], + expected_args=[self.proxy], + expected_result=volume.Volume(id="value", metadata={}), + ) + + def test_set_volume_metadata(self): + kwargs = {"a": "1", "b": "2"} + id = "an_id" + self._verify( + "openstack.block_storage.v3.volume.Volume.set_metadata", + self.proxy.set_volume_metadata, + method_args=[id], + method_kwargs=kwargs, + method_result=volume.Volume.existing(id=id, metadata=kwargs), + expected_args=[self.proxy], + expected_kwargs={'metadata': kwargs}, + expected_result=volume.Volume.existing(id=id, metadata=kwargs), + ) + + def test_delete_volume_metadata(self): + self._verify( + 
"openstack.block_storage.v3.volume.Volume.delete_metadata_item", + self.proxy.delete_volume_metadata, + expected_result=None, + method_args=["value", ["key"]], + expected_args=[self.proxy, "key"], + ) + + def test_volume_wait_for(self): + value = volume.Volume(id='1234') + self.verify_wait_for_status( + self.proxy.wait_for_status, + method_args=[value], + expected_args=[ + self.proxy, + value, + 'available', + ['error'], + 2, + None, + 'status', + None, + ], + ) + + +class TestPools(TestVolumeProxy): + def test_backend_pools(self): + self.verify_list(self.proxy.backend_pools, stats.Pools) + + +class TestLimit(TestVolumeProxy): + def test_limits_get(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_limits, + method_args=[], + method_kwargs={'project': 'foo'}, + expected_args=[self.proxy], + expected_kwargs={'requires_id': False, 'project_id': 'foo'}, + ) + + +class TestCapabilities(TestVolumeProxy): + def test_capabilites_get(self): + self.verify_get(self.proxy.get_capabilities, capabilities.Capabilities) + + +class TestResourceFilter(TestVolumeProxy): + def test_resource_filters(self): + self.verify_list( + self.proxy.resource_filters, resource_filter.ResourceFilter + ) + + +class TestGroup(TestVolumeProxy): + def test_group_get(self): + self.verify_get(self.proxy.get_group, group.Group) + + def test_group_find(self): + self.verify_find( + self.proxy.find_group, + group.Group, + expected_kwargs={'list_base_path': '/groups/detail'}, + ) + + def test_groups(self): + self.verify_list(self.proxy.groups, group.Group) + + def test_group_create(self): + self.verify_create(self.proxy.create_group, group.Group) + + def test_group_create_from_source(self): + self._verify( + "openstack.block_storage.v3.group.Group.create_from_source", + self.proxy.create_group_from_source, + method_args=[], + expected_args=[self.proxy], + ) + + def test_group_delete(self): + self._verify( + "openstack.block_storage.v3.group.Group.delete", + 
self.proxy.delete_group, + method_args=['delete_volumes'], + expected_args=[self.proxy], + expected_kwargs={'delete_volumes': False}, + ) + + def test_group_update(self): + self.verify_update(self.proxy.update_group, group.Group) + + def test_group_reset_status(self): + self._verify( + "openstack.block_storage.v3.group.Group.reset_status", + self.proxy.reset_group_status, + method_args=["value", "new_status"], + expected_args=[self.proxy, "new_status"], + ) + + def test_group_enable_replication(self): + self._verify( + "openstack.block_storage.v3.group.Group.enable_replication", + self.proxy.enable_group_replication, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_group_disable_replication(self): + self._verify( + "openstack.block_storage.v3.group.Group.disable_replication", + self.proxy.disable_group_replication, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_group_failover_replication(self): + self._verify( + "openstack.block_storage.v3.group.Group.failover_replication", + self.proxy.failover_group_replication, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={ + 'allowed_attached_volume': False, + 'secondary_backend_id': None, + }, + ) + + +class TestGroupSnapshot(TestVolumeProxy): + def test_group_snapshot_get(self): + self.verify_get( + self.proxy.get_group_snapshot, group_snapshot.GroupSnapshot + ) + + def test_group_snapshot_find(self): + self.verify_find( + self.proxy.find_group_snapshot, + group_snapshot.GroupSnapshot, + expected_kwargs={ + 'list_base_path': '/group_snapshots/detail', + }, + ) + + def test_group_snapshots(self): + self.verify_list( + self.proxy.group_snapshots, + group_snapshot.GroupSnapshot, + expected_kwargs={}, + ) + + def test_group_snapshots__detailed(self): + self.verify_list( + self.proxy.group_snapshots, + group_snapshot.GroupSnapshot, + method_kwargs={'details': True, 'query': 1}, + expected_kwargs={ + 'query': 1, + 'base_path': '/group_snapshots/detail', + 
}, + ) + + def test_group_snapshot_create(self): + self.verify_create( + self.proxy.create_group_snapshot, group_snapshot.GroupSnapshot + ) + + def test_group_snapshot_delete(self): + self.verify_delete( + self.proxy.delete_group_snapshot, + group_snapshot.GroupSnapshot, + False, + ) + + def test_group_snapshot_delete_ignore(self): + self.verify_delete( + self.proxy.delete_group_snapshot, + group_snapshot.GroupSnapshot, + True, + ) + + +class TestGroupType(TestVolumeProxy): + def test_group_type_get(self): + self.verify_get(self.proxy.get_group_type, group_type.GroupType) + + def test_group_type_find(self): + self.verify_find(self.proxy.find_group_type, group_type.GroupType) + + def test_group_types(self): + self.verify_list(self.proxy.group_types, group_type.GroupType) + + def test_group_type_create(self): + self.verify_create(self.proxy.create_group_type, group_type.GroupType) + + def test_group_type_delete(self): + self.verify_delete( + self.proxy.delete_group_type, group_type.GroupType, False + ) + + def test_group_type_delete_ignore(self): + self.verify_delete( + self.proxy.delete_group_type, group_type.GroupType, True + ) + + def test_group_type_update(self): + self.verify_update(self.proxy.update_group_type, group_type.GroupType) + + def test_group_type_fetch_group_specs(self): + self._verify( + "openstack.block_storage.v3.group_type.GroupType.fetch_group_specs", + self.proxy.fetch_group_type_group_specs, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_group_type_create_group_specs(self): + self._verify( + "openstack.block_storage.v3.group_type.GroupType.create_group_specs", + self.proxy.create_group_type_group_specs, + method_args=["value", {'a': 'b'}], + expected_args=[self.proxy], + expected_kwargs={"specs": {'a': 'b'}}, + ) + + def test_group_type_get_group_specs_prop(self): + self._verify( + "openstack.block_storage.v3.group_type.GroupType.get_group_specs_property", + self.proxy.get_group_type_group_specs_property, + 
method_args=["value", "prop"], + expected_args=[self.proxy, "prop"], + ) + + def test_group_type_update_group_specs_prop(self): + self._verify( + "openstack.block_storage.v3.group_type.GroupType.update_group_specs_property", + self.proxy.update_group_type_group_specs_property, + method_args=["value", "prop", "val"], + expected_args=[self.proxy, "prop", "val"], + ) + + def test_group_type_delete_group_specs_prop(self): + self._verify( + "openstack.block_storage.v3.group_type.GroupType.delete_group_specs_property", + self.proxy.delete_group_type_group_specs_property, + method_args=["value", "prop"], + expected_args=[self.proxy, "prop"], + ) + + +class TestService(TestVolumeProxy): + def test_services(self): + self.verify_list(self.proxy.services, service.Service) + + def test_enable_service(self): + self._verify( + 'openstack.block_storage.v3.service.Service.enable', + self.proxy.enable_service, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_disable_service(self): + self._verify( + 'openstack.block_storage.v3.service.Service.disable', + self.proxy.disable_service, + method_args=["value"], + expected_kwargs={"reason": None}, + expected_args=[self.proxy], + ) + + def test_thaw_service(self): + self._verify( + 'openstack.block_storage.v3.service.Service.thaw', + self.proxy.thaw_service, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_freeze_service(self): + self._verify( + 'openstack.block_storage.v3.service.Service.freeze', + self.proxy.freeze_service, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_set_service_log_levels(self): + self._verify( + 'openstack.block_storage.v3.service.Service.set_log_levels', + self.proxy.set_service_log_levels, + method_kwargs={"level": service.Level.INFO}, + expected_args=[self.proxy], + expected_kwargs={ + "level": service.Level.INFO, + "binary": None, + "server": None, + "prefix": None, + }, + ) + + def test_get_service_log_level(self): + self._verify( + 
'openstack.block_storage.v3.service.Service.get_log_levels', + self.proxy.get_service_log_levels, + method_args=[], + expected_args=[self.proxy], + expected_kwargs={ + "binary": None, + "server": None, + "prefix": None, + }, + ) + + def test_failover_service(self): + self._verify( + 'openstack.block_storage.v3.service.Service.failover', + self.proxy.failover_service, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={"backend_id": None, "cluster": None}, + ) + + +class TestExtension(TestVolumeProxy): + def test_extensions(self): + self.verify_list(self.proxy.extensions, extension.Extension) + + +class TestVolumeActions(TestVolumeProxy): + def test_volume_extend(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.extend", + self.proxy.extend_volume, + method_args=["value", "new-size"], + expected_args=[self.proxy, "new-size"], + ) + + def test_complete_extend(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.complete_extend", + self.proxy.complete_volume_extend, + method_args=["value"], + expected_args=[self.proxy, False], + ) + + def test_complete_extend_error(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.complete_extend", + self.proxy.complete_volume_extend, + method_args=["value", True], + expected_args=[self.proxy, True], + ) + + def test_volume_set_readonly_no_argument(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.set_readonly", + self.proxy.set_volume_readonly, + method_args=["value"], + expected_args=[self.proxy, True], + ) + + def test_volume_set_readonly_false(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.set_readonly", + self.proxy.set_volume_readonly, + method_args=["value", False], + expected_args=[self.proxy, False], + ) + + def test_volume_set_bootable(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.set_bootable_status", + self.proxy.set_volume_bootable_status, + method_args=["value", True], + 
expected_args=[self.proxy, True], + ) + + def test_volume_reset_volume_status(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.reset_status", + self.proxy.reset_volume_status, + method_args=["value", '1', '2', '3'], + expected_args=[self.proxy, '1', '2', '3'], + ) + + def test_set_volume_image_metadata(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.set_image_metadata", + self.proxy.set_volume_image_metadata, + method_args=["value"], + method_kwargs={'foo': 'bar'}, + expected_args=[self.proxy], + expected_kwargs={'metadata': {'foo': 'bar'}}, + ) + + def test_delete_volume_image_metadata(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.delete_image_metadata", + self.proxy.delete_volume_image_metadata, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_delete_volume_image_metadata__with_keys(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.delete_image_metadata_item", + self.proxy.delete_volume_image_metadata, + method_args=["value", ['foo']], + expected_args=[self.proxy, 'foo'], + ) + + def test_volume_revert_to_snapshot(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.revert_to_snapshot", + self.proxy.revert_volume_to_snapshot, + method_args=["value", '1'], + expected_args=[self.proxy, '1'], + ) + + def test_attach_instance(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.attach", + self.proxy.attach_volume, + method_args=["value", '1'], + method_kwargs={'instance': '2'}, + expected_args=[self.proxy, '1', '2', None], + ) + + def test_attach_host(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.attach", + self.proxy.attach_volume, + method_args=["value", '1'], + method_kwargs={'host_name': '3'}, + expected_args=[self.proxy, '1', None, '3'], + ) + + def test_detach_defaults(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.detach", + self.proxy.detach_volume, + method_args=["value", '1'], + 
expected_args=[self.proxy, '1', False, None], + ) + + def test_detach_force(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.detach", + self.proxy.detach_volume, + method_args=["value", '1', True, {'a': 'b'}], + expected_args=[self.proxy, '1', True, {'a': 'b'}], + ) + + def test_unmanage(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.unmanage", + self.proxy.unmanage_volume, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_migrate_default(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.migrate", + self.proxy.migrate_volume, + method_args=["value", '1'], + expected_args=[self.proxy, '1', False, False, None], + ) + + def test_migrate_nondefault(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.migrate", + self.proxy.migrate_volume, + method_args=["value", '1', True, True], + expected_args=[self.proxy, '1', True, True, None], + ) + + def test_migrate_cluster(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.migrate", + self.proxy.migrate_volume, + method_args=["value"], + method_kwargs={'cluster': '3'}, + expected_args=[self.proxy, None, False, False, '3'], + ) + + def test_complete_migration(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.complete_migration", + self.proxy.complete_volume_migration, + method_args=["value", '1'], + expected_args=[self.proxy, "1", False], + ) + + def test_complete_migration_error(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.complete_migration", + self.proxy.complete_volume_migration, + method_args=["value", "1", True], + expected_args=[self.proxy, "1", True], + ) + + def test_upload_to_image(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.upload_to_image", + self.proxy.upload_volume_to_image, + method_args=["value", "1"], + expected_args=[self.proxy, "1"], + expected_kwargs={ + "force": False, + "disk_format": None, + "container_format": None, + "visibility": None, + 
"protected": None, + }, + ) + + def test_upload_to_image_extended(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.upload_to_image", + self.proxy.upload_volume_to_image, + method_args=["value", "1"], + method_kwargs={ + "disk_format": "2", + "container_format": "3", + "visibility": "4", + "protected": "5", + }, + expected_args=[self.proxy, "1"], + expected_kwargs={ + "force": False, + "disk_format": "2", + "container_format": "3", + "visibility": "4", + "protected": "5", + }, + ) + + def test_reserve(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.reserve", + self.proxy.reserve_volume, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_unreserve(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.unreserve", + self.proxy.unreserve_volume, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_begin_detaching(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.begin_detaching", + self.proxy.begin_volume_detaching, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_abort_detaching(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.abort_detaching", + self.proxy.abort_volume_detaching, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_init_attachment(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.init_attachment", + self.proxy.init_volume_attachment, + method_args=["value", "1"], + expected_args=[self.proxy, "1"], + ) + + def test_terminate_attachment(self): + self._verify( + "openstack.block_storage.v3.volume.Volume.terminate_attachment", + self.proxy.terminate_volume_attachment, + method_args=["value", "1"], + expected_args=[self.proxy, "1"], + ) + + +class TestBackup(TestVolumeProxy): + def test_backups_detailed(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + self.verify_list( + 
self.proxy.backups, + backup.Backup, + method_kwargs={"details": True, "query": 1}, + expected_kwargs={"query": 1, "base_path": "/backups/detail"}, + ) + + def test_backups_not_detailed(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + self.verify_list( + self.proxy.backups, + backup.Backup, + method_kwargs={"details": False, "query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_backup_get(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + self.verify_get(self.proxy.get_backup, backup.Backup) + + def test_backup_find(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + self.verify_find( + self.proxy.find_backup, + backup.Backup, + expected_kwargs={'list_base_path': '/backups/detail'}, + ) + + def test_backup_delete(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + self.verify_delete(self.proxy.delete_backup, backup.Backup, False) + + def test_backup_delete_ignore(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + self.verify_delete(self.proxy.delete_backup, backup.Backup, True) + + def test_backup_delete_force(self): + self._verify( + "openstack.block_storage.v3.backup.Backup.force_delete", + self.proxy.delete_backup, + method_args=["value"], + method_kwargs={"force": True}, + expected_args=[self.proxy], + ) + + def test_backup_update(self): + self.verify_update(self.proxy.update_backup, backup.Backup) + + def test_backup_create_attrs(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + 
self.verify_create(self.proxy.create_backup, backup.Backup) + + def test_backup_restore(self): + # NOTE: mock has_service + self.proxy._connection = mock.Mock() + self.proxy._connection.has_service = mock.Mock(return_value=True) + self._verify( + 'openstack.block_storage.v3.backup.Backup.restore', + self.proxy.restore_backup, + method_args=['volume_id'], + method_kwargs={'volume_id': 'vol_id', 'name': 'name'}, + expected_args=[self.proxy], + expected_kwargs={'volume_id': 'vol_id', 'name': 'name'}, + ) + + def test_backup_reset_status(self): + self._verify( + "openstack.block_storage.v3.backup.Backup.reset_status", + self.proxy.reset_backup_status, + method_args=["value", "new_status"], + expected_args=[self.proxy, "new_status"], + ) + + def test_backup_get_metadata(self): + self._verify( + "openstack.block_storage.v3.backup.Backup.fetch_metadata", + self.proxy.get_backup_metadata, + method_args=["value"], + expected_args=[self.proxy], + expected_result=volume.Volume(id="value", metadata={}), + ) + + def test_backup_set_metadata(self): + kwargs = {"a": "1", "b": "2"} + id = "an_id" + self._verify( + "openstack.block_storage.v3.backup.Backup.set_metadata", + self.proxy.set_backup_metadata, + method_args=[id], + method_kwargs=kwargs, + method_result=volume.Volume.existing(id=id, metadata=kwargs), + expected_args=[self.proxy], + expected_kwargs={'metadata': kwargs}, + expected_result=volume.Volume.existing(id=id, metadata=kwargs), + ) + + def test_backup_delete_metadata(self): + self._verify( + "openstack.block_storage.v3.backup.Backup.delete_metadata_item", + self.proxy.delete_backup_metadata, + expected_result=None, + method_args=["value", ["key"]], + expected_args=[self.proxy, "key"], + ) + + +class TestSnapshot(TestVolumeProxy): + def test_snapshot_get(self): + self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot) + + def test_snapshot_find(self): + self.verify_find( + self.proxy.find_snapshot, + snapshot.Snapshot, + method_kwargs={'all_projects': True}, + 
expected_kwargs={ + 'list_base_path': '/snapshots/detail', + 'all_projects': True, + }, + ) + + def test_snapshots_detailed(self): + self.verify_list( + self.proxy.snapshots, + snapshot.SnapshotDetail, + method_kwargs={"details": True, "query": 1}, + expected_kwargs={"query": 1, "base_path": "/snapshots/detail"}, + ) + + def test_snapshots_not_detailed(self): + self.verify_list( + self.proxy.snapshots, + snapshot.Snapshot, + method_kwargs={"details": False, "query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_snapshot_create_attrs(self): + self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot) + + def test_snapshot_update(self): + self.verify_update(self.proxy.update_snapshot, snapshot.Snapshot) + + def test_snapshot_delete(self): + self.verify_delete( + self.proxy.delete_snapshot, snapshot.Snapshot, False + ) + + def test_snapshot_delete_ignore(self): + self.verify_delete(self.proxy.delete_snapshot, snapshot.Snapshot, True) + + def test_snapshot_delete_force(self): + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.force_delete", + self.proxy.delete_snapshot, + method_args=["value"], + method_kwargs={"force": True}, + expected_args=[self.proxy], + ) + + def test_snapshot_reset_status(self): + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.reset_status", + self.proxy.reset_snapshot_status, + method_args=["value", "new_status"], + expected_args=[self.proxy, "new_status"], + ) + + def test_snapshot_set_status(self): + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.set_status", + self.proxy.set_snapshot_status, + method_args=["value", "new_status"], + expected_args=[self.proxy, "new_status", None], + ) + + def test_snapshot_set_status_percentage(self): + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.set_status", + self.proxy.set_snapshot_status, + method_args=["value", "new_status", "per"], + expected_args=[self.proxy, "new_status", "per"], + ) + + def test_snapshot_manage(self): + kwargs = 
{ + "volume_id": "fake_id", + "remote_source": "fake_volume", + "snapshot_name": "fake_snap", + "description": "test_snap", + "property": {"k": "v"}, + } + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.manage", + self.proxy.manage_snapshot, + method_kwargs=kwargs, + method_result=snapshot.Snapshot(id="fake_id"), + expected_args=[self.proxy], + expected_kwargs=kwargs, + expected_result=snapshot.Snapshot(id="fake_id"), + ) + + def test_snapshot_unmanage(self): + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.unmanage", + self.proxy.unmanage_snapshot, + method_args=["value"], + expected_args=[self.proxy], + expected_result=None, + ) + + def test_get_snapshot_metadata(self): + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.fetch_metadata", + self.proxy.get_snapshot_metadata, + method_args=["value"], + expected_args=[self.proxy], + expected_result=snapshot.Snapshot(id="value", metadata={}), + ) + + def test_set_snapshot_metadata(self): + kwargs = {"a": "1", "b": "2"} + id = "an_id" + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot.set_metadata", + self.proxy.set_snapshot_metadata, + method_args=[id], + method_kwargs=kwargs, + method_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), + expected_args=[self.proxy], + expected_kwargs={'metadata': kwargs}, + expected_result=snapshot.Snapshot.existing(id=id, metadata=kwargs), + ) + + def test_delete_snapshot_metadata(self): + self._verify( + "openstack.block_storage.v3.snapshot.Snapshot." 
+ "delete_metadata_item", + self.proxy.delete_snapshot_metadata, + expected_result=None, + method_args=["value", ["key"]], + expected_args=[self.proxy, "key"], + ) + + +class TestType(TestVolumeProxy): + def test_type_get(self): + self.verify_get(self.proxy.get_type, type.Type) + + def test_type_find(self): + self.verify_find(self.proxy.find_type, type.Type) + + def test_types(self): + self.verify_list(self.proxy.types, type.Type) + + def test_type_create_attrs(self): + self.verify_create(self.proxy.create_type, type.Type) + + def test_type_delete(self): + self.verify_delete(self.proxy.delete_type, type.Type, False) + + def test_type_delete_ignore(self): + self.verify_delete(self.proxy.delete_type, type.Type, True) + + def test_type_update(self): + self.verify_update(self.proxy.update_type, type.Type) + + def test_type_extra_specs_update(self): + kwargs = {"a": "1", "b": "2"} + id = "an_id" + self._verify( + "openstack.block_storage.v3.type.Type.set_extra_specs", + self.proxy.update_type_extra_specs, + method_args=[id], + method_kwargs=kwargs, + method_result=type.Type.existing(id=id, extra_specs=kwargs), + expected_args=[self.proxy], + expected_kwargs=kwargs, + expected_result=kwargs, + ) + + def test_type_extra_specs_delete(self): + self._verify( + "openstack.block_storage.v3.type.Type.delete_extra_specs", + self.proxy.delete_type_extra_specs, + expected_result=None, + method_args=["value", "key"], + expected_args=[self.proxy, "key"], + ) + + def test_type_get_private_access(self): + self._verify( + "openstack.block_storage.v3.type.Type.get_private_access", + self.proxy.get_type_access, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_type_add_private_access(self): + self._verify( + "openstack.block_storage.v3.type.Type.add_private_access", + self.proxy.add_type_access, + method_args=["value", "a"], + expected_args=[self.proxy, "a"], + ) + + def test_type_remove_private_access(self): + self._verify( + 
"openstack.block_storage.v3.type.Type.remove_private_access", + self.proxy.remove_type_access, + method_args=["value", "a"], + expected_args=[self.proxy, "a"], + ) + + def test_type_encryption_get(self): + self.verify_get( + self.proxy.get_type_encryption, + type.TypeEncryption, + method_args=['value'], + expected_args=[], + expected_kwargs={'volume_type_id': 'value', 'requires_id': False}, + ) + + def test_type_encryption_create(self): + self.verify_create( + self.proxy.create_type_encryption, + type.TypeEncryption, + method_kwargs={'volume_type': 'id'}, + expected_kwargs={'volume_type_id': 'id'}, + ) + + def test_type_encryption_update(self): + # Verify that the get call was made with correct kwargs + self.verify_get( + self.proxy.get_type_encryption, + type.TypeEncryption, + method_args=['value'], + expected_args=[], + expected_kwargs={'volume_type_id': 'value', 'requires_id': False}, + ) + self.verify_update( + self.proxy.update_type_encryption, type.TypeEncryption + ) + + def test_type_encryption_delete(self): + # Verify that the get call was made with correct kwargs + self.verify_get( + self.proxy.get_type_encryption, + type.TypeEncryption, + method_args=['value'], + expected_args=[], + expected_kwargs={'volume_type_id': 'value', 'requires_id': False}, + ) + self.verify_delete( + self.proxy.delete_type_encryption, type.TypeEncryption, False + ) + + def test_type_encryption_delete_ignore(self): + self.verify_delete( + self.proxy.delete_type_encryption, type.TypeEncryption, True + ) + + +class TestTransfer(TestVolumeProxy): + def test_transfer_create(self): + self.verify_create(self.proxy.create_transfer, transfer.Transfer) + + def test_transfer_delete(self): + self.verify_delete( + self.proxy.delete_transfer, transfer.Transfer, False + ) + + def test_transfer_get(self): + self.verify_get(self.proxy.get_transfer, transfer.Transfer) + + def test_transfer_find(self): + self.verify_find(self.proxy.find_transfer, transfer.Transfer) + + @mock.patch( + 
'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_transfers(self, mock_mv): + self.verify_list(self.proxy.transfers, transfer.Transfer) + + def test_accept_transfer(self): + self._verify( + 'openstack.block_storage.v3.transfer.Transfer.accept', + self.proxy.accept_transfer, + method_args=['value', 'auth_key'], + expected_args=[self.proxy], + expected_kwargs={'auth_key': 'auth_key'}, + ) + + +class TestQuotaClassSet(TestVolumeProxy): + def test_quota_class_set_get(self): + self.verify_get( + self.proxy.get_quota_class_set, quota_class_set.QuotaClassSet + ) + + def test_quota_class_set_update(self): + self.verify_update( + self.proxy.update_quota_class_set, + quota_class_set.QuotaClassSet, + False, + ) + + +class TestQuotaSet(TestVolumeProxy): + def test_quota_set_get(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set, + method_args=['prj'], + expected_args=[ + self.proxy, + False, + None, + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + 'usage': False, + }, + method_result=quota_set.QuotaSet(), + expected_result=quota_set.QuotaSet(), + ) + + def test_quota_set_get_query(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set, + method_args=['prj'], + method_kwargs={'usage': True, 'user_id': 'uid'}, + expected_args=[ + self.proxy, + False, + None, + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + 'usage': True, + 'user_id': 'uid', + }, + ) + + def test_quota_set_get_defaults(self): + project_id = 'prj' + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set_defaults, + method_args=[project_id], + expected_args=[ + self.proxy, + False, + f'/os-quota-sets/{project_id}/defaults', + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + }, + ) + + def test_quota_set_reset(self): + 
self._verify( + 'openstack.resource.Resource.delete', + self.proxy.revert_quota_set, + method_args=['prj'], + method_kwargs={'user_id': 'uid'}, + expected_args=[self.proxy], + expected_kwargs={'user_id': 'uid'}, + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_quota_set_update(self, mock_get): + fake_project = project.Project(id='prj') + mock_get.side_effect = [fake_project] + + self._verify( + 'openstack.proxy.Proxy._update', + self.proxy.update_quota_set, + method_args=['prj'], + method_kwargs={'volumes': 123}, + expected_args=[quota_set.QuotaSet, None], + expected_kwargs={'project_id': 'prj', 'volumes': 123}, + ) + mock_get.assert_called_once_with(project.Project, 'prj') + + @mock.patch.object(proxy_base.Proxy, "_get_resource") + def test_quota_set_update__legacy(self, mock_get): + fake_quota_set = quota_set.QuotaSet(project_id='prj') + mock_get.side_effect = [fake_quota_set] + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + self._verify( + 'openstack.resource.Resource.commit', + self.proxy.update_quota_set, + method_args=[fake_quota_set], + method_kwargs={'ram': 123}, + expected_args=[self.proxy], + expected_kwargs={}, + ) + + self.assertEqual(1, len(w)) + self.assertEqual( + os_warnings.RemovedInSDK50Warning, + w[-1].category, + ) + self.assertIn( + "The signature of 'update_quota_set' has changed ", + str(w[-1]), + ) diff --git a/openstack/tests/unit/block_storage/v3/test_resource_filter.py b/openstack/tests/unit/block_storage/v3/test_resource_filter.py new file mode 100644 index 0000000000..e8c5c47988 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_resource_filter.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.block_storage.v3 import resource_filter +from openstack.tests.unit import base + +RESOURCE_FILTER = { + 'filters': [ + 'name', + 'status', + 'image_metadata', + 'bootable', + 'migration_status', + ], + 'resource': 'volume', +} + + +class TestResourceFilter(base.TestCase): + def test_basic(self): + resource = resource_filter.ResourceFilter() + self.assertEqual('resource_filters', resource.resources_key) + self.assertEqual('/resource_filters', resource.base_path) + self.assertFalse(resource.allow_create) + self.assertFalse(resource.allow_fetch) + self.assertFalse(resource.allow_commit) + self.assertFalse(resource.allow_delete) + self.assertTrue(resource.allow_list) + + self.assertDictEqual( + { + "resource": "resource", + }, + resource._query_mapping._mapping, + ) + + def test_make_resource_filter(self): + resource = resource_filter.ResourceFilter(**RESOURCE_FILTER) + self.assertEqual(RESOURCE_FILTER['filters'], resource.filters) + self.assertEqual(RESOURCE_FILTER['resource'], resource.resource) diff --git a/openstack/tests/unit/block_storage/v3/test_service.py b/openstack/tests/unit/block_storage/v3/test_service.py new file mode 100644 index 0000000000..fe2f301330 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_service.py @@ -0,0 +1,254 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.block_storage.v3 import service +from openstack.tests.unit import base + +EXAMPLE = { + "binary": "cinder-scheduler", + "disabled_reason": None, + "host": "devstack", + "state": "up", + "status": "enabled", + "updated_at": "2017-06-29T05:50:35.000000", + "zone": "nova", +} + + +class TestService(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None # nothing uses this + self.resp.json = mock.Mock(return_value={'service': {}}) + self.resp.status_code = 200 + self.resp.headers = {} + self.sess = mock.Mock() + self.sess.put = mock.Mock(return_value=self.resp) + self.sess.default_microversion = '3.0' + + def test_basic(self): + sot = service.Service() + self.assertIsNone(sot.resource_key) + self.assertEqual('services', sot.resources_key) + self.assertEqual('/os-services', sot.base_path) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_delete) + + self.assertDictEqual( + { + 'binary': 'binary', + 'host': 'host', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = service.Service(**EXAMPLE) + self.assertEqual(EXAMPLE['binary'], sot.binary) + self.assertEqual(EXAMPLE['binary'], sot.name) + self.assertEqual(EXAMPLE['disabled_reason'], sot.disabled_reason) + self.assertEqual(EXAMPLE['host'], sot.host) + self.assertEqual(EXAMPLE['state'], sot.state) + self.assertEqual(EXAMPLE['status'], sot.status) + 
self.assertEqual(EXAMPLE['zone'], sot.availability_zone) + + def test_enable(self): + sot = service.Service(**EXAMPLE) + + res = sot.enable(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/enable' + body = { + 'binary': 'cinder-scheduler', + 'host': 'devstack', + } + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + def test_disable(self): + sot = service.Service(**EXAMPLE) + + res = sot.disable(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/disable' + body = { + 'binary': 'cinder-scheduler', + 'host': 'devstack', + } + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + def test_disable__with_reason(self): + sot = service.Service(**EXAMPLE) + reason = 'fencing' + + res = sot.disable(self.sess, reason=reason) + + self.assertIsNotNone(res) + + url = 'os-services/disable-log-reason' + body = { + 'binary': 'cinder-scheduler', + 'host': 'devstack', + 'disabled_reason': reason, + } + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + def test_thaw(self): + sot = service.Service(**EXAMPLE) + + res = sot.thaw(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/thaw' + body = {'host': 'devstack'} + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + def test_freeze(self): + sot = service.Service(**EXAMPLE) + + res = sot.freeze(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/freeze' + body = {'host': 'devstack'} + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + def test_set_log_levels(self): + self.sess.default_microversion = '3.32' + res = service.Service.set_log_levels( + self.sess, + level=service.Level.DEBUG, + binary=service.Binary.ANY, + server='foo', + prefix='cinder.', + ) + self.assertIsNone(res) + + url = 'os-services/set-log' 
+ body = { + 'level': service.Level.DEBUG, + 'binary': service.Binary.ANY, + 'server': 'foo', + 'prefix': 'cinder.', + } + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + def test_get_log_levels(self): + self.sess.default_microversion = '3.32' + self.resp.json = mock.Mock( + return_value={ + 'log_levels': [ + { + "binary": "cinder-api", + "host": "devstack", + "levels": {"cinder.volume.api": "DEBUG"}, + }, + ], + }, + ) + res = list( + service.Service.get_log_levels( + self.sess, + binary=service.Binary.ANY, + server='foo', + prefix='cinder.', + ) + ) + self.assertIsNotNone(res) + + url = 'os-services/get-log' + body = { + 'binary': service.Binary.ANY, + 'server': 'foo', + 'prefix': 'cinder.', + } + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_failover(self, mock_supports): + sot = service.Service(**EXAMPLE) + + res = sot.failover(self.sess) + self.assertIsNotNone(res) + + url = 'os-services/failover_host' + body = {'host': 'devstack'} + self.sess.put.assert_called_with( + url, + json=body, + microversion=self.sess.default_microversion, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_failover__with_cluster(self, mock_supports): + self.sess.default_microversion = '3.26' + + sot = service.Service(**EXAMPLE) + + res = sot.failover(self.sess, cluster='foo', backend_id='bar') + self.assertIsNotNone(res) + + url = 'os-services/failover' + body = { + 'host': 'devstack', + 'cluster': 'foo', + 'backend_id': 'bar', + } + self.sess.put.assert_called_with( + url, + json=body, + microversion='3.26', + ) diff --git a/openstack/tests/unit/block_storage/v3/test_snapshot.py b/openstack/tests/unit/block_storage/v3/test_snapshot.py new file mode 100644 index 0000000000..ca8a19d2a3 --- 
/dev/null +++ b/openstack/tests/unit/block_storage/v3/test_snapshot.py @@ -0,0 +1,213 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import copy +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import snapshot +from openstack.tests.unit import base + + +FAKE_ID = "ffa9bc5e-1172-4021-acaf-cdcd78a9584d" +FAKE_VOLUME_ID = "5aa119a8-d25b-45a7-8d1b-88e127885635" + +SNAPSHOT = { + "status": "creating", + "description": "Daily backup", + "created_at": "2015-03-09T12:14:57.233772", + "updated_at": None, + "metadata": {}, + "volume_id": FAKE_VOLUME_ID, + "size": 1, + "id": FAKE_ID, + "name": "snap-001", + "force": "true", + "os-extended-snapshot-attributes:progress": "100%", + "os-extended-snapshot-attributes:project_id": "0c2eba2c5af04d3f9e9d0d410b371fde", # noqa: E501 +} + + +class TestSnapshot(base.TestCase): + def test_basic(self): + sot = snapshot.Snapshot(SNAPSHOT) + self.assertEqual("snapshot", sot.resource_key) + self.assertEqual("snapshots", sot.resources_key) + self.assertEqual("/snapshots", sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + "name": "name", + "status": "status", + "volume_id": "volume_id", + "project_id": "project_id", + "limit": "limit", + "offset": "offset", + "marker": "marker", + "sort_dir": "sort_dir", + "sort_key": 
"sort_key", + "sort": "sort", + "all_projects": "all_tenants", + }, + sot._query_mapping._mapping, + ) + + def test_create_basic(self): + sot = snapshot.Snapshot(**SNAPSHOT) + self.assertEqual(SNAPSHOT["id"], sot.id) + self.assertEqual(SNAPSHOT["status"], sot.status) + self.assertEqual(SNAPSHOT["created_at"], sot.created_at) + self.assertEqual(SNAPSHOT["updated_at"], sot.updated_at) + self.assertEqual(SNAPSHOT["metadata"], sot.metadata) + self.assertEqual(SNAPSHOT["volume_id"], sot.volume_id) + self.assertEqual(SNAPSHOT["size"], sot.size) + self.assertEqual(SNAPSHOT["name"], sot.name) + self.assertEqual( + SNAPSHOT["os-extended-snapshot-attributes:progress"], sot.progress + ) + self.assertEqual( + SNAPSHOT["os-extended-snapshot-attributes:project_id"], + sot.project_id, + ) + self.assertTrue(sot.is_forced) + + +class TestSnapshotActions(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.get = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = None + + def test_force_delete(self): + sot = snapshot.Snapshot(**SNAPSHOT) + + self.assertIsNone(sot.force_delete(self.sess)) + + url = f'snapshots/{FAKE_ID}/action' + body = {'os-force_delete': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_reset_status(self): + sot = snapshot.Snapshot(**SNAPSHOT) + + self.assertIsNone(sot.reset_status(self.sess, 'new_status')) + + url = f'snapshots/{FAKE_ID}/action' + body = {'os-reset_status': {'status': 'new_status'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_set_status(self): + sot = snapshot.Snapshot(**SNAPSHOT) + + self.assertIsNone(sot.set_status(self.sess, 'new_status')) + + url = 
f'snapshots/{FAKE_ID}/action' + body = {'os-update_snapshot_status': {'status': 'new_status'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_manage(self, mock_mv): + resp = mock.Mock() + resp.body = {'snapshot': copy.deepcopy(SNAPSHOT)} + resp.json = mock.Mock(return_value=resp.body) + resp.headers = {} + resp.status_code = 202 + + self.sess.post = mock.Mock(return_value=resp) + + sot = snapshot.Snapshot.manage( + self.sess, volume_id=FAKE_VOLUME_ID, ref=FAKE_ID + ) + + self.assertIsNotNone(sot) + + url = '/manageable_snapshots' + body = { + 'snapshot': { + 'volume_id': FAKE_VOLUME_ID, + 'ref': FAKE_ID, + 'name': None, + 'description': None, + 'metadata': None, + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_manage_pre_38(self, mock_mv): + resp = mock.Mock() + resp.body = {'snapshot': copy.deepcopy(SNAPSHOT)} + resp.json = mock.Mock(return_value=resp.body) + resp.headers = {} + resp.status_code = 202 + + self.sess.post = mock.Mock(return_value=resp) + + sot = snapshot.Snapshot.manage( + self.sess, volume_id=FAKE_VOLUME_ID, ref=FAKE_ID + ) + + self.assertIsNotNone(sot) + + url = '/os-snapshot-manage' + body = { + 'snapshot': { + 'volume_id': FAKE_VOLUME_ID, + 'ref': FAKE_ID, + 'name': None, + 'description': None, + 'metadata': None, + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_unmanage(self): + sot = snapshot.Snapshot(**SNAPSHOT) + + self.assertIsNone(sot.unmanage(self.sess)) + + url = f'snapshots/{FAKE_ID}/action' + body = {'os-unmanage': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) diff --git 
a/openstack/tests/unit/block_storage/v3/test_transfer.py b/openstack/tests/unit/block_storage/v3/test_transfer.py new file mode 100644 index 0000000000..185146fc76 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_transfer.py @@ -0,0 +1,152 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import transfer +from openstack import resource +from openstack.tests.unit import base + + +FAKE_ID = "09d18b36-9e8d-4438-a4da-3f5eff5e1130" +FAKE_VOL_ID = "390de1bc-19d1-41e7-ba67-c492bb36cae5" +FAKE_VOL_NAME = "test-volume" +FAKE_TRANSFER = "7d048960-7c3f-4bf0-952f-4312fdea1dec" +FAKE_AUTH_KEY = "95bc670c0068821d" + +TRANSFER = { + "auth_key": FAKE_AUTH_KEY, + "created_at": "2023-06-27T08:47:23.035010", + "id": FAKE_ID, + "name": FAKE_VOL_NAME, + "volume_id": FAKE_VOL_ID, +} + + +class TestTransfer(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None # nothing uses this + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.headers = {} + self.resp.status_code = 202 + + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = "3.55" + + def test_basic(self): + tr = transfer.Transfer() + self.assertEqual("transfer", tr.resource_key) + self.assertEqual("transfers", tr.resources_key) + self.assertEqual("/volume-transfers", tr.base_path) 
+ self.assertTrue(tr.allow_create) + self.assertIsNotNone(tr._max_microversion) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + }, + tr._query_mapping._mapping, + ) + + def test_make_it(self): + sot = transfer.Transfer(**TRANSFER) + self.assertEqual(TRANSFER["auth_key"], sot.auth_key) + self.assertEqual(TRANSFER["created_at"], sot.created_at) + self.assertEqual(TRANSFER["id"], sot.id) + self.assertEqual(TRANSFER["name"], sot.name) + self.assertEqual(TRANSFER["volume_id"], sot.volume_id) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + @mock.patch.object(resource.Resource, '_translate_response') + def test_create(self, mock_mv, mock_translate): + sot = transfer.Transfer() + + sot.create(self.sess, volume_id=FAKE_VOL_ID, name=FAKE_VOL_NAME) + self.sess.post.assert_called_with( + '/volume-transfers', + json={'transfer': {}}, + microversion="3.55", + headers={}, + params={'volume_id': FAKE_VOL_ID, 'name': FAKE_VOL_NAME}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + @mock.patch.object(resource.Resource, '_translate_response') + def test_create_pre_v355(self, mock_mv, mock_translate): + self.sess.default_microversion = "3.0" + sot = transfer.Transfer() + + sot.create(self.sess, volume_id=FAKE_VOL_ID, name=FAKE_VOL_NAME) + self.sess.post.assert_called_with( + '/os-volume-transfer', + json={'transfer': {}}, + microversion="3.0", + headers={}, + params={'volume_id': FAKE_VOL_ID, 'name': FAKE_VOL_NAME}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + @mock.patch.object(resource.Resource, '_translate_response') + def test_accept(self, mock_mv, mock_translate): + sot = transfer.Transfer() + sot.id = FAKE_TRANSFER + + sot.accept(self.sess, auth_key=FAKE_AUTH_KEY) + self.sess.post.assert_called_with( + f'volume-transfers/{FAKE_TRANSFER}/accept', + json={ + 'accept': { + 
'auth_key': FAKE_AUTH_KEY, + } + }, + microversion="3.55", + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + @mock.patch.object(resource.Resource, '_translate_response') + def test_accept_pre_v355(self, mock_mv, mock_translate): + self.sess.default_microversion = "3.0" + sot = transfer.Transfer() + sot.id = FAKE_TRANSFER + + sot.accept(self.sess, auth_key=FAKE_AUTH_KEY) + self.sess.post.assert_called_with( + f'os-volume-transfer/{FAKE_TRANSFER}/accept', + json={ + 'accept': { + 'auth_key': FAKE_AUTH_KEY, + } + }, + microversion="3.0", + ) diff --git a/openstack/tests/unit/block_storage/v3/test_type.py b/openstack/tests/unit/block_storage/v3/test_type.py new file mode 100644 index 0000000000..174d213b7a --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_type.py @@ -0,0 +1,172 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import type +from openstack import exceptions +from openstack.tests.unit import base + + +FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" +TYPE = { + "extra_specs": {"capabilities": "gpu"}, + "id": FAKE_ID, + "name": "SSD", + "description": "Test type", +} + + +class TestType(base.TestCase): + def setUp(self): + super().setUp() + self.extra_specs_result = {"extra_specs": {"go": "cubs", "boo": "sox"}} + self.resp = mock.Mock() + self.resp.body = None + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = '3.0' + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_basic(self): + sot = type.Type(**TYPE) + self.assertEqual("volume_type", sot.resource_key) + self.assertEqual("volume_types", sot.resources_key) + self.assertEqual("/types", sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_commit) + + def test_new(self): + sot = type.Type.new(id=FAKE_ID) + self.assertEqual(FAKE_ID, sot.id) + + def test_create(self): + sot = type.Type(**TYPE) + self.assertEqual(TYPE["id"], sot.id) + self.assertEqual(TYPE["extra_specs"], sot.extra_specs) + self.assertEqual(TYPE["name"], sot.name) + self.assertEqual(TYPE["description"], sot.description) + + def test_set_extra_specs(self): + response = mock.Mock() + response.status_code = 200 + response.json.return_value = self.extra_specs_result + sess = mock.Mock() + sess.post.return_value = response + + sot = type.Type(id=FAKE_ID) + + set_specs = {"lol": "rofl"} + + result = sot.set_extra_specs(sess, **set_specs) + + self.assertEqual(result, self.extra_specs_result["extra_specs"]) + sess.post.assert_called_once_with( + 
"types/" + FAKE_ID + "/extra_specs", + headers={}, + json={"extra_specs": set_specs}, + ) + + def test_set_extra_specs_error(self): + sess = mock.Mock() + response = mock.Mock() + response.status_code = 400 + response.content = None + sess.post.return_value = response + + sot = type.Type(id=FAKE_ID) + + set_specs = {"lol": "rofl"} + + self.assertRaises( + exceptions.BadRequestException, + sot.set_extra_specs, + sess, + **set_specs, + ) + + def test_delete_extra_specs(self): + sess = mock.Mock() + response = mock.Mock() + response.status_code = 200 + sess.delete.return_value = response + + sot = type.Type(id=FAKE_ID) + + key = "hey" + + sot.delete_extra_specs(sess, [key]) + + sess.delete.assert_called_once_with( + "types/" + FAKE_ID + "/extra_specs/" + key, + headers={}, + ) + + def test_delete_extra_specs_error(self): + sess = mock.Mock() + response = mock.Mock() + response.status_code = 400 + response.content = None + sess.delete.return_value = response + + sot = type.Type(id=FAKE_ID) + + key = "hey" + + self.assertRaises( + exceptions.BadRequestException, sot.delete_extra_specs, sess, [key] + ) + + def test_get_private_access(self): + sot = type.Type(**TYPE) + + response = mock.Mock() + response.status_code = 200 + response.body = { + "volume_type_access": [{"project_id": "a", "volume_type_id": "b"}] + } + response.json = mock.Mock(return_value=response.body) + self.sess.get = mock.Mock(return_value=response) + + self.assertEqual( + response.body["volume_type_access"], + sot.get_private_access(self.sess), + ) + + self.sess.get.assert_called_with( + f"types/{sot.id}/os-volume-type-access" + ) + + def test_add_private_access(self): + sot = type.Type(**TYPE) + + self.assertIsNone(sot.add_private_access(self.sess, "a")) + + url = f"types/{sot.id}/action" + body = {"addProjectAccess": {"project": "a"}} + self.sess.post.assert_called_with(url, json=body) + + def test_remove_private_access(self): + sot = type.Type(**TYPE) + + 
self.assertIsNone(sot.remove_private_access(self.sess, "a")) + + url = f"types/{sot.id}/action" + body = {"removeProjectAccess": {"project": "a"}} + self.sess.post.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/block_storage/v3/test_type_encryption.py b/openstack/tests/unit/block_storage/v3/test_type_encryption.py new file mode 100644 index 0000000000..a0ebf22d65 --- /dev/null +++ b/openstack/tests/unit/block_storage/v3/test_type_encryption.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.block_storage.v3 import type +from openstack.tests.unit import base + + +FAKE_ID = "479394ab-2f25-416e-8f58-721d8e5e29de" +TYPE_ID = "22373aed-c4a8-4072-b66c-bf0a90dc9a12" +TYPE_ENC = { + "key_size": 256, + "volume_type_id": TYPE_ID, + "encryption_id": FAKE_ID, + "provider": "nova.volume.encryptors.luks.LuksEncryptor", + "control_location": "front-end", + "cipher": "aes-xts-plain64", + "deleted": False, + "created_at": "2020-10-07T07:52:30.000000", + "updated_at": "2020-10-08T07:42:45.000000", + "deleted_at": None, +} + + +class TestTypeEncryption(base.TestCase): + def test_basic(self): + sot = type.TypeEncryption(**TYPE_ENC) + self.assertEqual("encryption", sot.resource_key) + self.assertEqual("encryption", sot.resources_key) + self.assertEqual("/types/%(volume_type_id)s/encryption", sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + self.assertFalse(sot.allow_list) + self.assertTrue(sot.allow_commit) + + def test_new(self): + sot = type.TypeEncryption.new(encryption_id=FAKE_ID) + self.assertEqual(FAKE_ID, sot.encryption_id) + + def test_create(self): + sot = type.TypeEncryption(**TYPE_ENC) + self.assertEqual(TYPE_ENC["volume_type_id"], sot.volume_type_id) + self.assertEqual(TYPE_ENC["encryption_id"], sot.encryption_id) + self.assertEqual(TYPE_ENC["key_size"], sot.key_size) + self.assertEqual(TYPE_ENC["provider"], sot.provider) + self.assertEqual(TYPE_ENC["control_location"], sot.control_location) + self.assertEqual(TYPE_ENC["cipher"], sot.cipher) + self.assertEqual(TYPE_ENC["deleted"], sot.deleted) + self.assertEqual(TYPE_ENC["created_at"], sot.created_at) + self.assertEqual(TYPE_ENC["updated_at"], sot.updated_at) + self.assertEqual(TYPE_ENC["deleted_at"], sot.deleted_at) diff --git a/openstack/tests/unit/block_storage/v3/test_volume.py b/openstack/tests/unit/block_storage/v3/test_volume.py new file mode 100644 index 0000000000..e2c8eb2ab8 --- /dev/null +++ 
b/openstack/tests/unit/block_storage/v3/test_volume.py @@ -0,0 +1,761 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.block_storage.v3 import volume +from openstack import exceptions +from openstack.tests.unit import base + +FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" +IMAGE_METADATA = { + 'container_format': 'bare', + 'min_ram': '64', + 'disk_format': 'qcow2', + 'image_name': 'TestVM', + 'image_id': '625d4f2c-cf67-4af3-afb6-c7220f766947', + 'checksum': '64d7c1cd2b6f60c92c14662941cb7913', + 'min_disk': '0', + 'size': '13167616', +} + +FAKE_HOST = "fake_host@fake_backend#fake_pool" +VOLUME = { + "status": "creating", + "name": "my_volume", + "attachments": [], + "availability_zone": "nova", + "bootable": "false", + "created_at": "2015-03-09T12:14:57.233772", + "updated_at": None, + "description": "something", + "volume_type": "some_type", + "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", + "source_volid": None, + "imageRef": "some_image", + "metadata": {}, + "multiattach": False, + "volume_image_metadata": IMAGE_METADATA, + "id": FAKE_ID, + "size": 10, + "os-vol-host-attr:host": "127.0.0.1", + "os-vol-tenant-attr:tenant_id": "some tenant", + "os-vol-mig-status-attr:migstat": "done", + "os-vol-mig-status-attr:name_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", + "replication_status": "nah", + "os-volume-replication:extended_status": "really nah", + 
"consistencygroup_id": "123asf-asdf123", + "os-volume-replication:driver_data": "ahasadfasdfasdfasdfsdf", + "encrypted": "false", + "OS-SCH-HNT:scheduler_hints": { + "same_host": [ + "a0cf03a5-d921-4877-bb5c-86d26cf818e1", + "8c19174f-4220-44f0-824a-cd1eeef10287", + ] + }, +} + + +class TestVolume(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = '3.71' + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_basic(self): + sot = volume.Volume(VOLUME) + self.assertEqual("volume", sot.resource_key) + self.assertEqual("volumes", sot.resources_key) + self.assertEqual("/volumes", sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + "name": "name", + "status": "status", + "all_projects": "all_tenants", + "user_id": "user_id", + "project_id": "project_id", + "created_at": "created_at", + "updated_at": "updated_at", + "properties": "metadata", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = volume.Volume(**VOLUME) + self.assertEqual(VOLUME["id"], sot.id) + self.assertEqual(VOLUME["status"], sot.status) + self.assertEqual(VOLUME["attachments"], sot.attachments) + self.assertEqual(VOLUME["availability_zone"], sot.availability_zone) + self.assertFalse(sot.is_bootable) + self.assertEqual(VOLUME["created_at"], sot.created_at) + self.assertEqual(VOLUME["updated_at"], sot.updated_at) + self.assertEqual(VOLUME["description"], sot.description) + self.assertEqual(VOLUME["volume_type"], sot.volume_type) + self.assertEqual(VOLUME["snapshot_id"], 
sot.snapshot_id) + self.assertEqual(VOLUME["source_volid"], sot.source_volume_id) + self.assertEqual(VOLUME["metadata"], sot.metadata) + self.assertEqual(VOLUME["multiattach"], sot.is_multiattach) + self.assertEqual( + VOLUME["volume_image_metadata"], sot.volume_image_metadata + ) + self.assertEqual(VOLUME["size"], sot.size) + self.assertEqual(VOLUME["imageRef"], sot.image_id) + self.assertEqual(VOLUME["os-vol-host-attr:host"], sot.host) + self.assertEqual( + VOLUME["os-vol-tenant-attr:tenant_id"], sot.project_id + ) + self.assertEqual( + VOLUME["os-vol-mig-status-attr:migstat"], sot.migration_status + ) + self.assertEqual( + VOLUME["os-vol-mig-status-attr:name_id"], sot.migration_id + ) + self.assertEqual(VOLUME["replication_status"], sot.replication_status) + self.assertEqual( + VOLUME["os-volume-replication:extended_status"], + sot.extended_replication_status, + ) + self.assertEqual( + VOLUME["consistencygroup_id"], sot.consistency_group_id + ) + self.assertEqual( + VOLUME["os-volume-replication:driver_data"], + sot.replication_driver_data, + ) + self.assertFalse(sot.is_encrypted) + self.assertDictEqual( + VOLUME["OS-SCH-HNT:scheduler_hints"], sot.scheduler_hints + ) + + def test_extend(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.extend(self.sess, '20')) + + url = f'volumes/{FAKE_ID}/action' + body = {"os-extend": {"new_size": "20"}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_complete_extend(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.complete_extend(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-extend_volume_completion': {'error': False}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_complete_extend_error(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.complete_extend(self.sess, error=True)) + + url = f'volumes/{FAKE_ID}/action' + body = 
{'os-extend_volume_completion': {'error': True}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_set_volume_readonly(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_readonly(self.sess, True)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-update_readonly_flag': {'readonly': True}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_set_volume_readonly_false(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_readonly(self.sess, False)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-update_readonly_flag': {'readonly': False}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_set_volume_bootable(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_bootable_status(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-set_bootable': {'bootable': True}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_set_volume_bootable_false(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_bootable_status(self.sess, False)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-set_bootable': {'bootable': False}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_set_image_metadata(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.set_image_metadata(self.sess, {'foo': 'bar'})) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-set_image_metadata': {'metadata': {'foo': 'bar'}}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_delete_image_metadata(self): + _volume = copy.deepcopy(VOLUME) + _volume['metadata'] = { + 'foo': 'bar', + 'baz': 'wow', + } + sot = volume.Volume(**_volume) + + self.assertIsNone(sot.delete_image_metadata(self.sess)) + + 
url = f'volumes/{FAKE_ID}/action' + body_a = {'os-unset_image_metadata': 'foo'} + body_b = {'os-unset_image_metadata': 'baz'} + self.sess.post.assert_has_calls( + [ + mock.call( + url, json=body_a, microversion=sot._max_microversion + ), + mock.call( + url, json=body_b, microversion=sot._max_microversion + ), + ] + ) + + def test_delete_image_metadata_item(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.delete_image_metadata_item(self.sess, 'foo')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-unset_image_metadata': 'foo'} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_reset_status(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.reset_status(self.sess, '1', '2', '3')) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-reset_status': { + 'status': '1', + 'attach_status': '2', + 'migration_status': '3', + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_reset_status__single_option(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.reset_status(self.sess, status='1')) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-reset_status': { + 'status': '1', + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + @mock.patch( + 'openstack.utils.require_microversion', + autospec=True, + side_effect=[exceptions.SDKException()], + ) + def test_revert_to_snapshot_before_340(self, mv_mock): + sot = volume.Volume(**VOLUME) + + self.assertRaises( + exceptions.SDKException, sot.revert_to_snapshot, self.sess, '1' + ) + + @mock.patch( + 'openstack.utils.require_microversion', + autospec=True, + side_effect=[None], + ) + def test_revert_to_snapshot_after_340(self, mv_mock): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.revert_to_snapshot(self.sess, '1')) + + url = f'volumes/{FAKE_ID}/action' + body = {'revert': {'snapshot_id': '1'}} + 
self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + mv_mock.assert_called_with(self.sess, '3.40') + + def test_attach_instance(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.attach(self.sess, '1', instance='2')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-attach': {'mountpoint': '1', 'instance_uuid': '2'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_attach_host(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.attach(self.sess, '1', host_name='2')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-attach': {'mountpoint': '1', 'host_name': '2'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_attach_error(self): + sot = volume.Volume(**VOLUME) + + self.assertRaises(ValueError, sot.attach, self.sess, '1') + + def test_detach(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.detach(self.sess, '1')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-detach': {'attachment_id': '1'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_detach_force(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone( + sot.detach(self.sess, '1', force=True, connector={'a': 'b'}) + ) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-force_detach': {'attachment_id': '1', 'connector': {'a': 'b'}} + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_unmanage(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.unmanage(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-unmanage': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_retype(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.retype(self.sess, '1')) + + url = f'volumes/{FAKE_ID}/action' + 
body = {'os-retype': {'new_type': '1'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_retype_mp(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.retype(self.sess, '1', migration_policy='2')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-retype': {'new_type': '1', 'migration_policy': '2'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_migrate(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.migrate(self.sess, host='1')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-migrate_volume': {'host': '1'}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_migrate_flags(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone( + sot.migrate( + self.sess, host='1', force_host_copy=True, lock_volume=True + ) + ) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-migrate_volume': { + 'host': '1', + 'force_host_copy': True, + 'lock_volume': True, + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + @mock.patch( + 'openstack.utils.require_microversion', + autospec=True, + side_effect=[None], + ) + def test_migrate_cluster(self, mv_mock): + sot = volume.Volume(**VOLUME) + + self.assertIsNone( + sot.migrate( + self.sess, cluster='1', force_host_copy=True, lock_volume=True + ) + ) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-migrate_volume': { + 'cluster': '1', + 'force_host_copy': True, + 'lock_volume': True, + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + mv_mock.assert_called_with(self.sess, '3.16') + + def test_complete_migration(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.complete_migration(self.sess, new_volume_id='1')) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-migrate_volume_completion': 
{'new_volume': '1', 'error': False} + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_complete_migration_error(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone( + sot.complete_migration(self.sess, new_volume_id='1', error=True) + ) + + url = f'volumes/{FAKE_ID}/action' + body = { + 'os-migrate_volume_completion': {'new_volume': '1', 'error': True} + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_force_delete(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.force_delete(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-force_delete': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_upload_image(self): + sot = volume.Volume(**VOLUME) + + self.resp = mock.Mock() + self.resp.body = {'os-volume_upload_image': {'a': 'b'}} + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess.post = mock.Mock(return_value=self.resp) + + self.assertDictEqual({'a': 'b'}, sot.upload_to_image(self.sess, '1')) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-volume_upload_image': {'image_name': '1', 'force': False}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + @mock.patch( + 'openstack.utils.require_microversion', + autospec=True, + side_effect=[None], + ) + def test_upload_image_args(self, mv_mock): + sot = volume.Volume(**VOLUME) + + self.resp = mock.Mock() + self.resp.body = {'os-volume_upload_image': {'a': 'b'}} + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess.post = mock.Mock(return_value=self.resp) + + self.assertDictEqual( + {'a': 'b'}, + sot.upload_to_image( + self.sess, + '1', + disk_format='2', + container_format='3', + visibility='4', + protected='5', + ), + ) + + url = f'volumes/{FAKE_ID}/action' + body = { 
+ 'os-volume_upload_image': { + 'image_name': '1', + 'force': False, + 'disk_format': '2', + 'container_format': '3', + 'visibility': '4', + 'protected': '5', + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + mv_mock.assert_called_with(self.sess, '3.1') + + def test_reserve(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.reserve(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-reserve': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_unreserve(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.unreserve(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-unreserve': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_begin_detaching(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.begin_detaching(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-begin_detaching': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_abort_detaching(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.abort_detaching(self.sess)) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-roll_detaching': None} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_init_attachment(self): + sot = volume.Volume(**VOLUME) + + self.resp = mock.Mock() + self.resp.body = {'connection_info': {'c': 'd'}} + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess.post = mock.Mock(return_value=self.resp) + self.assertEqual( + {'c': 'd'}, sot.init_attachment(self.sess, {'a': 'b'}) + ) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-initialize_connection': {'connector': {'a': 'b'}}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) 
+ + def test_terminate_attachment(self): + sot = volume.Volume(**VOLUME) + + self.assertIsNone(sot.terminate_attachment(self.sess, {'a': 'b'})) + + url = f'volumes/{FAKE_ID}/action' + body = {'os-terminate_connection': {'connector': {'a': 'b'}}} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test__prepare_request_body(self): + sot = volume.Volume(**VOLUME) + body = sot._prepare_request_body(patch=False, prepend_key=True) + original_body = copy.deepcopy(sot._body.dirty) + # Verify that scheduler hints aren't modified after preparing request + # but also not part of 'volume' JSON object + self.assertEqual( + original_body['OS-SCH-HNT:scheduler_hints'], + body['OS-SCH-HNT:scheduler_hints'], + ) + # Pop scheduler hints to verify other parameters in body + original_body.pop('OS-SCH-HNT:scheduler_hints') + # Verify that other request parameters are same but in 'volume' JSON + self.assertEqual(original_body, body['volume']) + + def test_create_scheduler_hints(self): + sot = volume.Volume(**VOLUME) + sot._translate_response = mock.Mock() + sot.create(self.sess) + + url = '/volumes' + volume_body = copy.deepcopy(VOLUME) + scheduler_hints = volume_body.pop('OS-SCH-HNT:scheduler_hints') + body = { + "volume": volume_body, + 'OS-SCH-HNT:scheduler_hints': scheduler_hints, + } + self.sess.post.assert_called_with( + url, + json=body, + microversion=sot._max_microversion, + headers={}, + params={}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_manage(self, mock_mv): + resp = mock.Mock() + resp.body = {'volume': copy.deepcopy(VOLUME)} + resp.json = mock.Mock(return_value=resp.body) + resp.headers = {} + resp.status_code = 202 + self.sess.post = mock.Mock(return_value=resp) + sot = volume.Volume.manage(self.sess, host=FAKE_HOST, ref=FAKE_ID) + self.assertIsNotNone(sot) + url = '/manageable_volumes' + body = { + 'volume': { + 'host': FAKE_HOST, + 'ref': 
FAKE_ID, + 'name': None, + 'description': None, + 'volume_type': None, + 'availability_zone': None, + 'metadata': None, + 'bootable': False, + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_manage_pre_38(self, mock_mv): + resp = mock.Mock() + resp.body = {'volume': copy.deepcopy(VOLUME)} + resp.json = mock.Mock(return_value=resp.body) + resp.headers = {} + resp.status_code = 202 + self.sess.post = mock.Mock(return_value=resp) + sot = volume.Volume.manage(self.sess, host=FAKE_HOST, ref=FAKE_ID) + self.assertIsNotNone(sot) + url = '/os-volume-manage' + body = { + 'volume': { + 'host': FAKE_HOST, + 'ref': FAKE_ID, + 'name': None, + 'description': None, + 'volume_type': None, + 'availability_zone': None, + 'metadata': None, + 'bootable': False, + } + } + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) + + def test_set_microversion(self): + sot = volume.Volume(**VOLUME) + self.sess.default_microversion = '3.50' + self.assertIsNone(sot.extend(self.sess, '20')) + + url = f'volumes/{FAKE_ID}/action' + body = {"os-extend": {"new_size": "20"}} + self.sess.post.assert_called_with(url, json=body, microversion="3.50") diff --git a/openstack/tests/unit/block_store/test_block_store_service.py b/openstack/tests/unit/block_store/test_block_store_service.py deleted file mode 100644 index f02d12ac5c..0000000000 --- a/openstack/tests/unit/block_store/test_block_store_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.block_store import block_store_service - - -class TestBlockStoreService(testtools.TestCase): - - def test_service(self): - sot = block_store_service.BlockStoreService() - self.assertEqual("volume", sot.service_type) - self.assertEqual("public", sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual("v2", sot.valid_versions[0].module) - self.assertEqual("v2", sot.valid_versions[0].path) diff --git a/openstack/tests/unit/block_store/v2/test_proxy.py b/openstack/tests/unit/block_store/v2/test_proxy.py deleted file mode 100644 index 4af3b4d81a..0000000000 --- a/openstack/tests/unit/block_store/v2/test_proxy.py +++ /dev/null @@ -1,88 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack.block_store.v2 import _proxy -from openstack.block_store.v2 import snapshot -from openstack.block_store.v2 import type -from openstack.block_store.v2 import volume -from openstack.tests.unit import test_proxy_base2 - - -class TestVolumeProxy(test_proxy_base2.TestProxyBase): - def setUp(self): - super(TestVolumeProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - def test_snapshot_get(self): - self.verify_get(self.proxy.get_snapshot, snapshot.Snapshot) - - def test_snapshots_detailed(self): - self.verify_list(self.proxy.snapshots, snapshot.SnapshotDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) - - def test_snapshots_not_detailed(self): - self.verify_list(self.proxy.snapshots, snapshot.Snapshot, - paginated=True, - method_kwargs={"details": False, "query": 1}, - expected_kwargs={"query": 1}) - - def test_snapshot_create_attrs(self): - self.verify_create(self.proxy.create_snapshot, snapshot.Snapshot) - - def test_snapshot_delete(self): - self.verify_delete(self.proxy.delete_snapshot, - snapshot.Snapshot, False) - - def test_snapshot_delete_ignore(self): - self.verify_delete(self.proxy.delete_snapshot, - snapshot.Snapshot, True) - - def test_type_get(self): - self.verify_get(self.proxy.get_type, type.Type) - - def test_types(self): - self.verify_list(self.proxy.types, type.Type, paginated=False) - - def test_type_create_attrs(self): - self.verify_create(self.proxy.create_type, type.Type) - - def test_type_delete(self): - self.verify_delete(self.proxy.delete_type, type.Type, False) - - def test_type_delete_ignore(self): - self.verify_delete(self.proxy.delete_type, type.Type, True) - - def test_volume_get(self): - self.verify_get(self.proxy.get_volume, volume.Volume) - - def test_volumes_detailed(self): - self.verify_list(self.proxy.volumes, volume.VolumeDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) - - def 
test_volumes_not_detailed(self): - self.verify_list(self.proxy.volumes, volume.Volume, - paginated=True, - method_kwargs={"details": False, "query": 1}, - expected_kwargs={"query": 1}) - - def test_volume_create_attrs(self): - self.verify_create(self.proxy.create_volume, volume.Volume) - - def test_volume_delete(self): - self.verify_delete(self.proxy.delete_volume, volume.Volume, False) - - def test_volume_delete_ignore(self): - self.verify_delete(self.proxy.delete_volume, volume.Volume, True) diff --git a/openstack/tests/unit/block_store/v2/test_snapshot.py b/openstack/tests/unit/block_store/v2/test_snapshot.py deleted file mode 100644 index 79388489a3..0000000000 --- a/openstack/tests/unit/block_store/v2/test_snapshot.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.block_store.v2 import snapshot - -FAKE_ID = "ffa9bc5e-1172-4021-acaf-cdcd78a9584d" - -SNAPSHOT = { - "status": "creating", - "description": "Daily backup", - "created_at": "2015-03-09T12:14:57.233772", - "metadata": {}, - "volume_id": "5aa119a8-d25b-45a7-8d1b-88e127885635", - "size": 1, - "id": FAKE_ID, - "name": "snap-001", - "force": "true", -} - -DETAILS = { - "os-extended-snapshot-attributes:progress": "100%", - "os-extended-snapshot-attributes:project_id": - "0c2eba2c5af04d3f9e9d0d410b371fde" -} - -DETAILED_SNAPSHOT = SNAPSHOT.copy() -DETAILED_SNAPSHOT.update(**DETAILS) - - -class TestSnapshot(testtools.TestCase): - - def test_basic(self): - sot = snapshot.Snapshot(SNAPSHOT) - self.assertEqual("snapshot", sot.resource_key) - self.assertEqual("snapshots", sot.resources_key) - self.assertEqual("/snapshots", sot.base_path) - self.assertEqual("volume", sot.service.service_type) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - - self.assertDictEqual({"name": "name", - "status": "status", - "all_tenants": "all_tenants", - "volume_id": "volume_id", - "limit": "limit", - "marker": "marker"}, - sot._query_mapping._mapping) - - def test_create_basic(self): - sot = snapshot.Snapshot(**SNAPSHOT) - self.assertEqual(SNAPSHOT["id"], sot.id) - self.assertEqual(SNAPSHOT["status"], sot.status) - self.assertEqual(SNAPSHOT["created_at"], sot.created_at) - self.assertEqual(SNAPSHOT["metadata"], sot.metadata) - self.assertEqual(SNAPSHOT["volume_id"], sot.volume_id) - self.assertEqual(SNAPSHOT["size"], sot.size) - self.assertEqual(SNAPSHOT["name"], sot.name) - self.assertTrue(sot.is_forced) - - -class TestSnapshotDetail(testtools.TestCase): - - def test_basic(self): - sot = snapshot.SnapshotDetail(DETAILED_SNAPSHOT) - self.assertIsInstance(sot, snapshot.Snapshot) - self.assertEqual("/snapshots/detail", sot.base_path) - 
- def test_create_detailed(self): - sot = snapshot.SnapshotDetail(**DETAILED_SNAPSHOT) - - self.assertEqual( - DETAILED_SNAPSHOT["os-extended-snapshot-attributes:progress"], - sot.progress) - self.assertEqual( - DETAILED_SNAPSHOT["os-extended-snapshot-attributes:project_id"], - sot.project_id) diff --git a/openstack/tests/unit/block_store/v2/test_type.py b/openstack/tests/unit/block_store/v2/test_type.py deleted file mode 100644 index d841b35ca1..0000000000 --- a/openstack/tests/unit/block_store/v2/test_type.py +++ /dev/null @@ -1,49 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.block_store.v2 import type - -FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" -TYPE = { - "extra_specs": { - "capabilities": "gpu" - }, - "id": FAKE_ID, - "name": "SSD" -} - - -class TestType(testtools.TestCase): - - def test_basic(self): - sot = type.Type(**TYPE) - self.assertEqual("volume_type", sot.resource_key) - self.assertEqual("volume_types", sot.resources_key) - self.assertEqual("/types", sot.base_path) - self.assertEqual("volume", sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - self.assertFalse(sot.allow_update) - - def test_new(self): - sot = type.Type.new(id=FAKE_ID) - self.assertEqual(FAKE_ID, sot.id) - - def test_create(self): - sot = type.Type(**TYPE) - self.assertEqual(TYPE["id"], sot.id) - self.assertEqual(TYPE["extra_specs"], sot.extra_specs) - self.assertEqual(TYPE["name"], sot.name) diff --git a/openstack/tests/unit/block_store/v2/test_volume.py b/openstack/tests/unit/block_store/v2/test_volume.py deleted file mode 100644 index 825cb910fb..0000000000 --- a/openstack/tests/unit/block_store/v2/test_volume.py +++ /dev/null @@ -1,119 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import copy - -import testtools - -from openstack.block_store.v2 import volume - -FAKE_ID = "6685584b-1eac-4da6-b5c3-555430cf68ff" - -VOLUME = { - "status": "creating", - "name": "my_volume", - "attachments": [], - "availability_zone": "nova", - "bootable": "false", - "created_at": "2015-03-09T12:14:57.233772", - "description": "something", - "volume_type": "some_type", - "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", - "source_volid": None, - "imageRef": "some_image", - "metadata": {}, - "id": FAKE_ID, - "size": 10 -} - -DETAILS = { - "os-vol-host-attr:host": "127.0.0.1", - "os-vol-tenant-attr:tenant_id": "some tenant", - "os-vol-mig-status-attr:migstat": "done", - "os-vol-mig-status-attr:name_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", - "replication_status": "nah", - "os-volume-replication:extended_status": "really nah", - "consistencygroup_id": "123asf-asdf123", - "os-volume-replication:driver_data": "ahasadfasdfasdfasdfsdf", - "snapshot_id": "93c2e2aa-7744-4fd6-a31a-80c4726b08d7", - "encrypted": "false", -} - -VOLUME_DETAIL = copy.copy(VOLUME) -VOLUME_DETAIL.update(DETAILS) - - -class TestVolume(testtools.TestCase): - - def test_basic(self): - sot = volume.Volume(VOLUME) - self.assertEqual("volume", sot.resource_key) - self.assertEqual("volumes", sot.resources_key) - self.assertEqual("/volumes", sot.base_path) - self.assertEqual("volume", sot.service.service_type) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - - self.assertDictEqual({"name": "name", - "status": "status", - "all_tenants": "all_tenants", - "project_id": "project_id", - "limit": "limit", - "marker": "marker"}, - sot._query_mapping._mapping) - - def test_create(self): - sot = volume.Volume(**VOLUME) - self.assertEqual(VOLUME["id"], sot.id) - self.assertEqual(VOLUME["status"], sot.status) - self.assertEqual(VOLUME["attachments"], sot.attachments) - 
self.assertEqual(VOLUME["availability_zone"], sot.availability_zone) - self.assertFalse(sot.is_bootable) - self.assertEqual(VOLUME["created_at"], sot.created_at) - self.assertEqual(VOLUME["description"], sot.description) - self.assertEqual(VOLUME["volume_type"], sot.volume_type) - self.assertEqual(VOLUME["snapshot_id"], sot.snapshot_id) - self.assertEqual(VOLUME["source_volid"], sot.source_volume_id) - self.assertEqual(VOLUME["metadata"], sot.metadata) - self.assertEqual(VOLUME["size"], sot.size) - self.assertEqual(VOLUME["imageRef"], sot.image_id) - - -class TestVolumeDetail(testtools.TestCase): - - def test_basic(self): - sot = volume.VolumeDetail(VOLUME_DETAIL) - self.assertIsInstance(sot, volume.Volume) - self.assertEqual("/volumes/detail", sot.base_path) - - def test_create(self): - sot = volume.VolumeDetail(**VOLUME_DETAIL) - self.assertEqual(VOLUME_DETAIL["os-vol-host-attr:host"], sot.host) - self.assertEqual(VOLUME_DETAIL["os-vol-tenant-attr:tenant_id"], - sot.project_id) - self.assertEqual(VOLUME_DETAIL["os-vol-mig-status-attr:migstat"], - sot.migration_status) - self.assertEqual(VOLUME_DETAIL["os-vol-mig-status-attr:name_id"], - sot.migration_id) - self.assertEqual(VOLUME_DETAIL["replication_status"], - sot.replication_status) - self.assertEqual( - VOLUME_DETAIL["os-volume-replication:extended_status"], - sot.extended_replication_status) - self.assertEqual(VOLUME_DETAIL["consistencygroup_id"], - sot.consistency_group_id) - self.assertEqual(VOLUME_DETAIL["os-volume-replication:driver_data"], - sot.replication_driver_data) - self.assertFalse(sot.is_encrypted) diff --git a/openstack/tests/unit/cloud/__init__.py b/openstack/tests/unit/cloud/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/cloud/test__utils.py b/openstack/tests/unit/cloud/test__utils.py new file mode 100644 index 0000000000..1fd1a3f3c7 --- /dev/null +++ b/openstack/tests/unit/cloud/test__utils.py @@ -0,0 +1,404 @@ +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock +from uuid import uuid4 + +import testtools + +from openstack.cloud import _utils +from openstack import exceptions +from openstack.tests.unit import base + +RANGE_DATA = [ + dict(id=1, key1=1, key2=5), + dict(id=2, key1=1, key2=20), + dict(id=3, key1=2, key2=10), + dict(id=4, key1=2, key2=30), + dict(id=5, key1=3, key2=40), + dict(id=6, key1=3, key2=40), +] + + +class TestUtils(base.TestCase): + def test__filter_list_name_or_id(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto') + data = [el1, el2] + ret = _utils._filter_list(data, 'donald', None) + self.assertEqual([el1], ret) + + def test__filter_list_name_or_id_special(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto[2017-01-10]', None) + self.assertEqual([el2], ret) + + def test__filter_list_name_or_id_partial_bad(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto[2017-01]', None) + self.assertEqual([], ret) + + def test__filter_list_name_or_id_partial_glob(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto*', None) + self.assertEqual([el2], ret) + + def test__filter_list_name_or_id_non_glob_glob(self): + el1 = dict(id=100, name='donald') + el2 = 
dict(id=200, name='pluto[2017-01-10]') + data = [el1, el2] + ret = _utils._filter_list(data, 'pluto', None) + self.assertEqual([], ret) + + def test__filter_list_name_or_id_glob(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto') + el3 = dict(id=200, name='pluto-2') + data = [el1, el2, el3] + ret = _utils._filter_list(data, 'pluto*', None) + self.assertEqual([el2, el3], ret) + + def test__filter_list_name_or_id_glob_not_found(self): + el1 = dict(id=100, name='donald') + el2 = dict(id=200, name='pluto') + el3 = dict(id=200, name='pluto-2') + data = [el1, el2, el3] + ret = _utils._filter_list(data, 'q*', None) + self.assertEqual([], ret) + + def test__filter_list_unicode(self): + el1 = dict( + id=100, + name='中文', + last='duck', + other=dict(category='duck', financial=dict(status='poor')), + ) + el2 = dict( + id=200, + name='中文', + last='trump', + other=dict(category='human', financial=dict(status='rich')), + ) + el3 = dict( + id=300, + name='donald', + last='ronald mac', + other=dict(category='clown', financial=dict(status='rich')), + ) + data = [el1, el2, el3] + ret = _utils._filter_list( + data, '中文', {'other': {'financial': {'status': 'rich'}}} + ) + self.assertEqual([el2], ret) + + def test__filter_list_filter(self): + el1 = dict(id=100, name='donald', other='duck') + el2 = dict(id=200, name='donald', other='trump') + data = [el1, el2] + ret = _utils._filter_list(data, 'donald', {'other': 'duck'}) + self.assertEqual([el1], ret) + + def test__filter_list_filter_jmespath(self): + el1 = dict(id=100, name='donald', other='duck') + el2 = dict(id=200, name='donald', other='trump') + data = [el1, el2] + ret = _utils._filter_list(data, 'donald', "[?other == `duck`]") + self.assertEqual([el1], ret) + + def test__filter_list_dict1(self): + el1 = dict( + id=100, name='donald', last='duck', other=dict(category='duck') + ) + el2 = dict( + id=200, name='donald', last='trump', other=dict(category='human') + ) + el3 = dict( + id=300, + name='donald', + 
last='ronald mac', + other=dict(category='clown'), + ) + data = [el1, el2, el3] + ret = _utils._filter_list( + data, 'donald', {'other': {'category': 'clown'}} + ) + self.assertEqual([el3], ret) + + def test__filter_list_dict2(self): + el1 = dict( + id=100, + name='donald', + last='duck', + other=dict(category='duck', financial=dict(status='poor')), + ) + el2 = dict( + id=200, + name='donald', + last='trump', + other=dict(category='human', financial=dict(status='rich')), + ) + el3 = dict( + id=300, + name='donald', + last='ronald mac', + other=dict(category='clown', financial=dict(status='rich')), + ) + data = [el1, el2, el3] + ret = _utils._filter_list( + data, 'donald', {'other': {'financial': {'status': 'rich'}}} + ) + self.assertEqual([el2, el3], ret) + + def test_safe_dict_min_ints(self): + """Test integer comparison""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_strs(self): + """Test integer as strings comparison""" + data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_None(self): + """Test None values""" + data = [{'f1': 3}, {'f1': None}, {'f1': 1}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_key_missing(self): + """Test missing key for an entry still works""" + data = [{'f1': 3}, {'x': 2}, {'f1': 1}] + retval = _utils.safe_dict_min('f1', data) + self.assertEqual(1, retval) + + def test_safe_dict_min_key_not_found(self): + """Test key not found in any elements returns None""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_min('doesnotexist', data) + self.assertIsNone(retval) + + def test_safe_dict_min_not_int(self): + """Test non-integer key value raises OSCE""" + data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}] + with testtools.ExpectedException( + exceptions.SDKException, + "Search for 
minimum value failed. " + "Value for f1 is not an integer: aaa", + ): + _utils.safe_dict_min('f1', data) + + def test_safe_dict_max_ints(self): + """Test integer comparison""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_strs(self): + """Test integer as strings comparison""" + data = [{'f1': '3'}, {'f1': '2'}, {'f1': '1'}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_None(self): + """Test None values""" + data = [{'f1': 3}, {'f1': None}, {'f1': 1}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_key_missing(self): + """Test missing key for an entry still works""" + data = [{'f1': 3}, {'x': 2}, {'f1': 1}] + retval = _utils.safe_dict_max('f1', data) + self.assertEqual(3, retval) + + def test_safe_dict_max_key_not_found(self): + """Test key not found in any elements returns None""" + data = [{'f1': 3}, {'f1': 2}, {'f1': 1}] + retval = _utils.safe_dict_max('doesnotexist', data) + self.assertIsNone(retval) + + def test_safe_dict_max_not_int(self): + """Test non-integer key value raises OSCE""" + data = [{'f1': 3}, {'f1': "aaa"}, {'f1': 1}] + with testtools.ExpectedException( + exceptions.SDKException, + "Search for maximum value failed. 
" + "Value for f1 is not an integer: aaa", + ): + _utils.safe_dict_max('f1', data) + + def test_parse_range_None(self): + self.assertIsNone(_utils.parse_range(None)) + + def test_parse_range_invalid(self): + self.assertIsNone(_utils.parse_range("1024") + self.assertIsInstance(retval, tuple) + self.assertEqual(">", retval[0]) + self.assertEqual(1024, retval[1]) + + def test_parse_range_le(self): + retval = _utils.parse_range("<=1024") + self.assertIsInstance(retval, tuple) + self.assertEqual("<=", retval[0]) + self.assertEqual(1024, retval[1]) + + def test_parse_range_ge(self): + retval = _utils.parse_range(">=1024") + self.assertIsInstance(retval, tuple) + self.assertEqual(">=", retval[0]) + self.assertEqual(1024, retval[1]) + + def test_range_filter_min(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "min") + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual(RANGE_DATA[:2], retval) + + def test_range_filter_max(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "max") + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual(RANGE_DATA[-2:], retval) + + def test_range_filter_range(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "<3") + self.assertIsInstance(retval, list) + self.assertEqual(4, len(retval)) + self.assertEqual(RANGE_DATA[:4], retval) + + def test_range_filter_exact(self): + retval = _utils.range_filter(RANGE_DATA, "key1", "2") + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual(RANGE_DATA[2:4], retval) + + def test_range_filter_invalid_int(self): + with testtools.ExpectedException( + exceptions.SDKException, "Invalid range value: <1A0" + ): + _utils.range_filter(RANGE_DATA, "key1", "<1A0") + + def test_range_filter_invalid_op(self): + with testtools.ExpectedException( + exceptions.SDKException, "Invalid range value: <>100" + ): + _utils.range_filter(RANGE_DATA, "key1", "<>100") + + def 
test_get_entity_pass_object(self): + obj = mock.Mock(id=uuid4().hex) + self.cloud.use_direct_get = True + self.assertEqual(obj, _utils._get_entity(self.cloud, '', obj, {})) + + def test_get_entity_pass_dict(self): + d = dict(id=uuid4().hex) + self.cloud.use_direct_get = True + self.assertEqual(d, _utils._get_entity(self.cloud, '', d, {})) + + def test_get_entity_no_use_direct_get(self): + # test we are defaulting to the search_ methods + # if the use_direct_get flag is set to False(default). + uuid = uuid4().hex + resource = 'network' + func = f'search_{resource}s' + filters = {} + with mock.patch.object(self.cloud, func) as search: + _utils._get_entity(self.cloud, resource, uuid, filters) + search.assert_called_once_with(uuid, filters) + + def test_get_entity_no_uuid_like(self): + # test we are defaulting to the search_ methods + # if the name_or_id param is a name(string) but not a uuid. + self.cloud.use_direct_get = True + name = 'name_no_uuid' + resource = 'network' + func = f'search_{resource}s' + filters = {} + with mock.patch.object(self.cloud, func) as search: + _utils._get_entity(self.cloud, resource, name, filters) + search.assert_called_once_with(name, filters) + + def test_get_entity_pass_uuid(self): + uuid = uuid4().hex + self.cloud.use_direct_get = True + resources = [ + 'flavor', + 'image', + 'volume', + 'network', + 'subnet', + 'port', + 'floating_ip', + 'security_group', + ] + for r in resources: + f = f'get_{r}_by_id' + with mock.patch.object(self.cloud, f) as get: + _utils._get_entity(self.cloud, r, uuid, {}) + get.assert_called_once_with(uuid) + + def test_get_entity_pass_search_methods(self): + self.cloud.use_direct_get = True + resources = [ + 'flavor', + 'image', + 'volume', + 'network', + 'subnet', + 'port', + 'floating_ip', + 'security_group', + ] + filters = {} + name = 'name_no_uuid' + for r in resources: + f = f'search_{r}s' + with mock.patch.object(self.cloud, f) as search: + _utils._get_entity(self.cloud, r, name, {}) + 
search.assert_called_once_with(name, filters) + + def test_get_entity_get_and_search(self): + resources = [ + 'flavor', + 'image', + 'volume', + 'network', + 'subnet', + 'port', + 'floating_ip', + 'security_group', + ] + for r in resources: + self.assertTrue(hasattr(self.cloud, f'get_{r}_by_id')) + self.assertTrue(hasattr(self.cloud, f'search_{r}s')) diff --git a/openstack/tests/unit/cloud/test_accelerator.py b/openstack/tests/unit/cloud/test_accelerator.py new file mode 100644 index 0000000000..2fdb4e2eb0 --- /dev/null +++ b/openstack/tests/unit/cloud/test_accelerator.py @@ -0,0 +1,407 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy +import uuid + +from openstack.tests.unit import base + + +DEP_UUID = uuid.uuid4().hex +DEP_DICT = { + 'uuid': DEP_UUID, + 'name': 'dep_name', + 'parent_id': None, + 'root_id': 1, + 'num_accelerators': 4, + 'device_id': 0, +} + +DEV_UUID = uuid.uuid4().hex +DEV_DICT = { + 'id': 1, + 'uuid': DEV_UUID, + 'name': 'dev_name', + 'type': 'test_type', + 'vendor': '0x8086', + 'model': 'test_model', + 'std_board_info': '{"product_id": "0x09c4"}', + 'vendor_board_info': 'test_vb_info', +} + +DEV_PROF_UUID = uuid.uuid4().hex +DEV_PROF_GROUPS = [ + { + "resources:ACCELERATOR_FPGA": "1", + "trait:CUSTOM_FPGA_INTEL_PAC_ARRIA10": "required", + "trait:CUSTOM_FUNCTION_ID_3AFB": "required", + }, + { + "resources:CUSTOM_ACCELERATOR_FOO": "2", + "resources:CUSTOM_MEMORY": "200", + "trait:CUSTOM_TRAIT_ALWAYS": "required", + }, +] +DEV_PROF_DICT = { + "id": 1, + "uuid": DEV_PROF_UUID, + "name": 'afaas_example_1', + "groups": DEV_PROF_GROUPS, +} + +NEW_DEV_PROF_DICT = copy.copy(DEV_PROF_DICT) + +ARQ_UUID = uuid.uuid4().hex +ARQ_DEV_RP_UUID = uuid.uuid4().hex +ARQ_INSTANCE_UUID = uuid.uuid4().hex +ARQ_ATTACH_INFO_STR = ( + '{"bus": "5e", "device": "00", "domain": "0000", "function": "1"}' +) +ARQ_DICT = { + 'uuid': ARQ_UUID, + 'hostname': 'test_hostname', + 'device_profile_name': 'fake-devprof', + 'device_profile_group_id': 0, + 'device_rp_uuid': ARQ_DEV_RP_UUID, + 'instance_uuid': ARQ_INSTANCE_UUID, + 'attach_handle_type': 'PCI', + 'attach_handle_info': ARQ_ATTACH_INFO_STR, +} + +NEW_ARQ_DICT = copy.copy(ARQ_DICT) + + +class TestAccelerator(base.TestCase): + def setUp(self): + super().setUp() + self.use_cyborg() + + def test_list_deployables(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', 'public', append=['v2', 'deployables'] + ), + json={'deployables': [DEP_DICT]}, + ), + ] + ) + dep_list = self.cloud.list_deployables() + self.assertEqual(len(dep_list), 1) + self.assertEqual(dep_list[0].id, DEP_DICT['uuid']) + 
self.assertEqual(dep_list[0].name, DEP_DICT['name']) + self.assertEqual(dep_list[0].parent_id, DEP_DICT['parent_id']) + self.assertEqual(dep_list[0].root_id, DEP_DICT['root_id']) + self.assertEqual( + dep_list[0].num_accelerators, DEP_DICT['num_accelerators'] + ) + self.assertEqual(dep_list[0].device_id, DEP_DICT['device_id']) + self.assert_calls() + + def test_list_devices(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', 'public', append=['v2', 'devices'] + ), + json={'devices': [DEV_DICT]}, + ), + ] + ) + dev_list = self.cloud.list_devices() + self.assertEqual(len(dev_list), 1) + self.assertEqual(dev_list[0].id, DEV_DICT['id']) + self.assertEqual(dev_list[0].uuid, DEV_DICT['uuid']) + self.assertEqual(dev_list[0].name, DEV_DICT['name']) + self.assertEqual(dev_list[0].type, DEV_DICT['type']) + self.assertEqual(dev_list[0].vendor, DEV_DICT['vendor']) + self.assertEqual(dev_list[0].model, DEV_DICT['model']) + self.assertEqual( + dev_list[0].std_board_info, DEV_DICT['std_board_info'] + ) + self.assertEqual( + dev_list[0].vendor_board_info, DEV_DICT['vendor_board_info'] + ) + self.assert_calls() + + def test_list_device_profiles(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=['v2', 'device_profiles'], + ), + json={'device_profiles': [DEV_PROF_DICT]}, + ), + ] + ) + dev_prof_list = self.cloud.list_device_profiles() + self.assertEqual(len(dev_prof_list), 1) + self.assertEqual(dev_prof_list[0].id, DEV_PROF_DICT['id']) + self.assertEqual(dev_prof_list[0].uuid, DEV_PROF_DICT['uuid']) + self.assertEqual(dev_prof_list[0].name, DEV_PROF_DICT['name']) + self.assertEqual(dev_prof_list[0].groups, DEV_PROF_DICT['groups']) + self.assert_calls() + + def test_create_device_profile(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=['v2', 'device_profiles'], + ), + json=NEW_DEV_PROF_DICT, + ) + 
] + ) + + attrs = { + 'name': NEW_DEV_PROF_DICT['name'], + 'groups': NEW_DEV_PROF_DICT['groups'], + } + + self.assertTrue(self.cloud.create_device_profile(attrs)) + self.assert_calls() + + def test_delete_device_profile(self, filters=None): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ + 'v2', + 'device_profiles', + DEV_PROF_DICT['name'], + ], + ), + json={"device_profiles": [DEV_PROF_DICT]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ + 'v2', + 'device_profiles', + DEV_PROF_DICT['name'], + ], + ), + json=DEV_PROF_DICT, + ), + ] + ) + self.assertTrue( + self.cloud.delete_device_profile(DEV_PROF_DICT['name'], filters) + ) + self.assert_calls() + + def test_list_accelerator_requests(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=['v2', 'accelerator_requests'], + ), + json={'arqs': [ARQ_DICT]}, + ), + ] + ) + arq_list = self.cloud.list_accelerator_requests() + self.assertEqual(len(arq_list), 1) + self.assertEqual(arq_list[0].uuid, ARQ_DICT['uuid']) + self.assertEqual( + arq_list[0].device_profile_name, ARQ_DICT['device_profile_name'] + ) + self.assertEqual( + arq_list[0].device_profile_group_id, + ARQ_DICT['device_profile_group_id'], + ) + self.assertEqual( + arq_list[0].device_rp_uuid, ARQ_DICT['device_rp_uuid'] + ) + self.assertEqual(arq_list[0].instance_uuid, ARQ_DICT['instance_uuid']) + self.assertEqual( + arq_list[0].attach_handle_type, ARQ_DICT['attach_handle_type'] + ) + self.assertEqual( + arq_list[0].attach_handle_info, ARQ_DICT['attach_handle_info'] + ) + self.assert_calls() + + def test_create_accelerator_request(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=['v2', 'accelerator_requests'], + ), + json=NEW_ARQ_DICT, + ), + ] + ) + + attrs = { + 'device_profile_name': 
NEW_ARQ_DICT['device_profile_name'], + 'device_profile_group_id': NEW_ARQ_DICT['device_profile_group_id'], + } + + self.assertTrue(self.cloud.create_accelerator_request(attrs)) + self.assert_calls() + + def test_delete_accelerator_request(self, filters=None): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ + 'v2', + 'accelerator_requests', + ARQ_DICT['uuid'], + ], + ), + json={"accelerator_requests": [ARQ_DICT]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ + 'v2', + 'accelerator_requests', + ARQ_DICT['uuid'], + ], + ), + json=ARQ_DICT, + ), + ] + ) + self.assertTrue( + self.cloud.delete_accelerator_request(ARQ_DICT['uuid'], filters) + ) + self.assert_calls() + + def test_bind_accelerator_request(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ + 'v2', + 'accelerator_requests', + ARQ_DICT['uuid'], + ], + ), + json={"accelerator_requests": [ARQ_DICT]}, + ), + dict( + method='PATCH', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ + 'v2', + 'accelerator_requests', + ARQ_DICT['uuid'], + ], + ), + json=ARQ_DICT, + ), + ] + ) + properties = [ + {'path': '/hostname', 'value': ARQ_DICT['hostname'], 'op': 'add'}, + { + 'path': '/instance_uuid', + 'value': ARQ_DICT['instance_uuid'], + 'op': 'add', + }, + { + 'path': '/device_rp_uuid', + 'value': ARQ_DICT['device_rp_uuid'], + 'op': 'add', + }, + ] + + self.assertTrue( + self.cloud.bind_accelerator_request(ARQ_DICT['uuid'], properties) + ) + self.assert_calls() + + def test_unbind_accelerator_request(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ + 'v2', + 'accelerator_requests', + ARQ_DICT['uuid'], + ], + ), + json={"accelerator_requests": [ARQ_DICT]}, + ), + dict( + method='PATCH', + uri=self.get_mock_url( + 'accelerator', + 'public', + append=[ 
+ 'v2', + 'accelerator_requests', + ARQ_DICT['uuid'], + ], + ), + json=ARQ_DICT, + ), + ] + ) + + properties = [ + {'path': '/hostname', 'op': 'remove'}, + {'path': '/instance_uuid', 'op': 'remove'}, + {'path': '/device_rp_uuid', 'op': 'remove'}, + ] + + self.assertTrue( + self.cloud.unbind_accelerator_request(ARQ_DICT['uuid'], properties) + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_aggregate.py b/openstack/tests/unit/cloud/test_aggregate.py new file mode 100644 index 0000000000..d0d8f84539 --- /dev/null +++ b/openstack/tests/unit/cloud/test_aggregate.py @@ -0,0 +1,279 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestAggregate(base.TestCase): + def setUp(self): + super().setUp() + self.aggregate_name = self.getUniqueString('aggregate') + self.fake_aggregate = fakes.make_fake_aggregate(1, self.aggregate_name) + self.use_compute_discovery() + + def test_create_aggregate(self): + create_aggregate = self.fake_aggregate.copy() + del create_aggregate['metadata'] + del create_aggregate['hosts'] + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates'] + ), + json={'aggregate': create_aggregate}, + validate=dict( + json={ + 'aggregate': { + 'name': self.aggregate_name, + 'availability_zone': None, + } + } + ), + ), + ] + ) + self.cloud.create_aggregate(name=self.aggregate_name) + + self.assert_calls() + + def test_create_aggregate_with_az(self): + availability_zone = 'az1' + az_aggregate = fakes.make_fake_aggregate( + 1, self.aggregate_name, availability_zone=availability_zone + ) + + create_aggregate = az_aggregate.copy() + del create_aggregate['metadata'] + del create_aggregate['hosts'] + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates'] + ), + json={'aggregate': create_aggregate}, + validate=dict( + json={ + 'aggregate': { + 'name': self.aggregate_name, + 'availability_zone': availability_zone, + } + } + ), + ), + ] + ) + + self.cloud.create_aggregate( + name=self.aggregate_name, availability_zone=availability_zone + ) + + self.assert_calls() + + def test_delete_aggregate(self): + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + ), + ] + ) + + self.assertTrue(self.cloud.delete_aggregate('1')) + + self.assert_calls() + + def test_delete_aggregate_by_name(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + 
append=['os-aggregates', self.aggregate_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates'] + ), + json={'aggregates': [self.fake_aggregate]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + ), + ] + ) + + self.assertTrue(self.cloud.delete_aggregate(self.aggregate_name)) + + self.assert_calls() + + def test_update_aggregate_set_az(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + json=self.fake_aggregate, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={ + 'aggregate': { + 'availability_zone': 'az', + } + } + ), + ), + ] + ) + + self.cloud.update_aggregate(1, availability_zone='az') + + self.assert_calls() + + def test_update_aggregate_unset_az(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + json=self.fake_aggregate, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={ + 'aggregate': { + 'availability_zone': None, + } + } + ), + ), + ] + ) + + self.cloud.update_aggregate(1, availability_zone=None) + + self.assert_calls() + + def test_set_aggregate_metadata(self): + metadata = {'key': 'value'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + json=self.fake_aggregate, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-aggregates', '1', 'action'], + ), + json={'aggregate': self.fake_aggregate}, + validate=dict( + json={'set_metadata': {'metadata': metadata}} 
+ ), + ), + ] + ) + self.cloud.set_aggregate_metadata('1', metadata) + + self.assert_calls() + + def test_add_host_to_aggregate(self): + hostname = 'host1' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + json=self.fake_aggregate, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-aggregates', '1', 'action'], + ), + json={'aggregate': self.fake_aggregate}, + validate=dict(json={'add_host': {'host': hostname}}), + ), + ] + ) + self.cloud.add_host_to_aggregate('1', hostname) + + self.assert_calls() + + def test_remove_host_from_aggregate(self): + hostname = 'host1' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-aggregates', '1'] + ), + json=self.fake_aggregate, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-aggregates', '1', 'action'], + ), + json={'aggregate': self.fake_aggregate}, + validate=dict(json={'remove_host': {'host': hostname}}), + ), + ] + ) + self.cloud.remove_host_from_aggregate('1', hostname) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_availability_zones.py b/openstack/tests/unit/cloud/test_availability_zones.py new file mode 100644 index 0000000000..aa277f6696 --- /dev/null +++ b/openstack/tests/unit/cloud/test_availability_zones.py @@ -0,0 +1,73 @@ +# Copyright (c) 2017 Red Hat, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests import fakes +from openstack.tests.unit import base + + +_fake_zone_list = { + "availabilityZoneInfo": [ + {"hosts": None, "zoneName": "az1", "zoneState": {"available": True}}, + {"hosts": None, "zoneName": "nova", "zoneState": {"available": False}}, + ] +} + + +class TestAvailabilityZoneNames(base.TestCase): + def test_list_availability_zone_names(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone', + json=_fake_zone_list, + ), + ] + ) + + self.assertEqual(['az1'], self.cloud.list_availability_zone_names()) + + self.assert_calls() + + def test_unauthorized_availability_zone_names(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone', + status_code=403, + ), + ] + ) + + self.assertEqual([], self.cloud.list_availability_zone_names()) + + self.assert_calls() + + def test_list_all_availability_zone_names(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-availability-zone', + json=_fake_zone_list, + ), + ] + ) + + self.assertEqual( + ['az1', 'nova'], + self.cloud.list_availability_zone_names(unavailable=True), + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_baremetal_node.py b/openstack/tests/unit/cloud/test_baremetal_node.py new file mode 100644 index 0000000000..5c7fd8dca3 --- /dev/null +++ b/openstack/tests/unit/cloud/test_baremetal_node.py @@ -0,0 +1,2475 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_baremetal_node +---------------------------------- + +Tests for baremetal node related operations +""" + +import uuid + +from testscenarios import load_tests_apply_scenarios as load_tests # noqa + +from openstack import exceptions +from openstack.network.v2 import port as _port +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestBaremetalNode(base.IronicTestCase): + def setUp(self): + super().setUp() + self.fake_baremetal_node = fakes.make_fake_machine( + self.name, self.uuid + ) + # TODO(TheJulia): Some tests below have fake ports, + # since they are required in some processes. Lets refactor + # them at some point to use self.fake_baremetal_port. 
+ self.fake_baremetal_port = fakes.make_fake_port( + '00:01:02:03:04:05', node_id=self.uuid + ) + + def test_list_machines(self): + fake_baremetal_two = fakes.make_fake_machine('two', str(uuid.uuid4())) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='nodes'), + json={ + 'nodes': [self.fake_baremetal_node, fake_baremetal_two] + }, + ), + ] + ) + + machines = self.cloud.list_machines() + self.assertEqual(2, len(machines)) + self.assertSubdict(self.fake_baremetal_node, machines[0]) + self.assertSubdict(fake_baremetal_two, machines[1]) + self.assert_calls() + + def test_get_machine(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + + machine = self.cloud.get_machine(self.fake_baremetal_node['uuid']) + self.assertEqual(machine['uuid'], self.fake_baremetal_node['uuid']) + self.assert_calls() + + def test_get_machine_by_mac(self): + mac_address = '00:01:02:03:04:05' + node_uuid = self.fake_baremetal_node['uuid'] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='ports', + append=['detail'], + qs_elements=[f'address={mac_address}'], + ), + json={ + 'ports': [ + {'address': mac_address, 'node_uuid': node_uuid} + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + + machine = self.cloud.get_machine_by_mac(mac_address) + self.assertEqual(machine['uuid'], self.fake_baremetal_node['uuid']) + self.assert_calls() + + def test_validate_machine(self): + # NOTE(TheJulia): Note: These are only the interfaces + # that are validated, and all must be true for an + # exception to not be raised. 
+ validate_return = { + 'boot': { + 'result': True, + }, + 'deploy': { + 'result': True, + }, + 'management': { + 'result': True, + }, + 'power': { + 'result': True, + }, + 'foo': { + 'result': False, + }, + } + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], 'validate'], + ), + json=validate_return, + ), + ] + ) + self.cloud.validate_machine(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_validate_machine_not_for_deploy(self): + validate_return = { + 'deploy': { + 'result': False, + 'reason': 'Not ready', + }, + 'power': { + 'result': True, + }, + 'foo': { + 'result': False, + }, + } + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], 'validate'], + ), + json=validate_return, + ), + ] + ) + self.cloud.validate_machine( + self.fake_baremetal_node['uuid'], for_deploy=False + ) + + self.assert_calls() + + def test_deprecated_validate_node(self): + validate_return = { + 'deploy': { + 'result': True, + }, + 'power': { + 'result': True, + }, + 'foo': { + 'result': False, + }, + } + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], 'validate'], + ), + json=validate_return, + ), + ] + ) + self.cloud.validate_node(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_validate_machine_raises_exception(self): + validate_return = { + 'deploy': { + 'result': False, + 'reason': 'error!', + }, + 'power': { + 'result': True, + 'reason': None, + }, + 'foo': {'result': True}, + } + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], 'validate'], + ), + json=validate_return, + ), + ] + ) + self.assertRaises( + exceptions.ValidationException, + self.cloud.validate_machine, + 
self.fake_baremetal_node['uuid'], + ) + + self.assert_calls() + + def test_patch_machine(self): + test_patch = [{'op': 'remove', 'path': '/instance_info'}] + self.fake_baremetal_node['instance_info'] = {} + self.register_uris( + [ + dict( + method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + validate=dict(json=test_patch), + ), + ] + ) + result = self.cloud.patch_machine( + self.fake_baremetal_node['uuid'], test_patch + ) + self.assertEqual(self.fake_baremetal_node['uuid'], result['uuid']) + + self.assert_calls() + + def test_set_node_instance_info(self): + test_patch = [{'op': 'add', 'path': '/foo', 'value': 'bar'}] + self.register_uris( + [ + dict( + method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + validate=dict(json=test_patch), + ), + ] + ) + self.cloud.set_node_instance_info( + self.fake_baremetal_node['uuid'], test_patch + ) + + self.assert_calls() + + def test_purge_node_instance_info(self): + test_patch = [{'op': 'remove', 'path': '/instance_info'}] + self.fake_baremetal_node['instance_info'] = {} + self.register_uris( + [ + dict( + method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + validate=dict(json=test_patch), + ), + ] + ) + self.cloud.purge_node_instance_info(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_fail_active(self): + self.fake_baremetal_node['provision_state'] = 'active' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.inspect_machine, + self.fake_baremetal_node['uuid'], + wait=True, + timeout=1, + ) + + self.assert_calls() 
+ + def test_inspect_machine_fail_associated(self): + self.fake_baremetal_node['provision_state'] = 'available' + self.fake_baremetal_node['instance_uuid'] = '1234' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.assertRaisesRegex( + exceptions.SDKException, + 'associated with an instance', + self.cloud.inspect_machine, + self.fake_baremetal_node['uuid'], + wait=True, + timeout=1, + ) + + self.assert_calls() + + def test_inspect_machine_failed(self): + inspecting_node = self.fake_baremetal_node.copy() + self.fake_baremetal_node['provision_state'] = 'inspect failed' + self.fake_baremetal_node['last_error'] = 'kaboom!' + inspecting_node['provision_state'] = 'inspecting' + finished_node = self.fake_baremetal_node.copy() + finished_node['provision_state'] = 'manageable' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'inspect'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=inspecting_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=finished_node, + ), + ] + ) + + self.cloud.inspect_machine(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_manageable(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 
resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'inspect'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=inspecting_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.cloud.inspect_machine(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_available(self): + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + manageable_node = self.fake_baremetal_node.copy() + manageable_node['provision_state'] = 'manageable' + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'manage'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'inspect'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + 
validate=dict(json={'target': 'provide'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + ] + ) + self.cloud.inspect_machine(self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_inspect_machine_available_wait(self): + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + manageable_node = self.fake_baremetal_node.copy() + manageable_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'manage'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'inspect'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=inspecting_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + 
validate=dict(json={'target': 'provide'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + ] + ) + self.cloud.inspect_machine( + self.fake_baremetal_node['uuid'], wait=True, timeout=1 + ) + + self.assert_calls() + + def test_inspect_machine_wait(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'inspect'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=inspecting_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=inspecting_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.cloud.inspect_machine( + self.fake_baremetal_node['uuid'], wait=True, timeout=1 + ) + + self.assert_calls() + + def test_inspect_machine_inspect_failed(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + inspecting_node = self.fake_baremetal_node.copy() + inspecting_node['provision_state'] = 'inspecting' + inspect_fail_node = self.fake_baremetal_node.copy() + inspect_fail_node['provision_state'] = 'inspect failed' + inspect_fail_node['last_error'] = 'Earth Imploded' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + 
append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'inspect'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=inspecting_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=inspect_fail_node, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.inspect_machine, + self.fake_baremetal_node['uuid'], + wait=True, + timeout=1, + ) + + self.assert_calls() + + def test_set_machine_maintenace_state(self): + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'maintenance', + ], + ), + validate=dict(json={'reason': 'no reason'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.cloud.set_machine_maintenance_state( + self.fake_baremetal_node['uuid'], True, reason='no reason' + ) + + self.assert_calls() + + def test_set_machine_maintenace_state_false(self): + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'maintenance', + ], + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.cloud.set_machine_maintenance_state( + self.fake_baremetal_node['uuid'], False + ) + + self.assert_calls + + def test_remove_machine_from_maintenance(self): + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + 
append=[ + self.fake_baremetal_node['uuid'], + 'maintenance', + ], + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.cloud.remove_machine_from_maintenance( + self.fake_baremetal_node['uuid'] + ) + + self.assert_calls() + + def test_set_machine_power_on(self): + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'power', + ], + ), + validate=dict(json={'target': 'power on'}), + ), + ] + ) + return_value = self.cloud.set_machine_power_on( + self.fake_baremetal_node['uuid'] + ) + self.assertIsNone(return_value) + + self.assert_calls() + + def test_set_machine_power_on_with_retires(self): + # NOTE(TheJulia): This logic ends up testing power on/off and reboot + # as they all utilize the same helper method. + self.register_uris( + [ + dict( + method='PUT', + status_code=503, + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'power', + ], + ), + validate=dict(json={'target': 'power on'}), + ), + dict( + method='PUT', + status_code=409, + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'power', + ], + ), + validate=dict(json={'target': 'power on'}), + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'power', + ], + ), + validate=dict(json={'target': 'power on'}), + ), + ] + ) + return_value = self.cloud.set_machine_power_on( + self.fake_baremetal_node['uuid'] + ) + self.assertIsNone(return_value) + + self.assert_calls() + + def test_set_machine_power_off(self): + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'power', + ], + ), + 
validate=dict(json={'target': 'power off'}), + ), + ] + ) + return_value = self.cloud.set_machine_power_off( + self.fake_baremetal_node['uuid'] + ) + self.assertIsNone(return_value) + + self.assert_calls() + + def test_set_machine_power_reboot(self): + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'power', + ], + ), + validate=dict(json={'target': 'rebooting'}), + ), + ] + ) + return_value = self.cloud.set_machine_power_reboot( + self.fake_baremetal_node['uuid'] + ) + self.assertIsNone(return_value) + + self.assert_calls() + + def test_set_machine_power_reboot_failure(self): + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'power', + ], + ), + status_code=400, + json={'error': 'invalid'}, + validate=dict(json={'target': 'rebooting'}), + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.set_machine_power_reboot, + self.fake_baremetal_node['uuid'], + ) + + self.assert_calls() + + def test_node_set_provision_state(self): + deploy_node = self.fake_baremetal_node.copy() + deploy_node['provision_state'] = 'deploying' + active_node = self.fake_baremetal_node.copy() + active_node['provision_state'] = 'active' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict( + json={ + 'target': 'active', + 'configdrive': 'http://host/file', + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + result = self.cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], + 'active', + configdrive='http://host/file', + ) + self.assertEqual(self.fake_baremetal_node['uuid'], 
result['uuid']) + + self.assert_calls() + + def test_node_set_provision_state_with_retries(self): + deploy_node = self.fake_baremetal_node.copy() + deploy_node['provision_state'] = 'deploying' + active_node = self.fake_baremetal_node.copy() + active_node['provision_state'] = 'active' + self.register_uris( + [ + dict( + method='PUT', + status_code=409, + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict( + json={ + 'target': 'active', + 'configdrive': 'http://host/file', + } + ), + ), + dict( + method='PUT', + status_code=503, + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict( + json={ + 'target': 'active', + 'configdrive': 'http://host/file', + } + ), + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict( + json={ + 'target': 'active', + 'configdrive': 'http://host/file', + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], + 'active', + configdrive='http://host/file', + ) + + self.assert_calls() + + def test_node_set_provision_state_wait_timeout(self): + deploy_node = self.fake_baremetal_node.copy() + deploy_node['provision_state'] = 'deploying' + active_node = self.fake_baremetal_node.copy() + active_node['provision_state'] = 'active' + self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'active'}), + ), + dict( + method='GET', + 
uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=deploy_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=active_node, + ), + ] + ) + return_value = self.cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], 'active', wait=True + ) + + self.assertSubdict(active_node, return_value) + self.assert_calls() + + def test_node_set_provision_state_wait_timeout_fails(self): + # Intentionally time out. + self.fake_baremetal_node['provision_state'] = 'deploy wait' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'active'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.node_set_provision_state, + self.fake_baremetal_node['uuid'], + 'active', + wait=True, + timeout=0.001, + ) + + self.assert_calls() + + def test_node_set_provision_state_wait_success(self): + self.fake_baremetal_node['provision_state'] = 'active' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'active'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + + return_value = self.cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], 'active', wait=True + ) + + 
self.assertSubdict(self.fake_baremetal_node, return_value) + self.assert_calls() + + def test_node_set_provision_state_wait_failure_cases(self): + self.fake_baremetal_node['provision_state'] = 'foo failed' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'active'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.node_set_provision_state, + self.fake_baremetal_node['uuid'], + 'active', + wait=True, + timeout=300, + ) + + self.assert_calls() + + def test_node_set_provision_state_wait_provide(self): + self.fake_baremetal_node['provision_state'] = 'manageable' + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'provide'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + ] + ) + return_value = self.cloud.node_set_provision_state( + self.fake_baremetal_node['uuid'], 'provide', wait=True + ) + + self.assertSubdict(available_node, return_value) + self.assert_calls() + + def test_wait_for_baremetal_node_lock_locked(self): + self.fake_baremetal_node['reservation'] = 'conductor0' + unlocked_node = self.fake_baremetal_node.copy() + unlocked_node['reservation'] = None + self.register_uris( + [ + dict( + method='GET', + 
uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=unlocked_node, + ), + ] + ) + self.assertIsNone( + self.cloud.wait_for_baremetal_node_lock( + self.fake_baremetal_node, timeout=1 + ) + ) + + self.assert_calls() + + def test_wait_for_baremetal_node_lock_not_locked(self): + self.fake_baremetal_node['reservation'] = None + self.assertIsNone( + self.cloud.wait_for_baremetal_node_lock( + self.fake_baremetal_node, timeout=1 + ) + ) + + # NOTE(dtantsur): service discovery apparently requires 3 calls + self.assertEqual(3, len(self.adapter.request_history)) + + def test_wait_for_baremetal_node_lock_timeout(self): + self.fake_baremetal_node['reservation'] = 'conductor0' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.wait_for_baremetal_node_lock, + self.fake_baremetal_node, + timeout=0.001, + ) + + self.assert_calls() + + def test_activate_node(self): + self.fake_baremetal_node['provision_state'] = 'active' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict( + json={ + 'target': 'active', + 'configdrive': 'http://host/file', + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + return_value = self.cloud.activate_node( + self.fake_baremetal_node['uuid'], + configdrive='http://host/file', + wait=True, + ) + + self.assertIsNone(return_value) + self.assert_calls() + + def test_deactivate_node(self): + 
self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'deleted'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + return_value = self.cloud.deactivate_node( + self.fake_baremetal_node['uuid'], wait=True + ) + + self.assertIsNone(return_value) + self.assert_calls() + + def test_register_machine(self): + mac_address = '00:01:02:03:04:05' + nics = [{'address': mac_address}] + node_uuid = self.fake_baremetal_node['uuid'] + # TODO(TheJulia): There is a lot of duplication + # in testing creation. Surely this hsould be a helper + # or something. We should fix this. + node_to_post = { + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'available' + if 'provision_state' in node_to_post: + node_to_post.pop('provision_state') + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + json=self.fake_baremetal_node, + validate=dict(json=node_to_post), + ), + dict( + method='POST', + uri=self.get_mock_url(resource='ports'), + validate=dict( + json={'address': mac_address, 'node_uuid': node_uuid} + ), + json=self.fake_baremetal_port, + ), + ] + ) + return_value = self.cloud.register_machine(nics, **node_to_post) + + self.assertEqual(self.uuid, return_value.id) + self.assertSubdict(self.fake_baremetal_node, return_value) + self.assert_calls() + + # TODO(TheJulia): We need to de-duplicate these tests. + # Possibly a dedicated class, although we should do it + # then as we may find differences that need to be + # accounted for newer microversions. 
+ def test_register_machine_enroll(self): + mac_address = '00:01:02:03:04:05' + nics = [{'address': mac_address, 'pxe_enabled': False}] + node_uuid = self.fake_baremetal_node['uuid'] + node_to_post = { + 'chassis_uuid': None, + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'enroll' + manageable_node = self.fake_baremetal_node.copy() + manageable_node['provision_state'] = 'manageable' + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + validate=dict(json=node_to_post), + json=self.fake_baremetal_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'manage'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='POST', + uri=self.get_mock_url(resource='ports'), + validate=dict( + json={ + 'address': mac_address, + 'node_uuid': node_uuid, + 'pxe_enabled': False, + } + ), + json=self.fake_baremetal_port, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'provide'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + ] + ) + return_value = self.cloud.register_machine(nics, **node_to_post) + + self.assertSubdict(available_node, return_value) + self.assert_calls() + + def test_register_machine_enroll_wait(self): + mac_address = self.fake_baremetal_port + nics = [{'address': mac_address}] + node_uuid = 
self.fake_baremetal_node['uuid'] + node_to_post = { + 'chassis_uuid': None, + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'enroll' + manageable_node = self.fake_baremetal_node.copy() + manageable_node['provision_state'] = 'manageable' + available_node = self.fake_baremetal_node.copy() + available_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + validate=dict(json=node_to_post), + json=self.fake_baremetal_node, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'manage'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='POST', + uri=self.get_mock_url(resource='ports'), + validate=dict( + json={'address': mac_address, 'node_uuid': node_uuid} + ), + json=self.fake_baremetal_port, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'provide'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=available_node, + ), + ] + ) + return_value = self.cloud.register_machine( + nics, wait=True, **node_to_post + ) + + self.assertSubdict(available_node, return_value) + self.assert_calls() + + 
def test_register_machine_enroll_failure(self): + mac_address = '00:01:02:03:04:05' + nics = [{'address': mac_address}] + node_uuid = self.fake_baremetal_node['uuid'] + node_to_post = { + 'chassis_uuid': None, + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'enroll' + failed_node = self.fake_baremetal_node.copy() + failed_node['reservation'] = 'conductor0' + failed_node['provision_state'] = 'enroll' + failed_node['last_error'] = 'kaboom!' + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + json=self.fake_baremetal_node, + validate=dict(json=node_to_post), + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'manage'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=failed_node, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.register_machine, + nics, + **node_to_post, + ) + self.assert_calls() + + def test_register_machine_enroll_timeout(self): + mac_address = '00:01:02:03:04:05' + nics = [{'address': mac_address}] + node_uuid = self.fake_baremetal_node['uuid'] + node_to_post = { + 'chassis_uuid': None, + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'enroll' + busy_node = self.fake_baremetal_node.copy() + busy_node['reservation'] = 'conductor0' + busy_node['provision_state'] = 'verifying' + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + 
json=self.fake_baremetal_node, + validate=dict(json=node_to_post), + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'manage'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=busy_node, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + ] + ) + # NOTE(TheJulia): This test shortcircuits the timeout loop + # such that it executes only once. The very last returned + # state to the API is essentially a busy state that we + # want to block on until it has cleared. + self.assertRaises( + exceptions.SDKException, + self.cloud.register_machine, + nics, + timeout=0.001, + lock_timeout=0.001, + **node_to_post, + ) + self.assert_calls() + + def test_register_machine_enroll_timeout_wait(self): + mac_address = '00:01:02:03:04:05' + nics = [{'address': mac_address}] + node_uuid = self.fake_baremetal_node['uuid'] + node_to_post = { + 'chassis_uuid': None, + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'enroll' + manageable_node = self.fake_baremetal_node.copy() + manageable_node['provision_state'] = 'manageable' + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + json=self.fake_baremetal_node, + validate=dict(json=node_to_post), + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'manage'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=manageable_node, + ), + dict( + method='POST', + 
uri=self.get_mock_url(resource='ports'), + validate=dict( + json={'address': mac_address, 'node_uuid': node_uuid} + ), + json=self.fake_baremetal_port, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'states', + 'provision', + ], + ), + validate=dict(json={'target': 'provide'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.register_machine, + nics, + wait=True, + timeout=0.001, + **node_to_post, + ) + self.assert_calls() + + def test_register_machine_port_create_failed(self): + mac_address = '00:01:02:03:04:05' + nics = [{'address': mac_address}] + node_uuid = self.fake_baremetal_node['uuid'] + node_to_post = { + 'chassis_uuid': None, + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + json=self.fake_baremetal_node, + validate=dict(json=node_to_post), + ), + dict( + method='POST', + uri=self.get_mock_url(resource='ports'), + status_code=400, + json={'error': 'no ports for you'}, + validate=dict( + json={'address': mac_address, 'node_uuid': node_uuid} + ), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + ] + ) + self.assertRaisesRegex( + exceptions.SDKException, + 'no ports for you', + self.cloud.register_machine, + nics, + **node_to_post, + ) + + self.assert_calls() + + def test_register_machine_several_ports_create_failed(self): + mac_address = '00:01:02:03:04:05' + 
mac_address2 = mac_address[::-1] + # Verify a couple of ways to provide MACs + nics = [mac_address, {'mac': mac_address2}] + node_uuid = self.fake_baremetal_node['uuid'] + node_to_post = { + 'chassis_uuid': None, + 'driver': None, + 'driver_info': None, + 'name': self.fake_baremetal_node['name'], + 'properties': None, + 'uuid': node_uuid, + } + self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='nodes'), + json=self.fake_baremetal_node, + validate=dict(json=node_to_post), + ), + dict( + method='POST', + uri=self.get_mock_url(resource='ports'), + validate=dict( + json={'address': mac_address, 'node_uuid': node_uuid} + ), + json=self.fake_baremetal_port, + ), + dict( + method='POST', + uri=self.get_mock_url(resource='ports'), + status_code=400, + json={'error': 'no ports for you'}, + validate=dict( + json={'address': mac_address2, 'node_uuid': node_uuid} + ), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='ports', + append=[self.fake_baremetal_port['uuid']], + ), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + ] + ) + self.assertRaisesRegex( + exceptions.SDKException, + 'no ports for you', + self.cloud.register_machine, + nics, + **node_to_post, + ) + + self.assert_calls() + + def test_unregister_machine(self): + mac_address = self.fake_baremetal_port['address'] + nics = [{'mac': mac_address}] + port_uuid = self.fake_baremetal_port['uuid'] + # NOTE(TheJulia): The two values below should be the same. 
+ port_node_uuid = self.fake_baremetal_port['node_uuid'] + self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='ports', + qs_elements=[f'address={mac_address}'], + ), + json={ + 'ports': [ + { + 'address': mac_address, + 'node_uuid': port_node_uuid, + 'uuid': port_uuid, + } + ] + }, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='ports', + append=[self.fake_baremetal_port['uuid']], + ), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + ] + ) + + self.cloud.unregister_machine(nics, self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_unregister_machine_locked_timeout(self): + mac_address = self.fake_baremetal_port['address'] + nics = [{'mac': mac_address}] + self.fake_baremetal_node['provision_state'] = 'available' + self.fake_baremetal_node['reservation'] = 'conductor99' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.unregister_machine, + nics, + self.fake_baremetal_node['uuid'], + timeout=0.001, + ) + self.assert_calls() + + def test_unregister_machine_retries(self): + mac_address = self.fake_baremetal_port['address'] + nics = [{'mac': mac_address}] + port_uuid = self.fake_baremetal_port['uuid'] + # NOTE(TheJulia): The two values below should be the same. 
+ port_node_uuid = self.fake_baremetal_port['node_uuid'] + self.fake_baremetal_node['provision_state'] = 'available' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='ports', + qs_elements=[f'address={mac_address}'], + ), + json={ + 'ports': [ + { + 'address': mac_address, + 'node_uuid': port_node_uuid, + 'uuid': port_uuid, + } + ] + }, + ), + dict( + method='DELETE', + status_code=503, + uri=self.get_mock_url( + resource='ports', + append=[self.fake_baremetal_port['uuid']], + ), + ), + dict( + method='DELETE', + status_code=409, + uri=self.get_mock_url( + resource='ports', + append=[self.fake_baremetal_port['uuid']], + ), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='ports', + append=[self.fake_baremetal_port['uuid']], + ), + ), + dict( + method='DELETE', + status_code=409, + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + ), + ] + ) + + self.cloud.unregister_machine(nics, self.fake_baremetal_node['uuid']) + + self.assert_calls() + + def test_unregister_machine_unavailable(self): + # This is a list of invalid states that the method + # should fail on. 
+ invalid_states = ['active', 'cleaning', 'clean wait', 'clean failed'] + mac_address = self.fake_baremetal_port['address'] + nics = [{'mac': mac_address}] + url_list = [] + for state in invalid_states: + self.fake_baremetal_node['provision_state'] = state + url_list.append( + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ) + ) + + self.register_uris(url_list) + + for state in invalid_states: + self.assertRaises( + exceptions.SDKException, + self.cloud.unregister_machine, + nics, + self.fake_baremetal_node['uuid'], + ) + + self.assert_calls() + + def test_update_machine_patch_no_action(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + ] + ) + # NOTE(TheJulia): This is just testing mechanics. + update_dict = self.cloud.update_machine( + self.fake_baremetal_node['uuid'] + ) + self.assertIsNone(update_dict['changes']) + self.assertSubdict(self.fake_baremetal_node, update_dict['node']) + + self.assert_calls() + + def test_attach_port_to_machine(self): + vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + service_type='network', + resource='ports', + base_url_append='v2.0', + append=[vif_id], + ), + json={'id': vif_id}, + ), + dict( + method='POST', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], 'vifs'], + ), + ), + ] + ) + self.cloud.attach_port_to_machine( + self.fake_baremetal_node['uuid'], vif_id + ) + self.assert_calls() + + def test_detach_port_from_machine(self): + vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec' + self.register_uris( + [ + dict( + 
method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + service_type='network', + resource='ports', + base_url_append='v2.0', + append=[vif_id], + ), + json={'id': vif_id}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='nodes', + append=[ + self.fake_baremetal_node['uuid'], + 'vifs', + vif_id, + ], + ), + ), + ] + ) + self.cloud.detach_port_from_machine( + self.fake_baremetal_node['uuid'], vif_id + ) + self.assert_calls() + + def test_list_ports_attached_to_machine(self): + vif_id = '953ccbee-e854-450f-95fe-fe5e40d611ec' + fake_port = {'id': vif_id, 'name': 'test'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid'], 'vifs'], + ), + json={'vifs': [{'id': vif_id}]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + service_type='network', + resource='ports', + base_url_append='v2.0', + append=[vif_id], + ), + json=fake_port, + ), + ] + ) + res = self.cloud.list_ports_attached_to_machine( + self.fake_baremetal_node['uuid'] + ) + self.assert_calls() + self.assertEqual( + [_port.Port(**fake_port).to_dict(computed=False)], + [i.to_dict(computed=False) for i in res], + ) + + +class TestUpdateMachinePatch(base.IronicTestCase): + # NOTE(TheJulia): As appears, and mordred describes, + # this class utilizes black magic, which ultimately + # results in additional test runs being executed with + # the scenario name appended. Useful for lots of + # variables that need to be tested. 
+ + def setUp(self): + super().setUp() + self.fake_baremetal_node = fakes.make_fake_machine( + self.name, self.uuid + ) + + def test_update_machine_patch(self): + # The model has evolved over time, create the field if + # we don't already have it. + if self.field_name not in self.fake_baremetal_node: + self.fake_baremetal_node[self.field_name] = None + value_to_send = self.fake_baremetal_node[self.field_name] + if self.changed: + value_to_send = self.new_value + uris = [ + dict( + method='GET', + uri=self.get_mock_url( + resource='nodes', append=[self.fake_baremetal_node['uuid']] + ), + json=self.fake_baremetal_node, + ), + ] + if self.changed: + test_patch = [ + { + 'op': 'replace', + 'path': '/' + self.field_name, + 'value': value_to_send, + } + ] + uris.append( + dict( + method='PATCH', + uri=self.get_mock_url( + resource='nodes', + append=[self.fake_baremetal_node['uuid']], + ), + json=self.fake_baremetal_node, + validate=dict(json=test_patch), + ) + ) + + self.register_uris(uris) + + call_args = {self.field_name: value_to_send} + update_dict = self.cloud.update_machine( + self.fake_baremetal_node['uuid'], **call_args + ) + + if self.changed: + self.assertEqual(['/' + self.field_name], update_dict['changes']) + else: + self.assertIsNone(update_dict['changes']) + self.assertSubdict(self.fake_baremetal_node, update_dict['node']) + + self.assert_calls() + + scenarios = [ + ('chassis_uuid', dict(field_name='chassis_uuid', changed=False)), + ( + 'chassis_uuid_changed', + dict(field_name='chassis_uuid', changed=True, new_value='meow'), + ), + ('driver', dict(field_name='driver', changed=False)), + ( + 'driver_changed', + dict(field_name='driver', changed=True, new_value='meow'), + ), + ('driver_info', dict(field_name='driver_info', changed=False)), + ( + 'driver_info_changed', + dict( + field_name='driver_info', + changed=True, + new_value={'cat': 'meow'}, + ), + ), + ('instance_info', dict(field_name='instance_info', changed=False)), + ( + 'instance_info_changed', + 
dict( + field_name='instance_info', + changed=True, + new_value={'cat': 'meow'}, + ), + ), + ('instance_uuid', dict(field_name='instance_uuid', changed=False)), + ( + 'instance_uuid_changed', + dict(field_name='instance_uuid', changed=True, new_value='meow'), + ), + ('name', dict(field_name='name', changed=False)), + ( + 'name_changed', + dict(field_name='name', changed=True, new_value='meow'), + ), + ('properties', dict(field_name='properties', changed=False)), + ( + 'properties_changed', + dict( + field_name='properties', + changed=True, + new_value={'cat': 'meow'}, + ), + ), + ] diff --git a/openstack/tests/unit/cloud/test_baremetal_ports.py b/openstack/tests/unit/cloud/test_baremetal_ports.py new file mode 100644 index 0000000000..5a75676cc7 --- /dev/null +++ b/openstack/tests/unit/cloud/test_baremetal_ports.py @@ -0,0 +1,173 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_baremetal_ports +---------------------------------- + +Tests for baremetal port related operations +""" + +from testscenarios import load_tests_apply_scenarios as load_tests # noqa + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestBaremetalPort(base.IronicTestCase): + def setUp(self): + super().setUp() + self.fake_baremetal_node = fakes.make_fake_machine( + self.name, self.uuid + ) + # TODO(TheJulia): Some tests below have fake ports, + # since they are required in some processes. 
Lets refactor + # them at some point to use self.fake_baremetal_port. + self.fake_baremetal_port = fakes.make_fake_port( + '00:01:02:03:04:05', node_id=self.uuid + ) + self.fake_baremetal_port2 = fakes.make_fake_port( + '0a:0b:0c:0d:0e:0f', node_id=self.uuid + ) + + def test_list_nics(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='ports', append=['detail']), + json={ + 'ports': [ + self.fake_baremetal_port, + self.fake_baremetal_port2, + ] + }, + ), + ] + ) + + return_value = self.cloud.list_nics() + self.assertEqual(2, len(return_value)) + self.assertSubdict(self.fake_baremetal_port, return_value[0]) + self.assert_calls() + + def test_list_nics_failure(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='ports', append=['detail']), + status_code=400, + ) + ] + ) + self.assertRaises(exceptions.SDKException, self.cloud.list_nics) + self.assert_calls() + + def test_list_nics_for_machine(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='ports', + append=['detail'], + qs_elements=[ + 'node_uuid={}'.format( + self.fake_baremetal_node['uuid'] + ) + ], + ), + json={ + 'ports': [ + self.fake_baremetal_port, + self.fake_baremetal_port2, + ] + }, + ), + ] + ) + + return_value = self.cloud.list_nics_for_machine( + self.fake_baremetal_node['uuid'] + ) + self.assertEqual(2, len(return_value)) + self.assertSubdict(self.fake_baremetal_port, return_value[0]) + self.assert_calls() + + def test_list_nics_for_machine_failure(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='ports', + append=['detail'], + qs_elements=[ + 'node_uuid={}'.format( + self.fake_baremetal_node['uuid'] + ) + ], + ), + status_code=400, + ) + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.list_nics_for_machine, + self.fake_baremetal_node['uuid'], + ) + self.assert_calls() + + def test_get_nic_by_mac(self): + mac = 
self.fake_baremetal_port['address'] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='ports', + append=['detail'], + qs_elements=[f'address={mac}'], + ), + json={'ports': [self.fake_baremetal_port]}, + ), + ] + ) + + return_value = self.cloud.get_nic_by_mac(mac) + + self.assertSubdict(self.fake_baremetal_port, return_value) + self.assert_calls() + + def test_get_nic_by_mac_failure(self): + mac = self.fake_baremetal_port['address'] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='ports', + append=['detail'], + qs_elements=[f'address={mac}'], + ), + json={'ports': []}, + ), + ] + ) + + self.assertIsNone(self.cloud.get_nic_by_mac(mac)) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_cloud.py b/openstack/tests/unit/cloud/test_cloud.py new file mode 100644 index 0000000000..8ae554f467 --- /dev/null +++ b/openstack/tests/unit/cloud/test_cloud.py @@ -0,0 +1,213 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock +import uuid + +import testtools + +from openstack import connection +from openstack import exceptions +from openstack.tests.unit import base +from openstack import utils + + +RANGE_DATA = [ + dict(id=1, key1=1, key2=5), + dict(id=2, key1=1, key2=20), + dict(id=3, key1=2, key2=10), + dict(id=4, key1=2, key2=30), + dict(id=5, key1=3, key2=40), + dict(id=6, key1=3, key2=40), +] + + +class TestCloud(base.TestCase): + def test_openstack_cloud(self): + self.assertIsInstance(self.cloud, connection.Connection) + + def test_endpoint_for(self): + dns_override = 'https://override.dns.example.com' + self.cloud.config.config['dns_endpoint_override'] = dns_override + self.assertEqual( + 'https://compute.example.com/v2.1/', + self.cloud.endpoint_for('compute'), + ) + self.assertEqual( + 'https://internal.compute.example.com/v2.1/', + self.cloud.endpoint_for('compute', interface='internal'), + ) + self.assertIsNone( + self.cloud.endpoint_for('compute', region_name='unknown-region') + ) + self.assertEqual(dns_override, self.cloud.endpoint_for('dns')) + + def test_connect_as(self): + # Do initial auth/catalog steps + # This should authenticate a second time, but should not + # need a second identity discovery + project_name = 'test_project' + self.register_uris( + [ + self.get_keystone_v3_token(project_name=project_name), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': []}, + ), + ] + ) + + c2 = self.cloud.connect_as(project_name=project_name) + self.assertEqual(c2.list_servers(), []) + self.assert_calls() + + def test_connect_as_context(self): + # Do initial auth/catalog steps + # This should authenticate a second time, but should not + # need a second identity discovery + project_name = 'test_project' + self.register_uris( + [ + self.get_keystone_v3_token(project_name=project_name), + self.get_nova_discovery_mock_dict(), + dict( + 
method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': []}, + ), + ] + ) + + with self.cloud.connect_as(project_name=project_name) as c2: + self.assertEqual(c2.list_servers(), []) + self.assert_calls() + + def test_global_request_id(self): + request_id = uuid.uuid4().hex + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': []}, + validate=dict( + headers={'X-Openstack-Request-Id': request_id} + ), + ), + ] + ) + + cloud2 = self.cloud.global_request(request_id) + self.assertEqual([], cloud2.list_servers()) + + self.assert_calls() + + def test_global_request_id_context(self): + request_id = uuid.uuid4().hex + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': []}, + validate=dict( + headers={'X-Openstack-Request-Id': request_id} + ), + ), + ] + ) + + with self.cloud.global_request(request_id) as c2: + self.assertEqual([], c2.list_servers()) + + self.assert_calls() + + def test_iterate_timeout_bad_wait(self): + with testtools.ExpectedException( + exceptions.SDKException, + "Wait value must be an int or float value.", + ): + for count in utils.iterate_timeout( + 1, "test_iterate_timeout_bad_wait", wait="timeishard" + ): + pass + + @mock.patch('time.sleep') + def test_iterate_timeout_str_wait(self, mock_sleep): + iter = utils.iterate_timeout( + 10, "test_iterate_timeout_str_wait", wait="1.6" + ) + next(iter) + next(iter) + mock_sleep.assert_called_with(1.6) + + @mock.patch('time.sleep') + def test_iterate_timeout_int_wait(self, mock_sleep): + iter = utils.iterate_timeout( + 10, "test_iterate_timeout_int_wait", wait=1 + ) + next(iter) + next(iter) + mock_sleep.assert_called_with(1.0) + + @mock.patch('time.sleep') + def 
test_iterate_timeout_timeout(self, mock_sleep): + message = "timeout test" + with testtools.ExpectedException(exceptions.ResourceTimeout, message): + for count in utils.iterate_timeout(0.1, message, wait=1): + pass + mock_sleep.assert_called_with(1.0) + + def test_range_search(self): + filters = {"key1": "min", "key2": "20"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(1, len(retval)) + self.assertEqual([RANGE_DATA[1]], retval) + + def test_range_search_2(self): + filters = {"key1": "<=2", "key2": ">10"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(2, len(retval)) + self.assertEqual([RANGE_DATA[1], RANGE_DATA[3]], retval) + + def test_range_search_3(self): + filters = {"key1": "2", "key2": "min"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(0, len(retval)) + + def test_range_search_4(self): + filters = {"key1": "max", "key2": "min"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(0, len(retval)) + + def test_range_search_5(self): + filters = {"key1": "min", "key2": "min"} + retval = self.cloud.range_search(RANGE_DATA, filters) + self.assertIsInstance(retval, list) + self.assertEqual(1, len(retval)) + self.assertEqual([RANGE_DATA[0]], retval) diff --git a/openstack/tests/unit/cloud/test_cluster_templates.py b/openstack/tests/unit/cloud/test_cluster_templates.py new file mode 100644 index 0000000000..e25105e718 --- /dev/null +++ b/openstack/tests/unit/cloud/test_cluster_templates.py @@ -0,0 +1,289 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +from openstack.container_infrastructure_management.v1 import cluster_template +from openstack import exceptions +from openstack.tests.unit import base + + +cluster_template_obj = dict( + apiserver_port=12345, + cluster_distro='fake-distro', + coe='fake-coe', + created_at='fake-date', + dns_nameserver='8.8.8.8', + docker_volume_size=1, + external_network_id='public', + fixed_network=None, + flavor_id='fake-flavor', + https_proxy=None, + human_id=None, + image_id='fake-image', + insecure_registry='https://192.168.0.10', + keypair_id='fake-key', + labels={}, + links={}, + master_flavor_id=None, + name='fake-cluster-template', + network_driver='fake-driver', + no_proxy=None, + public=False, + registry_enabled=False, + server_type='vm', + tls_disabled=False, + updated_at=None, + uuid='fake-uuid', + volume_driver=None, +) + + +class TestClusterTemplates(base.TestCase): + def _compare_clustertemplates(self, exp, real): + self.assertDictEqual( + cluster_template.ClusterTemplate(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def get_mock_url( + self, + service_type='container-infrastructure-management', + base_url_append=None, + append=None, + resource=None, + ): + return super().get_mock_url( + service_type=service_type, + resource=resource, + append=append, + base_url_append=base_url_append, + ) + + def test_list_cluster_templates_without_detail(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ) + ] + 
) + cluster_templates_list = self.cloud.list_cluster_templates() + self._compare_clustertemplates( + cluster_template_obj, + cluster_templates_list[0], + ) + self.assert_calls() + + def test_list_cluster_templates_with_detail(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ) + ] + ) + cluster_templates_list = self.cloud.list_cluster_templates(detail=True) + self._compare_clustertemplates( + cluster_template_obj, + cluster_templates_list[0], + ) + self.assert_calls() + + def test_search_cluster_templates_by_name(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ) + ] + ) + + cluster_templates = self.cloud.search_cluster_templates( + name_or_id='fake-cluster-template' + ) + + self.assertEqual(1, len(cluster_templates)) + self.assertEqual('fake-uuid', cluster_templates[0]['uuid']) + self.assert_calls() + + def test_search_cluster_templates_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ) + ] + ) + + cluster_templates = self.cloud.search_cluster_templates( + name_or_id='non-existent' + ) + + self.assertEqual(0, len(cluster_templates)) + self.assert_calls() + + def test_get_cluster_template(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ) + ] + ) + + r = self.cloud.get_cluster_template('fake-cluster-template') + self.assertIsNotNone(r) + self._compare_clustertemplates( + cluster_template_obj, + r, + ) + self.assert_calls() + + def test_get_cluster_template_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + 
json=dict(clustertemplates=[]), + ) + ] + ) + r = self.cloud.get_cluster_template('doesNotExist') + self.assertIsNone(r) + self.assert_calls() + + def test_create_cluster_template(self): + json_response = cluster_template_obj.copy() + kwargs = dict( + name=cluster_template_obj['name'], + image_id=cluster_template_obj['image_id'], + keypair_id=cluster_template_obj['keypair_id'], + coe=cluster_template_obj['coe'], + ) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='clustertemplates'), + json=json_response, + validate=dict(json=kwargs), + ) + ] + ) + response = self.cloud.create_cluster_template(**kwargs) + self._compare_clustertemplates(json_response, response) + + self.assert_calls() + + def test_create_cluster_template_exception(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='clustertemplates'), + status_code=403, + ) + ] + ) + # TODO(mordred) requests here doesn't give us a great story + # for matching the old error message text. Investigate plumbing + # an error message into the adapter call so that we can give a + # more informative error. Also, the test was originally catching + # SDKException - but for some reason testtools will not + # match the more specific HTTPError, even though it's a subclass + # of SDKException. 
+ with testtools.ExpectedException(exceptions.ForbiddenException): + self.cloud.create_cluster_template('fake-cluster-template') + self.assert_calls() + + def test_delete_cluster_template(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='clustertemplates/fake-uuid' + ), + ), + ] + ) + self.cloud.delete_cluster_template('fake-uuid') + self.assert_calls() + + def test_update_cluster_template(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ), + dict( + method='PATCH', + uri=self.get_mock_url( + resource='clustertemplates/fake-uuid' + ), + status_code=200, + validate=dict( + json=[ + { + 'op': 'replace', + 'path': '/name', + 'value': 'new-cluster-template', + } + ] + ), + ), + ] + ) + new_name = 'new-cluster-template' + updated = self.cloud.update_cluster_template( + 'fake-uuid', name=new_name + ) + self.assertEqual(new_name, updated.name) + self.assert_calls() + + def test_coe_get_cluster_template(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='clustertemplates'), + json=dict(clustertemplates=[cluster_template_obj]), + ) + ] + ) + + r = self.cloud.get_cluster_template('fake-cluster-template') + self.assertIsNotNone(r) + self._compare_clustertemplates( + cluster_template_obj, + r, + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_clustering.py b/openstack/tests/unit/cloud/test_clustering.py new file mode 100644 index 0000000000..7a5da7f3ad --- /dev/null +++ b/openstack/tests/unit/cloud/test_clustering.py @@ -0,0 +1,684 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from openstack.clustering.v1 import cluster +from openstack.tests.unit import base + + +CLUSTERING_DICT = { + 'name': 'fake-name', + 'profile_id': '1', + 'desired_capacity': 1, + 'config': {'a': 'b'}, + 'max_size': 1, + 'min_size': 1, + 'timeout': 100, + 'metadata': {}, +} + +PROFILE_DICT = {'name': 'fake-profile-name', 'spec': {}, 'metadata': {}} + +POLICY_DICT = { + 'name': 'fake-profile-name', + 'spec': {}, +} + +RECEIVER_DICT = { + 'action': 'FAKE_CLUSTER_ACTION', + 'cluster_id': 'fake-cluster-id', + 'name': 'fake-receiver-name', + 'params': {}, + 'type': 'webhook', +} + +NEW_CLUSTERING_DICT = copy.copy(CLUSTERING_DICT) +NEW_CLUSTERING_DICT['id'] = '1' +NEW_PROFILE_DICT = copy.copy(PROFILE_DICT) +NEW_PROFILE_DICT['id'] = '1' +NEW_POLICY_DICT = copy.copy(POLICY_DICT) +NEW_POLICY_DICT['id'] = '1' +NEW_RECEIVER_DICT = copy.copy(RECEIVER_DICT) +NEW_RECEIVER_DICT['id'] = '1' + + +class TestClustering(base.TestCase): + def assertAreInstances(self, elements, elem_type): + for e in elements: + self.assertIsInstance(e, elem_type) + + def _compare_clusters(self, exp, real): + self.assertEqual( + cluster.Cluster(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def setUp(self): + super().setUp() + self.use_senlin() + + +# def test_create_cluster(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles', '1']), +# json={ +# "profiles": [NEW_PROFILE_DICT]}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 
'profiles']), +# json={ +# "profiles": [NEW_PROFILE_DICT]}), +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# json=NEW_CLUSTERING_DICT) +# ]) +# profile = self.cloud.get_cluster_profile_by_id(NEW_PROFILE_DICT['id']) +# c = self.cloud.create_cluster( +# name=CLUSTERING_DICT['name'], +# desired_capacity=CLUSTERING_DICT['desired_capacity'], +# profile=profile, +# config=CLUSTERING_DICT['config'], +# max_size=CLUSTERING_DICT['max_size'], +# min_size=CLUSTERING_DICT['min_size'], +# metadata=CLUSTERING_DICT['metadata'], +# timeout=CLUSTERING_DICT['timeout']) +# +# self._compare_clusters(NEW_CLUSTERING_DICT, c) +# self.assert_calls() +# +# def test_create_cluster_exception(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles', '1']), +# json={ +# "profiles": [NEW_PROFILE_DICT]}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles']), +# json={ +# "profiles": [NEW_PROFILE_DICT]}), +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# status_code=500) +# ]) +# profile = self.cloud.get_cluster_profile_by_id(NEW_PROFILE_DICT['id']) +# with testtools.ExpectedException( +# exc.OpenStackCloudHTTPError): +# self.cloud.create_cluster(name='fake-name', profile=profile) +# self.assert_calls() +# +# def test_get_cluster_by_id(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json={ +# "cluster": NEW_CLUSTERING_DICT}) +# ]) +# cluster = self.cloud.get_cluster_by_id('1') +# self.assertEqual(cluster['id'], '1') +# self.assert_calls() +# +# def test_get_cluster_not_found_returns_false(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', +# 'no-cluster']), +# status_code=404) +# ]) +# c = 
self.cloud.get_cluster_by_id('no-cluster') +# self.assertFalse(c) +# self.assert_calls() +# +# def test_update_cluster(self): +# new_max_size = 5 +# updated_cluster = copy.copy(NEW_CLUSTERING_DICT) +# updated_cluster['max_size'] = new_max_size +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json={ +# "cluster": NEW_CLUSTERING_DICT}), +# dict(method='PATCH', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json=updated_cluster, +# ) +# ]) +# cluster = self.cloud.get_cluster_by_id('1') +# c = self.cloud.update_cluster(cluster, new_max_size) +# self.assertEqual(updated_cluster, c) +# self.assert_calls() +# +# def test_delete_cluster(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# json={ +# "clusters": [NEW_CLUSTERING_DICT]}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1', +# 'policies']), +# json={"cluster_policies": []}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers']), +# json={"receivers": []}), +# dict(method='DELETE', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json=NEW_CLUSTERING_DICT) +# ]) +# self.assertTrue(self.cloud.delete_cluster('1')) +# self.assert_calls() +# +# def test_list_clusters(self): +# clusters = {'clusters': [NEW_CLUSTERING_DICT]} +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# json=clusters) +# ]) +# c = self.cloud.list_clusters() +# +# self.assertIsInstance(c, list) +# self.assertAreInstances(c, dict) +# +# self.assert_calls() +# +# def test_attach_policy_to_cluster(self): +# policy = { +# 'policy_id': '1', +# 'enabled': 'true' +# } +# self.register_uris([ +# dict(method='GET', +# 
uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json={ +# "cluster": NEW_CLUSTERING_DICT}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies', '1']), +# json={ +# "policy": NEW_POLICY_DICT}), +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1', +# 'actions']), +# json={'policy_attach': policy}) +# ]) +# cluster = self.cloud.get_cluster_by_id('1') +# policy = self.cloud.get_cluster_policy_by_id('1') +# p = self.cloud.attach_policy_to_cluster(cluster, policy, 'true') +# self.assertTrue(p) +# self.assert_calls() +# +# def test_detach_policy_from_cluster(self): +# updated_cluster = copy.copy(NEW_CLUSTERING_DICT) +# updated_cluster['policies'] = ['1'] +# detached_cluster = copy.copy(NEW_CLUSTERING_DICT) +# detached_cluster['policies'] = [] +# +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json={ +# "cluster": NEW_CLUSTERING_DICT}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies', '1']), +# json={ +# "policy": NEW_POLICY_DICT}), +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1', +# 'actions']), +# json={'policy_detach': {'policy_id': '1'}}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json={ +# "cluster": updated_cluster}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json={ +# "cluster": detached_cluster}), +# ]) +# cluster = self.cloud.get_cluster_by_id('1') +# policy = self.cloud.get_cluster_policy_by_id('1') +# p = self.cloud.detach_policy_from_cluster(cluster, policy, wait=True) +# self.assertTrue(p) +# self.assert_calls() +# +# def test_get_policy_on_cluster_by_id(self): +# cluster_policy = { +# 
"cluster_id": "1", +# "cluster_name": "cluster1", +# "enabled": True, +# "id": "1", +# "policy_id": "1", +# "policy_name": "policy1", +# "policy_type": "senlin.policy.deletion-1.0" +# } +# +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1', +# 'policies', '1']), +# json={ +# "cluster_policy": cluster_policy}) +# ]) +# policy = self.cloud.get_policy_on_cluster('1', '1') +# self.assertEqual(policy['cluster_id'], '1') +# self.assert_calls() +# +# def test_get_policy_on_cluster_not_found_returns_false(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1', +# 'policies', +# 'no-policy']), +# status_code=404) +# ]) +# p = self.cloud.get_policy_on_cluster('1', 'no-policy') +# self.assertFalse(p) +# self.assert_calls() +# +# def test_update_policy_on_cluster(self): +# policy = { +# 'policy_id': '1', +# 'enabled': 'true' +# } +# updated_cluster = copy.copy(NEW_CLUSTERING_DICT) +# updated_cluster['policies'] = policy +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1']), +# json={ +# "cluster": NEW_CLUSTERING_DICT}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies', +# '1']), +# json={ +# "policy": NEW_POLICY_DICT}), +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1', +# 'actions']), +# json={'policies': []}) +# ]) +# cluster = self.cloud.get_cluster_by_id('1') +# policy = self.cloud.get_cluster_policy_by_id('1') +# p = self.cloud.update_policy_on_cluster(cluster, policy, True) +# self.assertTrue(p) +# self.assert_calls() +# +# def test_get_policy_on_cluster(self): +# cluster_policy = { +# 'cluster_id': '1', +# 'cluster_name': 'cluster1', +# 'enabled': 'true', +# 'id': '1', +# 'policy_id': '1', +# 'policy_name': 'policy1', +# 
'policy_type': 'type' +# } +# +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters', '1', +# 'policies', '1']), +# json={ +# "cluster_policy": cluster_policy}) +# ]) +# get_policy = self.cloud.get_policy_on_cluster('1', '1') +# self.assertEqual(get_policy, cluster_policy) +# self.assert_calls() +# +# def test_create_cluster_profile(self): +# self.register_uris([ +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles']), +# json={'profile': NEW_PROFILE_DICT}) +# ]) +# p = self.cloud.create_cluster_profile('fake-profile-name', {}) +# +# self.assertEqual(NEW_PROFILE_DICT, p) +# self.assert_calls() +# +# def test_create_cluster_profile_exception(self): +# self.register_uris([ +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles']), +# status_code=500) +# ]) +# with testtools.ExpectedException( +# exc.OpenStackCloudHTTPError, +# "Error creating profile fake-profile-name.*"): +# self.cloud.create_cluster_profile('fake-profile-name', {}) +# self.assert_calls() +# +# def test_list_cluster_profiles(self): +# profiles = {'profiles': [NEW_PROFILE_DICT]} +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles']), +# json=profiles) +# ]) +# p = self.cloud.list_cluster_profiles() +# +# self.assertIsInstance(p, list) +# self.assertAreInstances(p, dict) +# +# self.assert_calls() +# +# def test_get_cluster_profile_by_id(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles', '1']), +# json={ +# "profile": NEW_PROFILE_DICT}) +# ]) +# p = self.cloud.get_cluster_profile_by_id('1') +# self.assertEqual(p['id'], '1') +# self.assert_calls() +# +# def test_get_cluster_profile_not_found_returns_false(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 
'clustering', 'public', append=['v1', 'profiles', +# 'no-profile']), +# status_code=404) +# ]) +# p = self.cloud.get_cluster_profile_by_id('no-profile') +# self.assertFalse(p) +# self.assert_calls() +# +# def test_update_cluster_profile(self): +# new_name = "new-name" +# updated_profile = copy.copy(NEW_PROFILE_DICT) +# updated_profile['name'] = new_name +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles']), +# json={ +# "profiles": [NEW_PROFILE_DICT]}), +# dict(method='PATCH', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles', '1']), +# json=updated_profile, +# ) +# ]) +# p = self.cloud.update_cluster_profile('1', new_name=new_name) +# self.assertEqual(updated_profile, p) +# self.assert_calls() +# +# def test_delete_cluster_profile(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles', '1']), +# json={ +# "profile": NEW_PROFILE_DICT}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# json={'clusters': [{'cluster': CLUSTERING_DICT}]}), +# dict(method='DELETE', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'profiles', '1']), +# json=NEW_PROFILE_DICT) +# ]) +# profile = self.cloud.get_cluster_profile_by_id('1') +# self.assertTrue(self.cloud.delete_cluster_profile(profile)) +# self.assert_calls() +# +# def test_create_cluster_policy(self): +# self.register_uris([ +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies']), +# json={'policy': NEW_POLICY_DICT}) +# ]) +# p = self.cloud.create_cluster_policy('fake-policy-name', {}) +# +# self.assertEqual(NEW_POLICY_DICT, p) +# self.assert_calls() +# +# def test_create_cluster_policy_exception(self): +# self.register_uris([ +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies']), +# 
status_code=500) +# ]) +# with testtools.ExpectedException( +# exc.OpenStackCloudHTTPError, +# "Error creating policy fake-policy-name.*"): +# self.cloud.create_cluster_policy('fake-policy-name', {}) +# self.assert_calls() +# +# def test_list_cluster_policies(self): +# policies = {'policies': [NEW_POLICY_DICT]} +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies']), +# json=policies) +# ]) +# p = self.cloud.list_cluster_policies() +# +# self.assertIsInstance(p, list) +# self.assertAreInstances(p, dict) +# +# self.assert_calls() +# +# def test_get_cluster_policy_by_id(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies', '1']), +# json={ +# "policy": NEW_POLICY_DICT}) +# ]) +# p = self.cloud.get_cluster_policy_by_id('1') +# self.assertEqual(p['id'], '1') +# self.assert_calls() +# +# def test_get_cluster_policy_not_found_returns_false(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies', +# 'no-policy']), +# status_code=404) +# ]) +# p = self.cloud.get_cluster_policy_by_id('no-policy') +# self.assertFalse(p) +# self.assert_calls() +# +# def test_update_cluster_policy(self): +# new_name = "new-name" +# updated_policy = copy.copy(NEW_POLICY_DICT) +# updated_policy['name'] = new_name +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies']), +# json={ +# "policies": [NEW_POLICY_DICT]}), +# dict(method='PATCH', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies', '1']), +# json=updated_policy, +# ) +# ]) +# p = self.cloud.update_cluster_policy('1', new_name=new_name) +# self.assertEqual(updated_policy, p) +# self.assert_calls() +# +# def test_delete_cluster_policy(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 
'clustering', 'public', append=['v1', 'policies', '1']), +# json={ +# "policy": NEW_POLICY_DICT}), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# json={}), +# dict(method='DELETE', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'policies', '1']), +# json=NEW_POLICY_DICT) +# ]) +# self.assertTrue(self.cloud.delete_cluster_policy('1')) +# self.assert_calls() +# +# def test_create_cluster_receiver(self): +# clusters = {'clusters': [NEW_CLUSTERING_DICT]} +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# json=clusters), +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers']), +# json={'receiver': NEW_RECEIVER_DICT}) +# ]) +# r = self.cloud.create_cluster_receiver('fake-receiver-name', {}) +# +# self.assertEqual(NEW_RECEIVER_DICT, r) +# self.assert_calls() +# +# def test_create_cluster_receiver_exception(self): +# clusters = {'clusters': [NEW_CLUSTERING_DICT]} +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'clusters']), +# json=clusters), +# dict(method='POST', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers']), +# status_code=500), +# ]) +# with testtools.ExpectedException( +# exc.OpenStackCloudHTTPError, +# "Error creating receiver fake-receiver-name.*"): +# self.cloud.create_cluster_receiver('fake-receiver-name', {}) +# self.assert_calls() +# +# def test_list_cluster_receivers(self): +# receivers = {'receivers': [NEW_RECEIVER_DICT]} +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers']), +# json=receivers) +# ]) +# r = self.cloud.list_cluster_receivers() +# +# self.assertIsInstance(r, list) +# self.assertAreInstances(r, dict) +# +# self.assert_calls() +# +# def test_get_cluster_receiver_by_id(self): 
+# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers', '1']), +# json={ +# "receiver": NEW_RECEIVER_DICT}) +# ]) +# r = self.cloud.get_cluster_receiver_by_id('1') +# self.assertEqual(r['id'], '1') +# self.assert_calls() +# +# def test_get_cluster_receiver_not_found_returns_false(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers', +# 'no-receiver']), +# json={'receivers': []}) +# ]) +# p = self.cloud.get_cluster_receiver_by_id('no-receiver') +# self.assertFalse(p) +# self.assert_calls() +# +# def test_update_cluster_receiver(self): +# new_name = "new-name" +# updated_receiver = copy.copy(NEW_RECEIVER_DICT) +# updated_receiver['name'] = new_name +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers']), +# json={ +# "receivers": [NEW_RECEIVER_DICT]}), +# dict(method='PATCH', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers', '1']), +# json=updated_receiver, +# ) +# ]) +# r = self.cloud.update_cluster_receiver('1', new_name=new_name) +# self.assertEqual(updated_receiver, r) +# self.assert_calls() +# +# def test_delete_cluster_receiver(self): +# self.register_uris([ +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers']), +# json={ +# "receivers": [NEW_RECEIVER_DICT]}), +# dict(method='DELETE', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers', '1']), +# json=NEW_RECEIVER_DICT), +# dict(method='GET', +# uri=self.get_mock_url( +# 'clustering', 'public', append=['v1', 'receivers', '1']), +# json={}), +# ]) +# self.assertTrue(self.cloud.delete_cluster_receiver('1', wait=True)) +# self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_coe_clusters.py b/openstack/tests/unit/cloud/test_coe_clusters.py new file mode 100644 index 
0000000000..97e864e092 --- /dev/null +++ b/openstack/tests/unit/cloud/test_coe_clusters.py @@ -0,0 +1,216 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.container_infrastructure_management.v1 import cluster +from openstack.tests.unit import base + + +coe_cluster_obj = dict( + status="CREATE_IN_PROGRESS", + cluster_template_id="0562d357-8641-4759-8fed-8173f02c9633", + uuid="731387cf-a92b-4c36-981e-3271d63e5597", + links=[{}, {}], + stack_id="31c1ee6c-081e-4f39-9f0f-f1d87a7defa1", + keypair="my_keypair", + master_count=3, + create_timeout=60, + node_count=10, + name="k8s", + created_at="2016-08-29T06:51:31+00:00", + api_address="https://172.24.4.6:6443", + discovery_url="https://discovery.etcd.io/cbeb580da58915809d59ee69348a84f3", + updated_at="2016-08-29T06:53:24+00:00", + coe_version="v1.2.0", + master_addresses=["172.24.4.6"], + node_addresses=["172.24.4.13"], + status_reason="Stack CREATE completed successfully", +) + + +class TestCOEClusters(base.TestCase): + def _compare_clusters(self, exp, real): + self.assertDictEqual( + cluster.Cluster(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def get_mock_url( + self, + service_type="container-infrastructure-management", + base_url_append=None, + append=None, + resource=None, + ): + return super().get_mock_url( + service_type=service_type, + resource=resource, + append=append, + base_url_append=base_url_append, + ) + + def test_list_coe_clusters(self): + self.register_uris( + [ 
+ dict( + method="GET", + uri=self.get_mock_url(resource="clusters"), + json=dict(clusters=[coe_cluster_obj]), + ) + ] + ) + cluster_list = self.cloud.list_coe_clusters() + self._compare_clusters( + coe_cluster_obj, + cluster_list[0], + ) + self.assert_calls() + + def test_create_coe_cluster(self): + json_response = dict(uuid=coe_cluster_obj.get("uuid")) + kwargs = dict( + name=coe_cluster_obj["name"], + cluster_template_id=coe_cluster_obj["cluster_template_id"], + master_count=coe_cluster_obj["master_count"], + node_count=coe_cluster_obj["node_count"], + ) + self.register_uris( + [ + dict( + method="POST", + uri=self.get_mock_url(resource="clusters"), + json=json_response, + validate=dict(json=kwargs), + ), + ] + ) + response = self.cloud.create_coe_cluster(**kwargs) + expected = kwargs.copy() + expected.update(**json_response) + self._compare_clusters(expected, response) + self.assert_calls() + + def test_search_coe_cluster_by_name(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url(resource="clusters"), + json=dict(clusters=[coe_cluster_obj]), + ) + ] + ) + + coe_clusters = self.cloud.search_coe_clusters(name_or_id="k8s") + + self.assertEqual(1, len(coe_clusters)) + self.assertEqual(coe_cluster_obj["uuid"], coe_clusters[0]["id"]) + self.assert_calls() + + def test_search_coe_cluster_not_found(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url(resource="clusters"), + json=dict(clusters=[coe_cluster_obj]), + ) + ] + ) + + coe_clusters = self.cloud.search_coe_clusters( + name_or_id="non-existent" + ) + + self.assertEqual(0, len(coe_clusters)) + self.assert_calls() + + def test_get_coe_cluster(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url(resource="clusters"), + json=dict(clusters=[coe_cluster_obj]), + ) + ] + ) + + r = self.cloud.get_coe_cluster(coe_cluster_obj["name"]) + self.assertIsNotNone(r) + self._compare_clusters( + coe_cluster_obj, + r, + ) + self.assert_calls() + 
+ def test_get_coe_cluster_not_found(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url(resource="clusters"), + json=dict(clusters=[]), + ) + ] + ) + r = self.cloud.get_coe_cluster("doesNotExist") + self.assertIsNone(r) + self.assert_calls() + + def test_delete_coe_cluster(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url(resource="clusters"), + json=dict(clusters=[coe_cluster_obj]), + ), + dict( + method="DELETE", + uri=self.get_mock_url( + resource="clusters", append=[coe_cluster_obj['uuid']] + ), + ), + ] + ) + self.cloud.delete_coe_cluster(coe_cluster_obj["uuid"]) + self.assert_calls() + + def test_update_coe_cluster(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url(resource="clusters"), + json=dict(clusters=[coe_cluster_obj]), + ), + dict( + method="PATCH", + uri=self.get_mock_url( + resource="clusters", append=[coe_cluster_obj["uuid"]] + ), + status_code=200, + validate=dict( + json=[ + { + "op": "replace", + "path": "/node_count", + "value": 3, + } + ] + ), + ), + ] + ) + self.cloud.update_coe_cluster(coe_cluster_obj["uuid"], node_count=3) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_coe_clusters_certificate.py b/openstack/tests/unit/cloud/test_coe_clusters_certificate.py new file mode 100644 index 0000000000..f1e56e8ca4 --- /dev/null +++ b/openstack/tests/unit/cloud/test_coe_clusters_certificate.py @@ -0,0 +1,99 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.container_infrastructure_management.v1 import ( + cluster_certificate, +) +from openstack.tests.unit import base + +coe_cluster_ca_obj = dict( + cluster_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c", + pem="-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----\n", + bay_uuid="43e305ce-3a5f-412a-8a14-087834c34c8c", + links=[], +) + +coe_cluster_signed_cert_obj = dict( + cluster_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', + pem='-----BEGIN CERTIFICATE-----\nMIIDAO\n-----END CERTIFICATE-----', + bay_uuid='43e305ce-3a5f-412a-8a14-087834c34c8c', + links=[], + csr=( + '-----BEGIN CERTIFICATE REQUEST-----\nMIICfz==' + '\n-----END CERTIFICATE REQUEST-----\n' + ), +) + + +class TestCOEClusters(base.TestCase): + def _compare_cluster_certs(self, exp, real): + self.assertDictEqual( + cluster_certificate.ClusterCertificate(**exp).to_dict( + computed=False + ), + real.to_dict(computed=False), + ) + + def get_mock_url( + self, + service_type='container-infrastructure-management', + base_url_append=None, + append=None, + resource=None, + ): + return super().get_mock_url( + service_type=service_type, + resource=resource, + append=append, + base_url_append=base_url_append, + ) + + def test_get_coe_cluster_certificate(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='certificates', + append=[coe_cluster_ca_obj['cluster_uuid']], + ), + json=coe_cluster_ca_obj, + ) + ] + ) + ca_cert = self.cloud.get_coe_cluster_certificate( + coe_cluster_ca_obj['cluster_uuid'] + ) + self._compare_cluster_certs(coe_cluster_ca_obj, ca_cert) + self.assert_calls() + + def test_sign_coe_cluster_certificate(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(resource='certificates'), + json={ + "cluster_uuid": coe_cluster_signed_cert_obj[ + 'cluster_uuid' + ], + "csr": coe_cluster_signed_cert_obj['csr'], 
+ }, + ) + ] + ) + self.cloud.sign_coe_cluster_certificate( + coe_cluster_signed_cert_obj['cluster_uuid'], + coe_cluster_signed_cert_obj['csr'], + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_compute.py b/openstack/tests/unit/cloud/test_compute.py new file mode 100644 index 0000000000..f079845146 --- /dev/null +++ b/openstack/tests/unit/cloud/test_compute.py @@ -0,0 +1,397 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestServers(base.TestCase): + def test_get_server(self): + server1 = fakes.make_fake_server('123', 'mickey') + server2 = fakes.make_fake_server('345', 'mouse') + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'mickey'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=mickey'], + ), + json={'servers': [server1, server2]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={"networks": []}, + ), + ] + ) + + r = self.cloud.get_server('mickey') + self.assertIsNotNone(r) + self.assertEqual(server1['name'], r['name']) + + self.assert_calls() + + def test_get_server_not_found(self): + self.register_uris( + [ + 
self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'doesNotExist'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=doesNotExist'], + ), + json={'servers': []}, + ), + ] + ) + + r = self.cloud.get_server('doesNotExist') + self.assertIsNone(r) + + self.assert_calls() + + def test_list_servers(self): + server_id = str(uuid.uuid4()) + server_name = self.getUniqueString('name') + fake_server = fakes.make_fake_server(server_id, server_name) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': [fake_server]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={"networks": []}, + ), + ] + ) + + r = self.cloud.list_servers() + + self.assertEqual(1, len(r)) + self.assertEqual(server_name, r[0]['name']) + + self.assert_calls() + + def test_list_server_private_ip(self): + self.has_neutron = True + server_id = "97fe35e9-756a-41a2-960a-1d057d2c9ee4" + fake_server = { + "OS-EXT-STS:task_state": None, + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:b4:a3:07", + "version": 4, + "addr": "10.4.0.13", + "OS-EXT-IPS:type": "fixed", + }, + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:b4:a3:07", + "version": 4, + "addr": "89.40.216.229", + "OS-EXT-IPS:type": "floating", + }, + ] + }, + "links": [ + {"href": "http://example.com/images/95e4c4", "rel": "self"}, + { + "href": "http://example.com/images/95e4c4", + "rel": "bookmark", + }, + ], + "image": { + "id": "95e4c449-8abf-486e-97d9-dc3f82417d2d", + "links": [ + { + "href": "http://example.com/images/95e4c4", + "rel": "bookmark", + } + ], + }, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2018-03-01T02:44:50.000000", 
+ "flavor": { + "id": "3bd99062-2fe8-4eac-93f0-9200cc0f97ae", + "links": [ + { + "href": "http://example.com/flavors/95e4c4", + "rel": "bookmark", + } + ], + }, + "id": server_id, + "security_groups": [{"name": "default"}], + "user_id": "c17534835f8f42bf98fc367e0bf35e09", + "OS-DCF:diskConfig": "MANUAL", + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "OS-EXT-AZ:availability_zone": "nova", + "metadata": {}, + "status": "ACTIVE", + "updated": "2018-03-01T02:44:51Z", + "hostId": "", + "OS-SRV-USG:terminated_at": None, + "key_name": None, + "name": "mttest", + "created": "2018-03-01T02:44:46Z", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "os-extended-volumes:volumes_attached": [], + "config_drive": "", + } + fake_networks = { + "networks": [ + { + "status": "ACTIVE", + "router:external": True, + "availability_zone_hints": [], + "availability_zones": ["nova"], + "description": None, + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", + ], + "shared": False, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 + "mtu": 1550, + "is_default": False, + "admin_state_up": True, + "revision_number": 0, + "ipv4_address_scope": None, + "port_security_enabled": True, + "project_id": "a564613210ee43708b8a7fc6274ebd63", + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", + "name": "ext-net", + }, + { + "status": "ACTIVE", + "router:external": False, + "availability_zone_hints": [], + "availability_zones": ["nova"], + "description": "", + "subnets": ["f0ad1df5-53ee-473f-b86b-3604ea5591e9"], + "shared": False, + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "created_at": "2016-10-22T13:46:26Z", + "tags": [], + "ipv6_address_scope": None, + "updated_at": "2016-10-22T13:46:26Z", + "admin_state_up": True, + "mtu": 1500, + "revision_number": 0, + 
"ipv4_address_scope": None, + "port_security_enabled": True, + "project_id": "65222a4d09ea4c68934fa1028c77f394", + "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "name": "private", + }, + ] + } + fake_subnets = { + "subnets": [ + { + "service_types": [], + "description": "", + "enable_dhcp": True, + "tags": [], + "network_id": "827c6bb6-492f-4168-9577-f3a131eb29e8", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "created_at": "2017-06-12T13:23:57Z", + "dns_nameservers": [], + "updated_at": "2017-06-12T13:23:57Z", + "gateway_ip": "10.24.4.1", + "ipv6_ra_mode": None, + "allocation_pools": [ + {"start": "10.24.4.2", "end": "10.24.4.254"} + ], + "host_routes": [], + "revision_number": 0, + "ip_version": 4, + "ipv6_address_mode": None, + "cidr": "10.24.4.0/24", + "project_id": "65222a4d09ea4c68934fa1028c77f394", + "id": "3f0642d9-4644-4dff-af25-bcf64f739698", + "subnetpool_id": None, + "name": "foo_subnet", + }, + { + "service_types": [], + "description": "", + "enable_dhcp": True, + "tags": [], + "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "created_at": "2016-10-22T13:46:26Z", + "dns_nameservers": ["89.36.90.101", "89.36.90.102"], + "updated_at": "2016-10-22T13:46:26Z", + "gateway_ip": "10.4.0.1", + "ipv6_ra_mode": None, + "allocation_pools": [ + {"start": "10.4.0.2", "end": "10.4.0.200"} + ], + "host_routes": [], + "revision_number": 0, + "ip_version": 4, + "ipv6_address_mode": None, + "cidr": "10.4.0.0/24", + "project_id": "65222a4d09ea4c68934fa1028c77f394", + "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", + "subnetpool_id": None, + "name": "private-subnet-ipv4", + }, + ] + } + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', server_id] + ), + json={'server': fake_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + 
json=fake_networks, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json=fake_subnets, + ), + ] + ) + + r = self.cloud.get_server(server_id) + + self.assertEqual('10.4.0.13', r['private_v4']) + + self.assert_calls() + + def test_list_servers_all_projects(self): + """This test verifies that when list_servers is called with + `all_projects=True` that it passes `all_tenants=True` to nova.""" + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['all_tenants=True'], + ), + complete_qs=True, + json={'servers': []}, + ), + ] + ) + + self.cloud.list_servers(all_projects=True) + + self.assert_calls() + + def test_list_servers_filters(self): + """This test verifies that when list_servers is called with + `filters` dict that it passes it to nova.""" + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=[ + 'deleted=True', + 'changes-since=2014-12-03T00:00:00Z', + ], + ), + complete_qs=True, + json={'servers': []}, + ), + ] + ) + + self.cloud.list_servers( + filters={'deleted': True, 'changes-since': '2014-12-03T00:00:00Z'} + ) + + self.assert_calls() + + def test_list_servers_exception(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + status_code=400, + ), + ] + ) + + self.assertRaises(exceptions.SDKException, self.cloud.list_servers) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_create_server.py b/openstack/tests/unit/cloud/test_create_server.py new file mode 100644 index 0000000000..6ce8168ea2 --- /dev/null +++ b/openstack/tests/unit/cloud/test_create_server.py @@ -0,0 +1,1700 @@ +# Licensed under the 
Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_create_server +---------------------------------- + +Tests for the `create_server` command. +""" + +import base64 +from unittest import mock +import uuid + +from openstack.compute.v2 import server +from openstack import connection +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestCreateServer(base.TestCase): + def _compare_servers(self, exp, real): + self.assertDictEqual( + server.Server(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_create_server_with_get_exception(self): + """ + Test that a bad status code when attempting to get the server instance + raises an exception in create_server. 
+ """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + status_code=404, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_server, + 'server-name', + {'id': 'image-id'}, + {'id': 'flavor-id'}, + ) + self.assert_calls() + + def test_create_server_with_server_error(self): + """ + Test that a server error before we return or begin waiting for the + server instance spawn raises an exception in create_server. 
+ """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + error_server = fakes.make_fake_server('1234', '', 'ERROR') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': error_server}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_server, + 'server-name', + {'id': 'image-id'}, + {'id': 'flavor-id'}, + ) + self.assert_calls() + + def test_create_server_wait_server_error(self): + """ + Test that a server error while waiting for the server to spawn + raises an exception in create_server. 
+ """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + error_server = fakes.make_fake_server('1234', '', 'ERROR') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': build_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': error_server}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_server, + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + wait=True, + ) + + self.assert_calls() + + def test_create_server_with_timeout(self): + """ + Test that a timeout while waiting for the server to spawn raises an + exception in create_server. 
+ """ + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + self.assertRaises( + exceptions.ResourceTimeout, + self.cloud.create_server, + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + wait=True, + timeout=0.01, + ) + # We poll at the end, so we don't know real counts + self.assert_calls(do_count=False) + + def test_create_server_no_wait(self): + """ + Test that create_server with no wait and no exception in the + create call returns the server instance. 
+ """ + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + self.assertDictEqual( + server.Server(**fake_server).to_dict(computed=False), + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + ).to_dict(computed=False), + ) + + self.assert_calls() + + def test_create_server_config_drive(self): + """ + Test that config_drive gets passed in properly + """ + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'config_drive': True, + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + self.assertDictEqual( + server.Server(**fake_server).to_dict(computed=False), + self.cloud.create_server( + name='server-name', + 
image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + config_drive=True, + ).to_dict(computed=False), + ) + + self.assert_calls() + + def test_create_server_config_drive_none(self): + """ + Test that config_drive gets not passed in properly + """ + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + self.assertEqual( + server.Server(**fake_server).to_dict(computed=False), + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + config_drive=None, + ).to_dict(computed=False), + ) + + self.assert_calls() + + def test_create_server_with_admin_pass_no_wait(self): + """ + Test that a server with an admin_pass passed returns the password + """ + admin_pass = self.getUniqueString('password') + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + fake_create_server = fakes.make_fake_server( + '1234', '', 'BUILD', admin_pass=admin_pass + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_create_server}, + validate=dict( + json={ + 'server': { + 'adminPass': admin_pass, + 
'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + self.assertEqual( + admin_pass, + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + admin_pass=admin_pass, + )['admin_password'], + ) + + self.assert_calls() + + @mock.patch.object(connection.Connection, "wait_for_server") + def test_create_server_with_admin_pass_wait(self, mock_wait): + """ + Test that a server with an admin_pass passed returns the password + """ + admin_pass = self.getUniqueString('password') + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + fake_server_with_pass = fakes.make_fake_server( + '1234', '', 'BUILD', admin_pass=admin_pass + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server_with_pass}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'adminPass': admin_pass, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + ] + ) + + # The wait returns non-password server + mock_wait.return_value = server.Server(**fake_server) + + new_server = self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + admin_pass=admin_pass, + wait=True, + timeout=0.01, + ) + + # Assert that we did wait + self.assertTrue(mock_wait.called) + + # Even with the wait, we should still get back a passworded server + self.assertEqual( + new_server['admin_password'], 
fake_server_with_pass['adminPass'] + ) + self.assert_calls() + + def test_create_server_user_data_base64(self): + """ + Test that a server passed user-data sends it base64 encoded. + """ + user_data = self.getUniqueString('user_data') + user_data_b64 = base64.b64encode(user_data.encode('utf-8')).decode( + 'utf-8' + ) + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + fake_server['user_data'] = user_data + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'user_data': user_data_b64, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + userdata=user_data, + wait=False, + ) + + self.assert_calls() + + @mock.patch.object(connection.Connection, "get_active_server") + @mock.patch.object(connection.Connection, "get_server") + def test_wait_for_server(self, mock_get_server, mock_get_active_server): + """ + Test that waiting for a server returns the server instance when + its status changes to "ACTIVE". 
+ """ + # TODO(mordred) Rework this to not mock methods + building_server = {'id': 'fake_server_id', 'status': 'BUILDING'} + active_server = {'id': 'fake_server_id', 'status': 'ACTIVE'} + + mock_get_server.side_effect = iter([building_server, active_server]) + mock_get_active_server.side_effect = iter( + [building_server, active_server] + ) + + server = self.cloud.wait_for_server(building_server) + + self.assertEqual(2, mock_get_server.call_count) + mock_get_server.assert_has_calls( + [ + mock.call(building_server['id']), + mock.call(active_server['id']), + ] + ) + + self.assertEqual(2, mock_get_active_server.call_count) + mock_get_active_server.assert_has_calls( + [ + mock.call( + server=building_server, + reuse=True, + auto_ip=True, + ips=None, + ip_pool=None, + wait=True, + timeout=mock.ANY, + nat_destination=None, + ), + mock.call( + server=active_server, + reuse=True, + auto_ip=True, + ips=None, + ip_pool=None, + wait=True, + timeout=mock.ANY, + nat_destination=None, + ), + ] + ) + + self.assertEqual('ACTIVE', server['status']) + + @mock.patch.object(connection.Connection, 'wait_for_server') + def test_create_server_wait(self, mock_wait): + """ + Test that create_server with a wait actually does the wait. 
+ """ + # TODO(mordred) Make this a full proper response + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + ] + ) + self.cloud.create_server( + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + wait=True, + timeout=0.01, + ) + + # This is a pretty dirty hack to ensure we in principle use object with + # expected properties + srv = server.Server.existing( + connection=self.cloud, + min_count=1, + max_count=1, + networks='auto', + imageRef='image-id', + flavorRef='flavor-id', + **fake_server, + ) + mock_wait.assert_called_once_with( + srv, + auto_ip=True, + ips=None, + ip_pool=None, + reuse=True, + timeout=0.01, + nat_destination=None, + ) + self.assert_calls() + + @mock.patch.object(connection.Connection, 'add_ips_to_server') + def test_create_server_no_addresses(self, mock_add_ips_to_server): + """ + Test that create_server with a wait throws an exception if the + server doesn't have addresses. 
+ """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + fake_server = fakes.make_fake_server( + '1234', '', 'ACTIVE', addresses={} + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=['device_id=1234'], + ), + json={'ports': []}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + status_code=404, + ), + ] + ) + mock_add_ips_to_server.return_value = fake_server + + self.assertRaises( + exceptions.SDKException, + self.cloud.create_server, + 'server-name', + {'id': 'image-id'}, + {'id': 'flavor-id'}, + wait=True, + timeout=0.01, + ) + + self.assert_calls() + + def test_create_server_network_with_no_nics(self): + """ + Verify that if 'network' is supplied, and 'nics' is not, that we + attempt to get the network for the server. 
+ """ + build_server = fakes.make_fake_server('1234', '', 'BUILD') + network = {'id': 'network-id', 'name': 'network-name'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', 'network-name'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=['name=network-name'], + ), + json={'networks': [network]}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'networks': [{'uuid': 'network-id'}], + 'name': 'server-name', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': build_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [network]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + ] + ) + self.cloud.create_server( + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + network='network-name', + ) + self.assert_calls() + + def test_create_server_network_with_empty_nics(self): + """ + Verify that if 'network' is supplied, along with an empty 'nics' list, + it's treated the same as if 'nics' were not included. 
+ """ + network = {'id': 'network-id', 'name': 'network-name'} + build_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', 'network-name'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=['name=network-name'], + ), + json={'networks': [network]}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'networks': [{'uuid': 'network-id'}], + 'name': 'server-name', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': build_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [network]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + ] + ) + self.cloud.create_server( + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + network='network-name', + nics=[], + ) + self.assert_calls() + + def test_create_server_network_fixed_ip(self): + """ + Verify that if 'fixed_ip' is supplied in nics, we pass it to networks + appropriately. 
+ """ + network = {'id': 'network-id', 'name': 'network-name'} + fixed_ip = '10.0.0.1' + build_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'networks': [{'fixed_ip': fixed_ip}], + 'name': 'server-name', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': build_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [network]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + ] + ) + self.cloud.create_server( + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + nics=[{'fixed_ip': fixed_ip}], + ) + self.assert_calls() + + def test_create_server_network_v4_fixed_ip(self): + """ + Verify that if 'v4-fixed-ip' is supplied in nics, we pass it to + networks appropriately. 
+ """ + network = {'id': 'network-id', 'name': 'network-name'} + fixed_ip = '10.0.0.1' + build_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'networks': [{'fixed_ip': fixed_ip}], + 'name': 'server-name', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': build_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [network]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + ] + ) + self.cloud.create_server( + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + nics=[{'fixed_ip': fixed_ip}], + ) + self.assert_calls() + + def test_create_server_network_v6_fixed_ip(self): + """ + Verify that if 'v6-fixed-ip' is supplied in nics, we pass it to + networks appropriately. + """ + network = {'id': 'network-id', 'name': 'network-name'} + # Note - it doesn't actually have to be a v6 address - it's just + # an alias. 
+ fixed_ip = 'fe80::28da:5fff:fe57:13ed' + build_server = fakes.make_fake_server('1234', '', 'BUILD') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'networks': [{'fixed_ip': fixed_ip}], + 'name': 'server-name', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': build_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [network]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + ] + ) + self.cloud.create_server( + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + nics=[{'fixed_ip': fixed_ip}], + ) + self.assert_calls() + + def test_create_server_network_fixed_ip_conflicts(self): + """ + Verify that if 'fixed_ip' and 'v4-fixed-ip' are both supplied in nics, + we throw an exception. + """ + # Note - it doesn't actually have to be a v6 address - it's just + # an alias. 
+ self.use_nothing() + fixed_ip = '10.0.0.1' + self.assertRaises( + exceptions.SDKException, + self.cloud.create_server, + 'server-name', + dict(id='image-id'), + dict(id='flavor-id'), + nics=[{'fixed_ip': fixed_ip, 'v4-fixed-ip': fixed_ip}], + ) + self.assert_calls() + + def test_create_server_get_flavor_image(self): + self.use_glance() + image_id = str(uuid.uuid4()) + fake_image_dict = fakes.make_fake_image(image_id=image_id) + fake_image_search_return = {'images': [fake_image_dict]} + + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + + self.register_uris( + [ + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{image_id}', + json=fake_image_search_return, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['flavors', 'vanilla'], + qs_elements=[], + ), + json=fakes.FAKE_FLAVOR, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': fakes.FLAVOR_ID, + 'imageRef': image_id, + 'max_count': 1, + 'min_count': 1, + 'networks': [{'uuid': 'some-network'}], + 'name': 'server-name', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': active_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + ] + ) + + self.cloud.create_server( + 'server-name', + image_id, + 'vanilla', + nics=[{'net-id': 'some-network'}], + wait=False, + ) + + self.assert_calls() + + def test_create_server_nics_port_id(self): + '''Verify port-id in nics input turns into port in REST.''' + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + image_id = 
uuid.uuid4().hex + port_id = uuid.uuid4().hex + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': fakes.FLAVOR_ID, + 'imageRef': image_id, + 'max_count': 1, + 'min_count': 1, + 'networks': [{'port': port_id}], + 'name': 'server-name', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': active_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + ] + ) + + self.cloud.create_server( + 'server-name', + dict(id=image_id), + dict(id=fakes.FLAVOR_ID), + nics=[{'port-id': port_id}], + wait=False, + ) + + self.assert_calls() + + def test_create_boot_attach_volume(self): + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + volume_id = '20e82d93-14fa-475b-bfcc-f5e6246dd194' + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume_id] + ), + json={ + 'volume': { + 'id': volume_id, + 'status': 'available', + 'size': 1, + 'availability_zone': 'cinder', + 'name': '', + 'description': None, + 'volume_type': 'lvmdriver-1', + } + }, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'block_device_mapping_v2': [ + { + 'boot_index': 0, + 
'delete_on_termination': True, + 'destination_type': 'local', + 'source_type': 'image', + 'uuid': 'image-id', + }, + { + 'boot_index': '-1', + 'delete_on_termination': False, + 'destination_type': 'volume', + 'source_type': 'volume', + 'uuid': volume_id, + }, + ], + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': active_server}, + ), + ] + ) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + boot_from_volume=False, + volumes=[volume_id], + wait=False, + ) + + self.assert_calls() + + def test_create_boot_from_volume_image_terminate(self): + build_server = fakes.make_fake_server('1234', '', 'BUILD') + active_server = fakes.make_fake_server('1234', '', 'BUILD') + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': build_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': '', + 'max_count': 1, + 'min_count': 1, + 'block_device_mapping_v2': [ + { + 'boot_index': '0', + 'delete_on_termination': True, + 'destination_type': 'volume', + 'source_type': 'image', + 'uuid': 'image-id', + 'volume_size': '1', + } + ], + 'name': 'server-name', + 'networks': 'auto', + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': active_server}, + ), + ] + ) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + boot_from_volume=True, + terminate_volume=True, + volume_size=1, + wait=False, + ) + + self.assert_calls() + + def 
test_create_server_scheduler_hints(self): + """ + Test that setting scheduler_hints will include them in POST request + """ + scheduler_hints = { + 'group': self.getUniqueString('group'), + } + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + fake_server['scheduler_hints'] = scheduler_hints + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + }, + 'OS-SCH-HNT:scheduler_hints': scheduler_hints, + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + scheduler_hints=scheduler_hints, + wait=False, + ) + + self.assert_calls() + + def test_create_server_scheduler_hints_group_merge(self): + """ + Test that setting both scheduler_hints and group results in merged + hints in POST request + """ + group_id = uuid.uuid4().hex + group_name = self.getUniqueString('server-group') + policies = ['affinity'] + fake_group = fakes.make_fake_server_group( + group_id, group_name, policies + ) + + # The scheduler hints we pass in + scheduler_hints = { + 'different_host': [], + } + + # The scheduler hints we expect to be in POST request + scheduler_hints_merged = { + 'different_host': [], + 'group': group_id, + } + + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + fake_server['scheduler_hints'] = scheduler_hints_merged + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + 
method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-server-groups', group_id], + ), + json={'server_groups': [fake_group]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + }, + 'OS-SCH-HNT:scheduler_hints': scheduler_hints_merged, # noqa: E501 + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + scheduler_hints=dict(scheduler_hints), + group=group_id, + wait=False, + ) + + self.assert_calls() + + def test_create_server_scheduler_hints_group_override(self): + """ + Test that setting group in both scheduler_hints and group param prefers + param + """ + group_id_scheduler_hints = uuid.uuid4().hex + group_id = uuid.uuid4().hex + group_name = self.getUniqueString('server-group') + policies = ['affinity'] + fake_group = fakes.make_fake_server_group( + group_id, group_name, policies + ) + + # The scheduler hints we pass in that are expected to be ignored in + # POST call + scheduler_hints = { + 'group': group_id_scheduler_hints, + } + + # The scheduler hints we expect to be in POST request + group_scheduler_hints = { + 'group': group_id, + } + + fake_server = fakes.make_fake_server('1234', '', 'BUILD') + fake_server['scheduler_hints'] = group_scheduler_hints + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-server-groups', 
group_id], + ), + json={'server_groups': [fake_group]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['servers'] + ), + json={'server': fake_server}, + validate=dict( + json={ + 'server': { + 'flavorRef': 'flavor-id', + 'imageRef': 'image-id', + 'max_count': 1, + 'min_count': 1, + 'name': 'server-name', + 'networks': 'auto', + }, + 'OS-SCH-HNT:scheduler_hints': group_scheduler_hints, # noqa: E501 + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + json={'server': fake_server}, + ), + ] + ) + + self.cloud.create_server( + name='server-name', + image=dict(id='image-id'), + flavor=dict(id='flavor-id'), + scheduler_hints=dict(scheduler_hints), + group=group_id, + wait=False, + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_create_volume_snapshot.py b/openstack/tests/unit/cloud/test_create_volume_snapshot.py new file mode 100644 index 0000000000..f19211ba40 --- /dev/null +++ b/openstack/tests/unit/cloud/test_create_volume_snapshot.py @@ -0,0 +1,190 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_create_volume_snapshot +---------------------------------- + +Tests for the `create_volume_snapshot` command. 
+""" + +from openstack.block_storage.v3 import snapshot +from openstack.cloud import meta +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestCreateVolumeSnapshot(base.TestCase): + def setUp(self): + super().setUp() + self.use_cinder() + + def _compare_snapshots(self, exp, real): + self.assertDictEqual( + snapshot.Snapshot(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_create_volume_snapshot_wait(self): + """ + Test that create_volume_snapshot with a wait returns the volume + snapshot when its status changes to "available". + """ + snapshot_id = '5678' + volume_id = '1234' + build_snapshot = fakes.FakeVolumeSnapshot( + snapshot_id, 'creating', 'foo', 'derpysnapshot' + ) + build_snapshot_dict = meta.obj_to_munch(build_snapshot) + fake_snapshot = fakes.FakeVolumeSnapshot( + snapshot_id, 'available', 'foo', 'derpysnapshot' + ) + fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots'] + ), + json={'snapshot': build_snapshot_dict}, + validate=dict( + json={ + 'snapshot': {'volume_id': '1234', 'force': False} + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots', snapshot_id] + ), + json={'snapshot': build_snapshot_dict}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots', snapshot_id] + ), + json={'snapshot': fake_snapshot_dict}, + ), + ] + ) + + self._compare_snapshots( + fake_snapshot_dict, + self.cloud.create_volume_snapshot(volume_id=volume_id, wait=True), + ) + self.assert_calls() + + def test_create_volume_snapshot_with_timeout(self): + """ + Test that a timeout while waiting for the volume snapshot to create + raises an exception in create_volume_snapshot. 
+ """ + snapshot_id = '5678' + volume_id = '1234' + build_snapshot = fakes.FakeVolumeSnapshot( + snapshot_id, 'creating', 'foo', 'derpysnapshot' + ) + build_snapshot_dict = meta.obj_to_munch(build_snapshot) + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots'] + ), + json={'snapshot': build_snapshot_dict}, + validate=dict( + json={ + 'snapshot': {'volume_id': '1234', 'force': False} + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots', snapshot_id] + ), + json={'snapshot': build_snapshot_dict}, + ), + ] + ) + + self.assertRaises( + exceptions.ResourceTimeout, + self.cloud.create_volume_snapshot, + volume_id=volume_id, + wait=True, + timeout=0.01, + ) + self.assert_calls(do_count=False) + + def test_create_volume_snapshot_with_error(self): + """ + Test that a error status while waiting for the volume snapshot to + create raises an exception in create_volume_snapshot. 
+ """ + snapshot_id = '5678' + volume_id = '1234' + build_snapshot = fakes.FakeVolumeSnapshot( + snapshot_id, 'creating', 'bar', 'derpysnapshot' + ) + build_snapshot_dict = meta.obj_to_munch(build_snapshot) + error_snapshot = fakes.FakeVolumeSnapshot( + snapshot_id, 'error', 'blah', 'derpysnapshot' + ) + error_snapshot_dict = meta.obj_to_munch(error_snapshot) + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots'] + ), + json={'snapshot': build_snapshot_dict}, + validate=dict( + json={ + 'snapshot': {'volume_id': '1234', 'force': False} + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots', snapshot_id] + ), + json={'snapshot': build_snapshot_dict}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots', snapshot_id] + ), + json={'snapshot': error_snapshot_dict}, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.create_volume_snapshot, + volume_id=volume_id, + wait=True, + timeout=5, + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_delete_server.py b/openstack/tests/unit/cloud/test_delete_server.py new file mode 100644 index 0000000000..3b987eecbf --- /dev/null +++ b/openstack/tests/unit/cloud/test_delete_server.py @@ -0,0 +1,484 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +""" +test_delete_server +---------------------------------- + +Tests for the `delete_server` command. +""" + +import uuid + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestDeleteServer(base.TestCase): + def test_delete_server(self): + """ + Test that server delete is called when wait=False + """ + server = fakes.make_fake_server('1234', 'daffy', 'ACTIVE') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'daffy'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=daffy'], + ), + json={'servers': [server]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + ), + ] + ) + self.assertTrue(self.cloud.delete_server('daffy', wait=False)) + + self.assert_calls() + + def test_delete_server_already_gone(self): + """ + Test that we return immediately when server is already gone + """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'tweety'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=tweety'], + ), + json={'servers': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_server('tweety', wait=False)) + + self.assert_calls() + + def test_delete_server_already_gone_wait(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'speedy'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=speedy'], + ), + 
json={'servers': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_server('speedy', wait=True)) + self.assert_calls() + + def test_delete_server_wait_for_deleted(self): + """ + Test that delete_server waits for the server to be gone + """ + server = fakes.make_fake_server('9999', 'wily', 'ACTIVE') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'wily'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=wily'], + ), + json={'servers': [server]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '9999'] + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '9999'] + ), + json={'server': server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '9999'] + ), + status_code=404, + ), + ] + ) + self.assertTrue(self.cloud.delete_server('wily', wait=True)) + + self.assert_calls() + + def test_delete_server_fails(self): + """ + Test that delete_server raises non-404 exceptions + """ + server = fakes.make_fake_server('1212', 'speedy', 'ACTIVE') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'speedy'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=speedy'], + ), + json={'servers': [server]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1212'] + ), + status_code=400, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_server, + 'speedy', + wait=False, + ) + + self.assert_calls() + + def test_delete_server_no_cinder(self): + 
""" + Test that deleting server works when cinder is not available + """ + orig_has_service = self.cloud.has_service + + def fake_has_service(service_type): + if service_type == 'volume': + return False + return orig_has_service(service_type) + + self.cloud.has_service = fake_has_service + + server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'porky'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=porky'], + ), + json={'servers': [server]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + ), + ] + ) + self.assertTrue(self.cloud.delete_server('porky', wait=False)) + + self.assert_calls() + + def test_delete_server_delete_ips(self): + """ + Test that deleting server and fips works + """ + server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') + fip_id = uuid.uuid4().hex + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'porky'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=porky'], + ), + json={'servers': [server]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'floatingips'], + qs_elements=['floating_ip_address=172.24.5.5'], + ), + complete_qs=True, + json={ + 'floatingips': [ + { + 'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', # noqa: E501 + 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', # noqa: E501 + 'floating_network_id': '376da547-b977-4cfe-9cba7', # noqa: E501 + 'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.5.5', + 'port_id': 
'ce705c24-c1ef-408a-bda3-7bbd946164ac', # noqa: E501 + 'id': fip_id, + 'status': 'ACTIVE', + } + ] + }, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'floatingips', fip_id], + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + complete_qs=True, + json={'floatingips': []}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + status_code=404, + ), + ] + ) + self.assertTrue( + self.cloud.delete_server('porky', wait=True, delete_ips=True) + ) + + self.assert_calls() + + def test_delete_server_delete_ips_bad_neutron(self): + """ + Test that deleting server with a borked neutron doesn't bork + """ + server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'porky'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=porky'], + ), + json={'servers': [server]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'floatingips'], + qs_elements=['floating_ip_address=172.24.5.5'], + ), + complete_qs=True, + status_code=404, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + status_code=404, + ), + ] + ) + self.assertTrue( + self.cloud.delete_server('porky', wait=True, delete_ips=True) + ) + + self.assert_calls() + + def test_delete_server_delete_fips_nova(self): + """ + Test that deleting server with a borked neutron 
doesn't bork + """ + self.cloud._floating_ip_source = 'nova' + server = fakes.make_fake_server('1234', 'porky', 'ACTIVE') + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'porky'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=porky'], + ), + json={'servers': [server]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-floating-ips'] + ), + json={ + 'floating_ips': [ + { + 'fixed_ip': None, + 'id': 1, + 'instance_id': None, + 'ip': '172.24.5.5', + 'pool': 'nova', + } + ] + }, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['os-floating-ips', '1'] + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-floating-ips'] + ), + json={'floating_ips': []}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', '1234'] + ), + status_code=404, + ), + ] + ) + self.assertTrue( + self.cloud.delete_server('porky', wait=True, delete_ips=True) + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_delete_volume_snapshot.py b/openstack/tests/unit/cloud/test_delete_volume_snapshot.py new file mode 100644 index 0000000000..86860c1997 --- /dev/null +++ b/openstack/tests/unit/cloud/test_delete_volume_snapshot.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_delete_volume_snapshot +---------------------------------- + +Tests for the `delete_volume_snapshot` command. +""" + +from openstack.cloud import meta +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestDeleteVolumeSnapshot(base.TestCase): + def setUp(self): + super().setUp() + self.use_cinder() + + def test_delete_volume_snapshot(self): + """ + Test that delete_volume_snapshot without a wait returns True instance + when the volume snapshot deletes. + """ + fake_snapshot = fakes.FakeVolumeSnapshot( + '1234', 'available', 'foo', 'derpysnapshot' + ) + fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['snapshots', fake_snapshot.id], + ), + json={'snapshot': fake_snapshot_dict}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['snapshots', fake_snapshot_dict['id']], + ), + ), + ] + ) + + self.assertTrue( + self.cloud.delete_volume_snapshot(name_or_id='1234', wait=False) + ) + self.assert_calls() + + def test_delete_volume_snapshot_with_error(self): + """ + Test that a exception while deleting a volume snapshot will cause an + SDKException. 
+ """ + fake_snapshot = fakes.FakeVolumeSnapshot( + '1234', 'available', 'foo', 'derpysnapshot' + ) + fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['snapshots', fake_snapshot.id], + ), + json={'snapshot': fake_snapshot_dict}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['snapshots', fake_snapshot_dict['id']], + ), + status_code=404, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_volume_snapshot, + name_or_id='1234', + ) + self.assert_calls() + + def test_delete_volume_snapshot_with_timeout(self): + """ + Test that a timeout while waiting for the volume snapshot to delete + raises an exception in delete_volume_snapshot. + """ + fake_snapshot = fakes.FakeVolumeSnapshot( + '1234', 'available', 'foo', 'derpysnapshot' + ) + fake_snapshot_dict = meta.obj_to_munch(fake_snapshot) + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['snapshots', fake_snapshot.id], + ), + json={'snapshots': [fake_snapshot_dict]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['snapshots', fake_snapshot_dict['id']], + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['snapshots', '1234'] + ), + json={'snapshot': fake_snapshot_dict}, + ), + ] + ) + + self.assertRaises( + exceptions.ResourceTimeout, + self.cloud.delete_volume_snapshot, + name_or_id='1234', + wait=True, + timeout=0.01, + ) + self.assert_calls(do_count=False) diff --git a/openstack/tests/unit/cloud/test_domain_params.py b/openstack/tests/unit/cloud/test_domain_params.py new file mode 100644 index 0000000000..84535ea7d2 --- /dev/null +++ b/openstack/tests/unit/cloud/test_domain_params.py @@ -0,0 +1,78 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use 
this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack.tests.unit import base + + +class TestDomainParams(base.TestCase): + def get_mock_url( + self, + service_type='identity', + interface='public', + resource='projects', + append=None, + base_url_append='v3', + qs_elements=None, + ): + return super().get_mock_url( + service_type, + interface, + resource, + append, + base_url_append, + qs_elements, + ) + + def test_identity_params_v3(self): + project_data = self._get_project_data(v3=True) + self.register_uris( + [ + # can't retrieve by name + dict( + method='GET', + uri=self.get_mock_url(append=[project_data.project_name]), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + qs_elements=[f'name={project_data.project_name}'] + ), + json=dict( + projects=[project_data.json_response['project']] + ), + ), + ] + ) + + ret = self.cloud._get_identity_params( + domain_id='5678', project=project_data.project_name + ) + self.assertIn('default_project_id', ret) + self.assertEqual(ret['default_project_id'], project_data.project_id) + self.assertIn('domain_id', ret) + self.assertEqual(ret['domain_id'], '5678') + + self.assert_calls() + + def test_identity_params_v3_no_domain(self): + project_data = self._get_project_data(v3=True) + + self.assertRaises( + exceptions.SDKException, + self.cloud._get_identity_params, + domain_id=None, + project=project_data.project_name, + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_domains.py b/openstack/tests/unit/cloud/test_domains.py new file 
mode 100644 index 0000000000..026424d390 --- /dev/null +++ b/openstack/tests/unit/cloud/test_domains.py @@ -0,0 +1,321 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import uuid + +import testtools +from testtools import matchers + +from openstack import exceptions +from openstack.tests.unit import base + + +class TestDomains(base.TestCase): + def get_mock_url( + self, + service_type='identity', + resource='domains', + append=None, + base_url_append='v3', + qs_elements=None, + ): + return super().get_mock_url( + service_type=service_type, + resource=resource, + append=append, + base_url_append=base_url_append, + qs_elements=qs_elements, + ) + + def test_list_domains(self): + domain_data = self._get_domain_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'domains': [domain_data.json_response['domain']]}, + ) + ] + ) + domains = self.cloud.list_domains() + self.assertThat(len(domains), matchers.Equals(1)) + self.assertThat( + domains[0].name, matchers.Equals(domain_data.domain_name) + ) + self.assertThat(domains[0].id, matchers.Equals(domain_data.domain_id)) + self.assert_calls() + + def test_get_domain(self): + domain_data = self._get_domain_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[domain_data.domain_id]), + status_code=200, + json=domain_data.json_response, + ) + ] + ) + domain = 
self.cloud.get_domain(domain_id=domain_data.domain_id) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assert_calls() + + def test_get_domain_with_name_or_id(self): + domain_data = self._get_domain_data() + response = {'domains': [domain_data.json_response['domain']]} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[domain_data.domain_id]), + status_code=200, + json=domain_data.json_response, + ), + dict( + method='GET', + uri=self.get_mock_url(append=[domain_data.domain_name]), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + qs_elements=['name=' + domain_data.domain_name] + ), + status_code=200, + json=response, + ), + ] + ) + domain = self.cloud.get_domain(name_or_id=domain_data.domain_id) + domain_by_name = self.cloud.get_domain( + name_or_id=domain_data.domain_name + ) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat( + domain_by_name.id, matchers.Equals(domain_data.domain_id) + ) + self.assertThat( + domain_by_name.name, matchers.Equals(domain_data.domain_name) + ) + self.assert_calls() + + def test_create_domain(self): + domain_data = self._get_domain_data( + description=uuid.uuid4().hex, enabled=True + ) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=domain_data.json_response, + validate=dict(json=domain_data.json_request), + ) + ] + ) + domain = self.cloud.create_domain( + domain_data.domain_name, domain_data.description + ) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat( + domain.description, matchers.Equals(domain_data.description) + ) + self.assert_calls() + + def test_create_domain_exception(self): + domain_data = 
self._get_domain_data( + domain_name='domain_name', enabled=True + ) + with testtools.ExpectedException(exceptions.BadRequestException): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(), + status_code=400, + json=domain_data.json_response, + validate=dict(json=domain_data.json_request), + ) + ] + ) + self.cloud.create_domain('domain_name') + self.assert_calls() + + def test_delete_domain(self): + domain_data = self._get_domain_data() + new_resp = domain_data.json_response.copy() + new_resp['domain']['enabled'] = False + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris( + [ + dict( + method='PATCH', + uri=domain_resource_uri, + status_code=200, + json=new_resp, + validate=dict(json={'domain': {'enabled': False}}), + ), + dict( + method='DELETE', uri=domain_resource_uri, status_code=204 + ), + ] + ) + self.cloud.delete_domain(domain_data.domain_id) + self.assert_calls() + + def test_delete_domain_name_or_id(self): + domain_data = self._get_domain_data() + new_resp = domain_data.json_response.copy() + new_resp['domain']['enabled'] = False + + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[domain_data.domain_id]), + status_code=200, + json={'domain': domain_data.json_response['domain']}, + ), + dict( + method='PATCH', + uri=domain_resource_uri, + status_code=200, + json=new_resp, + validate=dict(json={'domain': {'enabled': False}}), + ), + dict( + method='DELETE', uri=domain_resource_uri, status_code=204 + ), + ] + ) + self.cloud.delete_domain(name_or_id=domain_data.domain_id) + self.assert_calls() + + def test_delete_domain_exception(self): + # NOTE(notmorgan): This test does not reflect the case where the domain + # cannot be updated to be disabled, Shade raises that as an unable + # to update domain even though it is called via delete_domain. 
This + # should be fixed in shade to catch either a failure on PATCH, + # subsequent GET, or DELETE call(s). + domain_data = self._get_domain_data() + new_resp = domain_data.json_response.copy() + new_resp['domain']['enabled'] = False + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris( + [ + dict( + method='PATCH', + uri=domain_resource_uri, + status_code=200, + json=new_resp, + validate=dict(json={'domain': {'enabled': False}}), + ), + dict( + method='DELETE', uri=domain_resource_uri, status_code=404 + ), + ] + ) + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.delete_domain(domain_data.domain_id) + self.assert_calls() + + def test_update_domain(self): + domain_data = self._get_domain_data( + description=self.getUniqueString('domainDesc') + ) + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris( + [ + dict( + method='PATCH', + uri=domain_resource_uri, + status_code=200, + json=domain_data.json_response, + validate=dict(json=domain_data.json_request), + ) + ] + ) + domain = self.cloud.update_domain( + domain_data.domain_id, + name=domain_data.domain_name, + description=domain_data.description, + ) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat( + domain.description, matchers.Equals(domain_data.description) + ) + self.assert_calls() + + def test_update_domain_name_or_id(self): + domain_data = self._get_domain_data( + description=self.getUniqueString('domainDesc') + ) + domain_resource_uri = self.get_mock_url(append=[domain_data.domain_id]) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[domain_data.domain_id]), + status_code=200, + json={'domain': domain_data.json_response['domain']}, + ), + dict( + method='PATCH', + uri=domain_resource_uri, + status_code=200, + json=domain_data.json_response, + 
validate=dict(json=domain_data.json_request), + ), + ] + ) + domain = self.cloud.update_domain( + name_or_id=domain_data.domain_id, + name=domain_data.domain_name, + description=domain_data.description, + ) + self.assertThat(domain.id, matchers.Equals(domain_data.domain_id)) + self.assertThat(domain.name, matchers.Equals(domain_data.domain_name)) + self.assertThat( + domain.description, matchers.Equals(domain_data.description) + ) + self.assert_calls() + + def test_update_domain_exception(self): + domain_data = self._get_domain_data( + description=self.getUniqueString('domainDesc') + ) + self.register_uris( + [ + dict( + method='PATCH', + uri=self.get_mock_url(append=[domain_data.domain_id]), + status_code=409, + json=domain_data.json_response, + validate=dict(json={'domain': {'enabled': False}}), + ) + ] + ) + with testtools.ExpectedException(exceptions.ConflictException): + self.cloud.delete_domain(domain_data.domain_id) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_endpoints.py b/openstack/tests/unit/cloud/test_endpoints.py new file mode 100644 index 0000000000..f4852efb32 --- /dev/null +++ b/openstack/tests/unit/cloud/test_endpoints.py @@ -0,0 +1,380 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_cloud_endpoints +---------------------------------- + +Tests Keystone endpoints commands. 
+""" + +import uuid + +from testtools import matchers + +from openstack.tests.unit import base + + +class TestCloudEndpoints(base.TestCase): + def get_mock_url( + self, + service_type='identity', + interface='public', + resource='endpoints', + append=None, + base_url_append='v3', + ): + return super().get_mock_url( + service_type, interface, resource, append, base_url_append + ) + + def _dummy_url(self): + return f'https://{uuid.uuid4().hex}.example.com/' + + def test_create_endpoint(self): + service_data = self._get_service_data() + public_endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, + interface='public', + url=self._dummy_url(), + ) + public_endpoint_data_disabled = self._get_endpoint_v3_data( + service_id=service_data.service_id, + interface='public', + url=self._dummy_url(), + enabled=False, + ) + admin_endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, + interface='admin', + url=self._dummy_url(), + region=public_endpoint_data.region_id, + ) + internal_endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, + interface='internal', + url=self._dummy_url(), + region=public_endpoint_data.region_id, + ) + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='services', append=[service_data.service_id] + ), + status_code=200, + json=service_data.json_response_v3, + ), + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=public_endpoint_data_disabled.json_response, + validate=dict( + json=public_endpoint_data_disabled.json_request + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='services', append=[service_data.service_id] + ), + status_code=200, + json=service_data.json_response_v3, + ), + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=public_endpoint_data.json_response, + validate=dict(json=public_endpoint_data.json_request), + ), + dict( + method='POST', + 
uri=self.get_mock_url(), + status_code=200, + json=internal_endpoint_data.json_response, + validate=dict(json=internal_endpoint_data.json_request), + ), + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=admin_endpoint_data.json_response, + validate=dict(json=admin_endpoint_data.json_request), + ), + ] + ) + + endpoints = self.cloud.create_endpoint( + service_name_or_id=service_data.service_id, + region=public_endpoint_data_disabled.region_id, + url=public_endpoint_data_disabled.url, + interface=public_endpoint_data_disabled.interface, + enabled=False, + ) + + # Test endpoint values + self.assertThat( + endpoints[0].id, + matchers.Equals(public_endpoint_data_disabled.endpoint_id), + ) + self.assertThat( + endpoints[0].url, + matchers.Equals(public_endpoint_data_disabled.url), + ) + self.assertThat( + endpoints[0].interface, + matchers.Equals(public_endpoint_data_disabled.interface), + ) + self.assertThat( + endpoints[0].region_id, + matchers.Equals(public_endpoint_data_disabled.region_id), + ) + self.assertThat( + endpoints[0].region_id, + matchers.Equals(public_endpoint_data_disabled.region_id), + ) + self.assertThat( + endpoints[0].is_enabled, + matchers.Equals(public_endpoint_data_disabled.enabled), + ) + + endpoints_2on3 = self.cloud.create_endpoint( + service_name_or_id=service_data.service_id, + region=public_endpoint_data.region_id, + public_url=public_endpoint_data.url, + internal_url=internal_endpoint_data.url, + admin_url=admin_endpoint_data.url, + ) + + # Three endpoints should be returned, public, internal, and admin + self.assertThat(len(endpoints_2on3), matchers.Equals(3)) + + # test keys and values are correct for each endpoint created + for result, reference in zip( + endpoints_2on3, + [ + public_endpoint_data, + internal_endpoint_data, + admin_endpoint_data, + ], + ): + self.assertThat(result.id, matchers.Equals(reference.endpoint_id)) + self.assertThat(result.url, matchers.Equals(reference.url)) + self.assertThat( + 
result.interface, matchers.Equals(reference.interface) + ) + self.assertThat( + result.region_id, matchers.Equals(reference.region_id) + ) + self.assertThat( + result.is_enabled, matchers.Equals(reference.enabled) + ) + self.assert_calls() + + def test_update_endpoint(self): + service_data = self._get_service_data() + dummy_url = self._dummy_url() + endpoint_data = self._get_endpoint_v3_data( + service_id=service_data.service_id, + interface='admin', + enabled=False, + ) + reference_request = endpoint_data.json_request.copy() + reference_request['endpoint']['url'] = dummy_url + self.register_uris( + [ + dict( + method='PATCH', + uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), + status_code=200, + json=endpoint_data.json_response, + validate=dict(json=reference_request), + ) + ] + ) + endpoint = self.cloud.update_endpoint( + endpoint_data.endpoint_id, + service_name_or_id=service_data.service_id, + region=endpoint_data.region_id, + url=dummy_url, + interface=endpoint_data.interface, + enabled=False, + ) + + # test keys and values are correct + self.assertThat( + endpoint.id, matchers.Equals(endpoint_data.endpoint_id) + ) + self.assertThat( + endpoint.service_id, matchers.Equals(service_data.service_id) + ) + self.assertThat(endpoint.url, matchers.Equals(endpoint_data.url)) + self.assertThat( + endpoint.interface, matchers.Equals(endpoint_data.interface) + ) + + self.assert_calls() + + def test_list_endpoints(self): + endpoints_data = [self._get_endpoint_v3_data() for e in range(1, 10)] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'endpoints': [ + e.json_response['endpoint'] for e in endpoints_data + ] + }, + ) + ] + ) + + endpoints = self.cloud.list_endpoints() + # test we are getting exactly len(self.mock_endpoints) elements + self.assertThat(len(endpoints), matchers.Equals(len(endpoints_data))) + + # test keys and values are correct + for i, ep in enumerate(endpoints_data): + 
self.assertThat(endpoints[i].id, matchers.Equals(ep.endpoint_id)) + self.assertThat( + endpoints[i].service_id, matchers.Equals(ep.service_id) + ) + self.assertThat(endpoints[i].url, matchers.Equals(ep.url)) + self.assertThat( + endpoints[i].interface, matchers.Equals(ep.interface) + ) + + self.assert_calls() + + def test_search_endpoints(self): + endpoints_data = [ + self._get_endpoint_v3_data(region='region1') for e in range(0, 2) + ] + endpoints_data.extend( + [self._get_endpoint_v3_data() for e in range(1, 8)] + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'endpoints': [ + e.json_response['endpoint'] for e in endpoints_data + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'endpoints': [ + e.json_response['endpoint'] for e in endpoints_data + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'endpoints': [ + e.json_response['endpoint'] for e in endpoints_data + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'endpoints': [ + e.json_response['endpoint'] for e in endpoints_data + ] + }, + ), + ] + ) + + # Search by id + endpoints = self.cloud.search_endpoints( + id=endpoints_data[-1].endpoint_id + ) + # # test we are getting exactly 1 element + self.assertEqual(1, len(endpoints)) + self.assertThat( + endpoints[0].id, matchers.Equals(endpoints_data[-1].endpoint_id) + ) + self.assertThat( + endpoints[0].service_id, + matchers.Equals(endpoints_data[-1].service_id), + ) + self.assertThat( + endpoints[0].url, matchers.Equals(endpoints_data[-1].url) + ) + self.assertThat( + endpoints[0].interface, + matchers.Equals(endpoints_data[-1].interface), + ) + + # Not found + endpoints = self.cloud.search_endpoints(id='!invalid!') + self.assertEqual(0, len(endpoints)) + + # Multiple matches + endpoints = self.cloud.search_endpoints( + filters={'region_id': 'region1'} + ) + # # test we 
are getting exactly 2 elements + self.assertEqual(2, len(endpoints)) + + # test we are getting the correct response for region/region_id compat + endpoints = self.cloud.search_endpoints( + filters={'region_id': 'region1'} + ) + # # test we are getting exactly 2 elements, this is v3 + self.assertEqual(2, len(endpoints)) + + self.assert_calls() + + def test_delete_endpoint(self): + endpoint_data = self._get_endpoint_v3_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), + status_code=200, + json=endpoint_data.json_response['endpoint'], + ), + dict( + method='DELETE', + uri=self.get_mock_url(append=[endpoint_data.endpoint_id]), + status_code=204, + ), + ] + ) + + # Delete by id + self.cloud.delete_endpoint(id=endpoint_data.endpoint_id) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_flavors.py b/openstack/tests/unit/cloud/test_flavors.py new file mode 100644 index 0000000000..de36018152 --- /dev/null +++ b/openstack/tests/unit/cloud/test_flavors.py @@ -0,0 +1,416 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFlavors(base.TestCase): + def setUp(self): + super().setUp() + # self.use_compute_discovery() + + def test_create_flavor(self): + self.use_compute_discovery() + self.register_uris( + [ + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors', + json={'flavor': fakes.FAKE_FLAVOR}, + validate=dict( + json={ + 'flavor': { + "name": "vanilla", + "description": None, + "ram": 65536, + "vcpus": 24, + "swap": 0, + "os-flavor-access:is_public": True, + "rxtx_factor": 1.0, + "OS-FLV-EXT-DATA:ephemeral": 0, + "disk": 1600, + "id": None, + } + } + ), + ) + ] + ) + + self.cloud.create_flavor( + 'vanilla', + ram=65536, + disk=1600, + vcpus=24, + ) + self.assert_calls() + + def test_delete_flavor(self): + self.use_compute_discovery() + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla', + json=fakes.FAKE_FLAVOR, + ), + dict( + method='DELETE', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{fakes.FLAVOR_ID}', + ), + ] + ) + self.assertTrue(self.cloud.delete_flavor('vanilla')) + + self.assert_calls() + + def test_delete_flavor_not_found(self): + self.use_compute_discovery() + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/invalid', + status_code=404, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', + json={'flavors': fakes.FAKE_FLAVOR_LIST}, + ), + ] + ) + + self.assertFalse(self.cloud.delete_flavor('invalid')) + + self.assert_calls() + + def test_delete_flavor_exception(self): + self.use_compute_discovery() + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla', + json=fakes.FAKE_FLAVOR, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', + json={'flavors': fakes.FAKE_FLAVOR_LIST}, + ), + dict( + method='DELETE', + 
uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{fakes.FLAVOR_ID}', + status_code=503, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_flavor, + 'vanilla', + ) + + def test_list_flavors(self): + self.use_compute_discovery() + uris_to_mock = [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', + json={'flavors': fakes.FAKE_FLAVOR_LIST}, + ), + ] + self.register_uris(uris_to_mock) + + flavors = self.cloud.list_flavors() + + # test that new flavor is created correctly + found = False + for flavor in flavors: + if flavor['name'] == 'vanilla': + found = True + break + self.assertTrue(found) + needed_keys = {'name', 'ram', 'vcpus', 'id', 'is_public', 'disk'} + if found: + # check flavor content + self.assertTrue(needed_keys.issubset(flavor.keys())) + self.assert_calls() + + def test_list_flavors_with_extra(self): + self.use_compute_discovery() + uris_to_mock = [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', + json={'flavors': fakes.FAKE_FLAVOR_LIST}, + ), + ] + uris_to_mock.extend( + [ + dict( + method='GET', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id'] + ), + json={'extra_specs': {}}, + ) + for flavor in fakes.FAKE_FLAVOR_LIST + ] + ) + self.register_uris(uris_to_mock) + + flavors = self.cloud.list_flavors(get_extra=True) + + # test that new flavor is created correctly + found = False + for flavor in flavors: + if flavor['name'] == 'vanilla': + found = True + break + self.assertTrue(found) + needed_keys = {'name', 'ram', 'vcpus', 'id', 'is_public', 'disk'} + if found: + # check flavor content + self.assertTrue(needed_keys.issubset(flavor.keys())) + self.assert_calls() + + def test_get_flavor_by_ram(self): + self.use_compute_discovery() + uris_to_mock = [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', + json={'flavors': fakes.FAKE_FLAVOR_LIST}, + ), + ] + 
uris_to_mock.extend( + [ + dict( + method='GET', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id'] + ), + json={'extra_specs': {}}, + ) + for flavor in fakes.FAKE_FLAVOR_LIST + ] + ) + self.register_uris(uris_to_mock) + + flavor = self.cloud.get_flavor_by_ram(ram=250) + self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id']) + + def test_get_flavor_by_ram_and_include(self): + self.use_compute_discovery() + uris_to_mock = [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', + json={'flavors': fakes.FAKE_FLAVOR_LIST}, + ), + ] + uris_to_mock.extend( + [ + dict( + method='GET', + uri='{endpoint}/flavors/{id}/os-extra_specs'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=flavor['id'] + ), + json={'extra_specs': {}}, + ) + for flavor in fakes.FAKE_FLAVOR_LIST + ] + ) + self.register_uris(uris_to_mock) + flavor = self.cloud.get_flavor_by_ram(ram=150, include='strawberry') + self.assertEqual(fakes.STRAWBERRY_FLAVOR_ID, flavor['id']) + + def test_get_flavor_by_ram_not_found(self): + self.use_compute_discovery() + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/detail?is_public=None', + json={'flavors': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.get_flavor_by_ram, + ram=100, + ) + + def test_get_flavor_string_and_int(self): + self.use_compute_discovery() + flavor_resource_uri = ( + f'{fakes.COMPUTE_ENDPOINT}/flavors/1/os-extra_specs' + ) + flavor = fakes.make_fake_flavor('1', 'vanilla') + flavor_json = {'extra_specs': {}} + + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/1', + json=flavor, + ), + dict(method='GET', uri=flavor_resource_uri, json=flavor_json), + ] + ) + + flavor1 = self.cloud.get_flavor('1') + self.assertEqual('1', flavor1['id']) + flavor2 = self.cloud.get_flavor(1) + self.assertEqual('1', flavor2['id']) + + def test_set_flavor_specs(self): + 
self.use_compute_discovery() + extra_specs = dict(key1='value1') + self.register_uris( + [ + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{1}/os-extra_specs', + json=dict(extra_specs=extra_specs), + ) + ] + ) + + self.cloud.set_flavor_specs(1, extra_specs) + self.assert_calls() + + def test_unset_flavor_specs(self): + self.use_compute_discovery() + keys = ['key1', 'key2'] + self.register_uris( + [ + dict( + method='DELETE', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/{1}/os-extra_specs/{key}', + ) + for key in keys + ] + ) + + self.cloud.unset_flavor_specs(1, keys) + self.assert_calls() + + def test_add_flavor_access(self): + self.register_uris( + [ + dict( + method='POST', + uri='{endpoint}/flavors/{id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id' + ), + json={ + 'flavor_access': [ + { + 'flavor_id': 'flavor_id', + 'tenant_id': 'tenant_id', + } + ] + }, + validate=dict( + json={'addTenantAccess': {'tenant': 'tenant_id'}} + ), + ) + ] + ) + + self.cloud.add_flavor_access('flavor_id', 'tenant_id') + self.assert_calls() + + def test_remove_flavor_access(self): + self.register_uris( + [ + dict( + method='POST', + uri='{endpoint}/flavors/{id}/action'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id='flavor_id' + ), + json={'flavor_access': []}, + validate=dict( + json={'removeTenantAccess': {'tenant': 'tenant_id'}} + ), + ) + ] + ) + + self.cloud.remove_flavor_access('flavor_id', 'tenant_id') + self.assert_calls() + + def test_list_flavor_access(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/flavors/vanilla/os-flavor-access', + json={ + 'flavor_access': [ + {'flavor_id': 'vanilla', 'tenant_id': 'tenant_id'} + ] + }, + ) + ] + ) + self.cloud.list_flavor_access('vanilla') + self.assert_calls() + + def test_get_flavor_by_id(self): + self.use_compute_discovery() + flavor_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1' + flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} + + 
self.register_uris( + [ + dict(method='GET', uri=flavor_uri, json=flavor_json), + ] + ) + + flavor1 = self.cloud.get_flavor_by_id('1') + self.assertEqual('1', flavor1['id']) + self.assertEqual({}, flavor1.extra_specs) + flavor2 = self.cloud.get_flavor_by_id('1') + self.assertEqual('1', flavor2['id']) + self.assertEqual({}, flavor2.extra_specs) + + def test_get_flavor_with_extra_specs(self): + self.use_compute_discovery() + flavor_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1' + flavor_extra_uri = f'{fakes.COMPUTE_ENDPOINT}/flavors/1/os-extra_specs' + flavor_json = {'flavor': fakes.make_fake_flavor('1', 'vanilla')} + flavor_extra_json = {'extra_specs': {'name': 'test'}} + + self.register_uris( + [ + dict(method='GET', uri=flavor_uri, json=flavor_json), + dict( + method='GET', uri=flavor_extra_uri, json=flavor_extra_json + ), + ] + ) + + flavor1 = self.cloud.get_flavor_by_id('1', get_extra=True) + self.assertEqual('1', flavor1['id']) + self.assertEqual({'name': 'test'}, flavor1.extra_specs) + flavor2 = self.cloud.get_flavor_by_id('1', get_extra=False) + self.assertEqual('1', flavor2['id']) + self.assertEqual({}, flavor2.extra_specs) diff --git a/openstack/tests/unit/cloud/test_floating_ip_common.py b/openstack/tests/unit/cloud/test_floating_ip_common.py new file mode 100644 index 0000000000..d61a931811 --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_common.py @@ -0,0 +1,248 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_floating_ip_common +---------------------------------- + +Tests floating IP resource methods for Neutron and Nova-network. +""" + +from unittest.mock import patch + +from openstack.cloud import meta +from openstack.compute.v2 import server as _server +from openstack import connection +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFloatingIP(base.TestCase): + @patch.object(connection.Connection, 'get_floating_ip') + @patch.object(connection.Connection, '_attach_ip_to_server') + @patch.object(connection.Connection, 'available_floating_ip') + def test_add_auto_ip( + self, + mock_available_floating_ip, + mock_attach_ip_to_server, + mock_get_floating_ip, + ): + server_dict = fakes.make_fake_server( + server_id='server-id', + name='test-server', + status="ACTIVE", + addresses={}, + ) + floating_ip_dict = { + "id": "this-is-a-floating-ip-id", + "fixed_ip_address": None, + "internal_network": None, + "floating_ip_address": "203.0.113.29", + "network": "this-is-a-net-or-pool-id", + "attached": False, + "status": "ACTIVE", + } + + mock_available_floating_ip.return_value = floating_ip_dict + + self.cloud.add_auto_ip(server=server_dict) + + mock_attach_ip_to_server.assert_called_with( + timeout=60, + wait=False, + server=server_dict, + floating_ip=floating_ip_dict, + skip_attach=False, + ) + + @patch.object(connection.Connection, '_add_ip_from_pool') + def test_add_ips_to_server_pool(self, mock_add_ip_from_pool): + server_dict = fakes.make_fake_server( + server_id='romeo', + name='test-server', + status="ACTIVE", + addresses={}, + ) + pool = 'nova' + + self.cloud.add_ips_to_server(server_dict, ip_pool=pool) + + mock_add_ip_from_pool.assert_called_with( + server_dict, + pool, + reuse=True, + wait=False, + timeout=60, + fixed_address=None, + nat_destination=None, + ) + + @patch.object(connection.Connection, 'has_service') 
+ @patch.object(connection.Connection, 'get_floating_ip') + @patch.object(connection.Connection, '_add_auto_ip') + def test_add_ips_to_server_ipv6_only( + self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service + ): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + mock_has_service.return_value = False + server = fakes.make_fake_server( + server_id='server-id', + name='test-server', + status="ACTIVE", + addresses={ + 'private': [{'addr': "10.223.160.141", 'version': 4}], + 'public': [ + { + 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', + 'OS-EXT-IPS:type': 'fixed', + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6, + } + ], + }, + ) + server_dict = meta.add_server_interfaces( + self.cloud, _server.Server(**server) + ) + + new_server = self.cloud.add_ips_to_server(server=server_dict) + mock_get_floating_ip.assert_not_called() + mock_add_auto_ip.assert_not_called() + self.assertEqual( + new_server['interface_ip'], + '2001:4800:7819:103:be76:4eff:fe05:8525', + ) + self.assertEqual(new_server['private_v4'], '10.223.160.141') + self.assertEqual(new_server['public_v4'], '') + self.assertEqual( + new_server['public_v6'], '2001:4800:7819:103:be76:4eff:fe05:8525' + ) + + @patch.object(connection.Connection, 'has_service') + @patch.object(connection.Connection, 'get_floating_ip') + @patch.object(connection.Connection, '_add_auto_ip') + def test_add_ips_to_server_rackspace( + self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service + ): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + mock_has_service.return_value = False + server = fakes.make_fake_server( + server_id='server-id', + name='test-server', + status="ACTIVE", + addresses={ + 'private': [{'addr': "10.223.160.141", 'version': 4}], + 'public': [ + {'addr': "104.130.246.91", 'version': 4}, + { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6, + }, + ], + }, 
+ ) + server_dict = meta.add_server_interfaces( + self.cloud, _server.Server(**server) + ) + + new_server = self.cloud.add_ips_to_server(server=server_dict) + mock_get_floating_ip.assert_not_called() + mock_add_auto_ip.assert_not_called() + self.assertEqual( + new_server['interface_ip'], + '2001:4800:7819:103:be76:4eff:fe05:8525', + ) + + @patch.object(connection.Connection, 'has_service') + @patch.object(connection.Connection, 'get_floating_ip') + @patch.object(connection.Connection, '_add_auto_ip') + def test_add_ips_to_server_rackspace_local_ipv4( + self, mock_add_auto_ip, mock_get_floating_ip, mock_has_service + ): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = False + mock_has_service.return_value = False + server = fakes.make_fake_server( + server_id='server-id', + name='test-server', + status="ACTIVE", + addresses={ + 'private': [{'addr': "10.223.160.141", 'version': 4}], + 'public': [ + {'addr': "104.130.246.91", 'version': 4}, + { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6, + }, + ], + }, + ) + server_dict = meta.add_server_interfaces( + self.cloud, _server.Server(**server) + ) + + new_server = self.cloud.add_ips_to_server(server=server_dict) + mock_get_floating_ip.assert_not_called() + mock_add_auto_ip.assert_not_called() + self.assertEqual(new_server['interface_ip'], '104.130.246.91') + + @patch.object(connection.Connection, 'add_ip_list') + def test_add_ips_to_server_ip_list(self, mock_add_ip_list): + server_dict = fakes.make_fake_server( + server_id='server-id', + name='test-server', + status="ACTIVE", + addresses={}, + ) + ips = ['203.0.113.29', '172.24.4.229'] + + self.cloud.add_ips_to_server(server_dict, ips=ips) + + mock_add_ip_list.assert_called_with( + server_dict, + ips, + wait=False, + timeout=60, + fixed_address=None, + nat_destination=None, + ) + + @patch.object(connection.Connection, '_needs_floating_ip') + @patch.object(connection.Connection, '_add_auto_ip') + def 
test_add_ips_to_server_auto_ip( + self, mock_add_auto_ip, mock_needs_floating_ip + ): + server_dict = fakes.make_fake_server( + server_id='server-id', + name='test-server', + status="ACTIVE", + addresses={}, + ) + + # TODO(mordred) REMOVE THIS MOCK WHEN THE NEXT PATCH LANDS + # SERIOUSLY THIS TIME. NEXT PATCH - WHICH SHOULD ADD MOCKS FOR + # list_ports AND list_networks AND list_subnets. BUT THAT WOULD + # BE NOT ACTUALLY RELATED TO THIS PATCH. SO DO IT NEXT PATCH + mock_needs_floating_ip.return_value = True + + self.cloud.add_ips_to_server(server_dict) + + mock_add_auto_ip.assert_called_with( + server_dict, wait=False, timeout=60, reuse=True + ) diff --git a/openstack/tests/unit/cloud/test_floating_ip_neutron.py b/openstack/tests/unit/cloud/test_floating_ip_neutron.py new file mode 100644 index 0000000000..da90b6de8d --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_neutron.py @@ -0,0 +1,1466 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +test_floating_ip_neutron +---------------------------------- + +Tests Floating IP resource methods for Neutron +""" + +import copy +import datetime + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base +from openstack import utils + + +class TestFloatingIP(base.TestCase): + mock_floating_ip_list_rep = { + 'floatingips': [ + { + 'router_id': 'd23abc8d-2991-4a55-ba98-2aaea84cc72f', + 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', + 'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57', + 'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.4.229', + 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', + 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda7', + 'status': 'ACTIVE', + }, + { + 'router_id': None, + 'tenant_id': '4969c491a3c74ee4af974e6d800c62de', + 'floating_network_id': '376da547-b977-4cfe-9cba-275c80debf57', + 'fixed_ip_address': None, + 'floating_ip_address': '203.0.113.30', + 'port_id': None, + 'id': '61cea855-49cb-4846-997d-801b70c71bdd', + 'status': 'DOWN', + }, + ] + } + + mock_floating_ip_new_rep = { + 'floatingip': { + 'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.4.229', + 'floating_network_id': 'my-network-id', + 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8', + 'port_id': None, + 'router_id': None, + 'status': 'ACTIVE', + 'tenant_id': '4969c491a3c74ee4af974e6d800c62df', + } + } + + mock_floating_ip_port_rep = { + 'floatingip': { + 'fixed_ip_address': '10.0.0.4', + 'floating_ip_address': '172.24.4.229', + 'floating_network_id': 'my-network-id', + 'id': '2f245a7b-796b-4f26-9cf9-9e82d248fda8', + 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', + 'router_id': None, + 'status': 'ACTIVE', + 'tenant_id': '4969c491a3c74ee4af974e6d800c62df', + } + } + + mock_get_network_rep = { + 'status': 'ACTIVE', + 'subnets': ['54d6f61d-db07-451c-9ab3-b9609b6b6f0b'], + 'name': 'my-network', + 'provider:physical_network': None, + 'admin_state_up': True, + 'tenant_id': 
'4fd44f30292945e481c7b8a0c8908869', + 'provider:network_type': 'local', + 'router:external': True, + 'shared': True, + 'id': 'my-network-id', + 'provider:segmentation_id': None, + } + + mock_search_ports_rep = [ + { + 'status': 'ACTIVE', + 'binding:host_id': 'devstack', + 'name': 'first-port', + 'created_at': datetime.datetime.now().isoformat(), + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3', + 'tenant_id': '', + 'extra_dhcp_opts': [], + 'binding:vif_details': { + 'port_filter': True, + 'ovs_hybrid_plug': True, + }, + 'binding:vif_type': 'ovs', + 'device_owner': 'compute:None', + 'mac_address': 'fa:16:3e:58:42:ed', + 'binding:profile': {}, + 'binding:vnic_type': 'normal', + 'fixed_ips': [ + { + 'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062', + 'ip_address': '172.24.4.2', + } + ], + 'id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', + 'security_groups': [], + 'device_id': 'server-id', + } + ] + + def assertAreInstances(self, elements, elem_type): + for e in elements: + self.assertIsInstance(e, elem_type) + + def setUp(self): + super().setUp() + + self.fake_server = fakes.make_fake_server( + 'server-id', + '', + 'ACTIVE', + addresses={ + 'test_pnztt_net': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': '192.0.2.129', + 'version': 4, + 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', + } + ] + }, + ) + self.floating_ip = self.mock_floating_ip_list_rep['floatingips'][0] + + def test_list_floating_ips(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/floatingips', + json=self.mock_floating_ip_list_rep, + ) + ] + ) + + floating_ips = self.cloud.list_floating_ips() + + self.assertIsInstance(floating_ips, list) + self.assertAreInstances(floating_ips, dict) + self.assertEqual(2, len(floating_ips)) + + self.assert_calls() + + def test_list_floating_ips_with_filters(self): + self.register_uris( + [ + dict( + method='GET', + uri=( + 
'https://network.example.com/v2.0/floatingips?' + 'description=42' + ), + json={'floatingips': []}, + ) + ] + ) + + self.cloud.list_floating_ips(filters={'description': 42}) + + self.assert_calls() + + def test_search_floating_ips(self): + self.register_uris( + [ + dict( + method='GET', + uri=('https://network.example.com/v2.0/floatingips'), + json=self.mock_floating_ip_list_rep, + ) + ] + ) + + floating_ips = self.cloud.search_floating_ips( + filters={'updated_at': 'never'} + ) + + self.assertIsInstance(floating_ips, list) + self.assertAreInstances(floating_ips, dict) + self.assertEqual(0, len(floating_ips)) + self.assert_calls() + + def test_get_floating_ip(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/floatingips', + json=self.mock_floating_ip_list_rep, + ) + ] + ) + + floating_ip = self.cloud.get_floating_ip( + id='2f245a7b-796b-4f26-9cf9-9e82d248fda7' + ) + + self.assertIsInstance(floating_ip, dict) + self.assertEqual('172.24.4.229', floating_ip['floating_ip_address']) + self.assertEqual( + self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'], + floating_ip['project_id'], + ) + self.assertEqual( + self.mock_floating_ip_list_rep['floatingips'][0]['tenant_id'], + floating_ip['tenant_id'], + ) + self.assertIn('location', floating_ip) + self.assert_calls() + + def test_get_floating_ip_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/floatingips', + json=self.mock_floating_ip_list_rep, + ) + ] + ) + + floating_ip = self.cloud.get_floating_ip(id='non-existent') + + self.assertIsNone(floating_ip) + self.assert_calls() + + def test_get_floating_ip_by_id(self): + fid = self.mock_floating_ip_new_rep['floatingip']['id'] + self.register_uris( + [ + dict( + method='GET', + uri=f'https://network.example.com/v2.0/floatingips/{fid}', + json=self.mock_floating_ip_new_rep, + ) + ] + ) + + floating_ip = self.cloud.get_floating_ip_by_id(id=fid) + + 
self.assertIsInstance(floating_ip, dict) + self.assertEqual('172.24.4.229', floating_ip['floating_ip_address']) + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['tenant_id'], + floating_ip['project_id'], + ) + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['tenant_id'], + floating_ip['tenant_id'], + ) + self.assertIn('location', floating_ip) + self.assert_calls() + + def test_create_floating_ip(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks/my-network', + status_code=404, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/networks' + '?name=my-network', + json={'networks': [self.mock_get_network_rep]}, + ), + dict( + method='POST', + uri='https://network.example.com/v2.0/floatingips', + json=self.mock_floating_ip_new_rep, + validate=dict( + json={ + 'floatingip': { + 'floating_network_id': 'my-network-id' + } + } + ), + ), + ] + ) + ip = self.cloud.create_floating_ip(network='my-network') + + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], + ip['floating_ip_address'], + ) + self.assert_calls() + + def test_create_floating_ip_port_bad_response(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks/my-network', + json=self.mock_get_network_rep, + ), + dict( + method='POST', + uri='https://network.example.com/v2.0/floatingips', + json=self.mock_floating_ip_new_rep, + validate=dict( + json={ + 'floatingip': { + 'floating_network_id': 'my-network-id', + 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ab', # noqa: E501 + } + } + ), + ), + ] + ) + + # Fails because we requested a port and the returned FIP has no port + self.assertRaises( + exceptions.SDKException, + self.cloud.create_floating_ip, + network='my-network', + port='ce705c24-c1ef-408a-bda3-7bbd946164ab', + ) + self.assert_calls() + + def test_create_floating_ip_port(self): + self.register_uris( + [ + dict( + 
method='GET', + uri='https://network.example.com/v2.0/networks/my-network', + status_code=404, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/networks' + '?name=my-network', + json={'networks': [self.mock_get_network_rep]}, + ), + dict( + method='POST', + uri='https://network.example.com/v2.0/floatingips', + json=self.mock_floating_ip_port_rep, + validate=dict( + json={ + 'floatingip': { + 'floating_network_id': 'my-network-id', + 'port_id': 'ce705c24-c1ef-408a-bda3-7bbd946164ac', # noqa: E501 + } + } + ), + ), + ] + ) + + ip = self.cloud.create_floating_ip( + network='my-network', port='ce705c24-c1ef-408a-bda3-7bbd946164ac' + ) + + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], + ip['floating_ip_address'], + ) + self.assert_calls() + + def test_neutron_available_floating_ips(self): + """ + Test without specifying a network name. + """ + fips_mock_uri = 'https://network.example.com/v2.0/floatingips' + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={'networks': [self.mock_get_network_rep]}, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': []}, + ), + dict( + method='GET', uri=fips_mock_uri, json={'floatingips': []} + ), + dict( + method='POST', + uri=fips_mock_uri, + json=self.mock_floating_ip_new_rep, + validate=dict( + json={ + 'floatingip': { + 'floating_network_id': self.mock_get_network_rep[ # noqa: E501 + 'id' + ] + } + } + ), + ), + ] + ) + + # Test if first network is selected if no network is given + self.cloud._neutron_available_floating_ips() + self.assert_calls() + + def test_neutron_available_floating_ips_network(self): + """ + Test with specifying a network name. 
+ """ + fips_mock_uri = 'https://network.example.com/v2.0/floatingips' + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={'networks': [self.mock_get_network_rep]}, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': []}, + ), + dict( + method='GET', uri=fips_mock_uri, json={'floatingips': []} + ), + dict( + method='POST', + uri=fips_mock_uri, + json=self.mock_floating_ip_new_rep, + validate=dict( + json={ + 'floatingip': { + 'floating_network_id': self.mock_get_network_rep[ # noqa: E501 + 'id' + ] + } + } + ), + ), + ] + ) + + # Test if first network is selected if no network is given + self.cloud._neutron_available_floating_ips( + network=self.mock_get_network_rep['name'] + ) + self.assert_calls() + + def test_neutron_available_floating_ips_invalid_network(self): + """ + Test with an invalid network name. + """ + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={'networks': [self.mock_get_network_rep]}, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': []}, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud._neutron_available_floating_ips, + network='INVALID', + ) + + self.assert_calls() + + def test_auto_ip_pool_no_reuse(self): + server_id = 'f80e3ad0-e13e-41d4-8e9c-be79bccdb8f7' + # payloads taken from citycloud + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks/ext-net', + status_code=404, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/networks?name=ext-net', + json={ + "networks": [ + { + "status": "ACTIVE", + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", + ], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "ext-net", + "admin_state_up": 
True, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 + "mtu": 0, + "is_default": False, + "router:external": True, + "ipv4_address_scope": None, + "shared": False, + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", + "description": None, + } + ] + }, + ), + dict( + method='GET', + uri=f'https://network.example.com/v2.0/ports?device_id={server_id}', + json={ + "ports": [ + { + "status": "ACTIVE", + "created_at": "2017-02-06T20:59:45", + "description": "", + "allowed_address_pairs": [], + "admin_state_up": True, + "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", # noqa: E501 + "dns_name": None, + "extra_dhcp_opts": [], + "mac_address": "fa:16:3e:e8:7f:03", + "updated_at": "2017-02-06T20:59:49", + "name": "", + "device_owner": "compute:None", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 + "binding:vnic_type": "normal", + "fixed_ips": [ + { + "subnet_id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", # noqa: E501 + "ip_address": "10.4.0.16", + } + ], + "id": "a767944e-057a-47d1-a669-824a21b8fb7b", + "security_groups": [ + "9fb5ba44-5c46-4357-8e60-8b55526cab54" + ], + "device_id": server_id, + } + ] + }, + ), + dict( + method='POST', + uri='https://network.example.com/v2.0/floatingips', + json={ + "floatingip": { + "router_id": "9de9c787-8f89-4a53-8468-a5533d6d7fd1", # noqa: E501 + "status": "DOWN", + "description": "", + "dns_domain": "", + "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa: E501 + "fixed_ip_address": "10.4.0.16", + "floating_ip_address": "89.40.216.153", + "port_id": "a767944e-057a-47d1-a669-824a21b8fb7b", + "id": "e69179dc-a904-4c9a-a4c9-891e2ecb984c", + "dns_name": "", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + } + }, + validate=dict( + json={ + "floatingip": { + "floating_network_id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", # noqa: E501 + "fixed_ip_address": "10.4.0.16", + "port_id": 
"a767944e-057a-47d1-a669-824a21b8fb7b", # noqa: E501 + } + } + ), + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=f'https://compute.example.com/v2.1/servers/{server_id}', + json={ + "server": { + "status": "ACTIVE", + "updated": "2017-02-06T20:59:49Z", + "addresses": { + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", # noqa: E501 + "version": 4, + "addr": "10.4.0.16", + "OS-EXT-IPS:type": "fixed", + }, + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", # noqa: E501 + "version": 4, + "addr": "89.40.216.153", + "OS-EXT-IPS:type": "floating", + }, + ] + }, + "key_name": None, + "image": { + "id": "95e4c449-8abf-486e-97d9-dc3f82417d2d" + }, + "OS-EXT-STS:task_state": None, + "OS-EXT-STS:vm_state": "active", + "OS-SRV-USG:launched_at": "2017-02-06T20:59:48.000000", # noqa: E501 + "flavor": { + "id": "2186bd79-a05e-4953-9dde-ddefb63c88d4" + }, + "id": server_id, + "security_groups": [{"name": "default"}], + "OS-SRV-USG:terminated_at": None, + "OS-EXT-AZ:availability_zone": "nova", + "user_id": "c17534835f8f42bf98fc367e0bf35e09", + "name": "testmt", + "created": "2017-02-06T20:59:44Z", + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", + "OS-DCF:diskConfig": "MANUAL", + "os-extended-volumes:volumes_attached": [], + "accessIPv4": "", + "accessIPv6": "", + "progress": 0, + "OS-EXT-STS:power_state": 1, + "config_drive": "", + "metadata": {}, + } + }, + ), + ] + ) + + self.cloud.add_ips_to_server( + utils.Munch( + id=server_id, + addresses={ + "private": [ + { + "OS-EXT-IPS-MAC:mac_addr": "fa:16:3e:e8:7f:03", + "version": 4, + "addr": "10.4.0.16", + "OS-EXT-IPS:type": "fixed", + } + ] + }, + ), + ip_pool='ext-net', + reuse=False, + ) + + self.assert_calls() + + def test_available_floating_ip_new(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [self.mock_get_network_rep]}, + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': []}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + validate=dict( + json={ + 'floatingip': { + 'floating_network_id': 'my-network-id' + } + } + ), + json=self.mock_floating_ip_new_rep, + ), + ] + ) + + ip = self.cloud.available_floating_ip(network='my-network') + + self.assertEqual( + self.mock_floating_ip_new_rep['floatingip']['floating_ip_address'], + ip['floating_ip_address'], + ) + self.assert_calls() + + def test_delete_floating_ip_existing(self): + fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' + fake_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'ACTIVE', + } + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [fake_fip]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [fake_fip]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': []}, + ), + ] + ) + + self.assertTrue( + self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2) + ) + self.assert_calls() + + def test_delete_floating_ip_existing_down(self): + fip_id = 
'2f245a7b-796b-4f26-9cf9-9e82d248fda7' + fake_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'ACTIVE', + } + down_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'DOWN', + } + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [fake_fip]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [down_fip]}, + ), + ] + ) + + self.assertTrue( + self.cloud.delete_floating_ip(floating_ip_id=fip_id, retry=2) + ) + self.assert_calls() + + def test_delete_floating_ip_existing_no_delete(self): + fip_id = '2f245a7b-796b-4f26-9cf9-9e82d248fda7' + fake_fip = { + 'id': fip_id, + 'floating_ip_address': '172.99.106.167', + 'status': 'ACTIVE', + } + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [fake_fip]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [fake_fip]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', f'floatingips/{fip_id}'], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', 
append=['v2.0', 'floatingips'] + ), + json={'floatingips': [fake_fip]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_floating_ip, + floating_ip_id=fip_id, + retry=2, + ) + self.assert_calls() + + def test_delete_floating_ip_not_found(self): + self.register_uris( + [ + dict( + method='DELETE', + uri=( + 'https://network.example.com/v2.0/floatingips/' + 'a-wild-id-appears' + ), + status_code=404, + ) + ] + ) + + ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears') + + self.assertFalse(ret) + self.assert_calls() + + def test_attach_ip_to_server(self): + fip = self.mock_floating_ip_list_rep['floatingips'][0].copy() + fip.update({'status': 'DOWN', 'port_id': None, 'router_id': None}) + device_id = self.fake_server['id'] + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=[f"device_id={device_id}"], + ), + json={'ports': self.mock_search_ports_rep}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'floatingips/{}'.format(fip['id'])], + ), + json={ + 'floatingip': self.mock_floating_ip_list_rep[ + 'floatingips' + ][0] + }, + validate=dict( + json={ + 'floatingip': { + 'port_id': self.mock_search_ports_rep[0]['id'], + 'fixed_ip_address': self.mock_search_ports_rep[ + 0 + ]['fixed_ips'][0]['ip_address'], + } + } + ), + ), + ] + ) + + self.cloud._attach_ip_to_server( + server=self.fake_server, + floating_ip=self.cloud._normalize_floating_ip(fip), + ) + self.assert_calls() + + def test_detach_ip_from_server(self): + fip = self.mock_floating_ip_new_rep['floatingip'] + attached_fip = copy.copy(fip) + attached_fip['port_id'] = 'server-port-id' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [attached_fip]}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 
'public', + append=['v2.0', 'floatingips/{}'.format(fip['id'])], + ), + json={'floatingip': fip}, + validate=dict(json={'floatingip': {'port_id': None}}), + ), + ] + ) + self.cloud.detach_ip_from_server( + server_id='server-id', floating_ip_id=fip['id'] + ) + self.assert_calls() + + def test_add_ip_from_pool(self): + network = self.mock_get_network_rep + fip = self.mock_floating_ip_new_rep['floatingip'] + fixed_ip = self.mock_search_ports_rep[0]['fixed_ips'][0]['ip_address'] + port_id = self.mock_search_ports_rep[0]['id'] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [network]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [fip]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingip': fip}, + validate=dict( + json={ + 'floatingip': { + 'floating_network_id': network['id'] + } + } + ), + ), + dict( + method="GET", + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=[ + "device_id={}".format(self.fake_server['id']) + ], + ), + json={'ports': self.mock_search_ports_rep}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'floatingips/{}'.format(fip['id'])], + ), + json={'floatingip': fip}, + validate=dict( + json={ + 'floatingip': { + 'fixed_ip_address': fixed_ip, + 'port_id': port_id, + } + } + ), + ), + ] + ) + + server = self.cloud._add_ip_from_pool( + server=self.fake_server, + network=network['id'], + fixed_address=fixed_ip, + ) + + self.assertEqual(server, self.fake_server) + self.assert_calls() + + def test_cleanup_floating_ips(self): + floating_ips = [ + { + "id": 
"this-is-a-floating-ip-id", + "fixed_ip_address": None, + "internal_network": None, + "floating_ip_address": "203.0.113.29", + "network": "this-is-a-net-or-pool-id", + "port_id": None, + "status": "ACTIVE", + }, + { + "id": "this-is-a-second-floating-ip-id", + "fixed_ip_address": None, + "internal_network": None, + "floating_ip_address": "203.0.113.30", + "network": "this-is-a-net-or-pool-id", + "port_id": None, + "status": "ACTIVE", + }, + { + "id": "this-is-an-attached-floating-ip-id", + "fixed_ip_address": None, + "internal_network": None, + "floating_ip_address": "203.0.113.29", + "network": "this-is-a-net-or-pool-id", + "attached": True, + "port_id": "this-is-id-of-port-with-fip", + "status": "ACTIVE", + }, + ] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': floating_ips}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'floatingips/{}'.format(floating_ips[0]['id']), + ], + ), + json={}, + ), + # First IP has been deleted now, return just the second + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': floating_ips[1:]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'floatingips/{}'.format(floating_ips[1]['id']), + ], + ), + json={}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingips': [floating_ips[2]]}, + ), + ] + ) + cleaned_up = self.cloud.delete_unattached_floating_ips() + self.assertEqual(cleaned_up, 2) + self.assert_calls() + + def test_create_floating_ip_no_port(self): + server_port = { + "id": "port-id", + "device_id": "some-server", + 'created_at': datetime.datetime.now().isoformat(), + 'fixed_ips': [ + {'subnet_id': 'subnet-id', 'ip_address': '172.24.4.2'} + ], + } + 
floating_ip = {"id": "floating-ip-id", "port_id": None} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [self.mock_get_network_rep]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnets': []}, + ), + dict( + method="GET", + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=['device_id=some-server'], + ), + json={'ports': [server_port]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'floatingips'] + ), + json={'floatingip': floating_ip}, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud._neutron_create_floating_ip, + server=dict(id='some-server'), + ) + self.assert_calls() + + def test_find_nat_source_inferred(self): + # payloads contrived but based on ones from citycloud + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + "networks": [ + { + "status": "ACTIVE", + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", + ], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "ext-net", + "admin_state_up": True, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 + "mtu": 0, + "is_default": False, + "router:external": True, + "ipv4_address_scope": None, + "shared": False, + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", + "description": None, + }, + { + "status": "ACTIVE", + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", + ], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "my-network", + 
"admin_state_up": True, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 + "mtu": 0, + "is_default": False, + "router:external": True, + "ipv4_address_scope": None, + "shared": False, + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebg", + "description": None, + }, + { + "status": "ACTIVE", + "subnets": [ + "f0ad1df5-53ee-473f-b86b-3604ea5591e9" + ], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "private", + "admin_state_up": True, + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 + "created_at": "2016-10-22T13:46:26", + "tags": [], + "updated_at": "2016-10-22T13:46:26", + "ipv6_address_scope": None, + "router:external": False, + "ipv4_address_scope": None, + "shared": False, + "mtu": 1450, + "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "description": "", + }, + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={ + "subnets": [ + { + "description": "", + "enable_dhcp": True, + "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", # noqa: E501 + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 + "created_at": "2016-10-22T13:46:26", + "dns_nameservers": [ + "89.36.90.101", + "89.36.90.102", + ], + "updated_at": "2016-10-22T13:46:26", + "gateway_ip": "10.4.0.1", + "ipv6_ra_mode": None, + "allocation_pools": [ + {"start": "10.4.0.2", "end": "10.4.0.200"} + ], + "host_routes": [], + "ip_version": 4, + "ipv6_address_mode": None, + "cidr": "10.4.0.0/24", + "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", + "subnetpool_id": None, + "name": "private-subnet-ipv4", + } + ] + }, + ), + ] + ) + + self.assertEqual('ext-net', self.cloud.get_nat_source()['name']) + + self.assert_calls() + + def test_find_nat_source_config(self): + self.cloud._nat_source = 'my-network' + + # payloads contrived but based on ones from citycloud + self.register_uris( + [ + dict( + method='GET', + 
uri='https://network.example.com/v2.0/networks', + json={ + "networks": [ + { + "status": "ACTIVE", + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", + ], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "ext-net", + "admin_state_up": True, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 + "mtu": 0, + "is_default": False, + "router:external": True, + "ipv4_address_scope": None, + "shared": False, + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebf", + "description": None, + }, + { + "status": "ACTIVE", + "subnets": [ + "df3e17fa-a4b2-47ae-9015-bc93eb076ba2", + "6b0c3dc9-b0b8-4d87-976a-7f2ebf13e7ec", + "fc541f48-fc7f-48c0-a063-18de6ee7bdd7", + ], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "my-network", + "admin_state_up": True, + "tenant_id": "a564613210ee43708b8a7fc6274ebd63", # noqa: E501 + "tags": [], + "ipv6_address_scope": "9f03124f-89af-483a-b6fd-10f08079db4d", # noqa: E501 + "mtu": 0, + "is_default": False, + "router:external": True, + "ipv4_address_scope": None, + "shared": False, + "id": "0232c17f-2096-49bc-b205-d3dcd9a30ebg", + "description": None, + }, + { + "status": "ACTIVE", + "subnets": [ + "f0ad1df5-53ee-473f-b86b-3604ea5591e9" + ], + "availability_zone_hints": [], + "availability_zones": ["nova"], + "name": "private", + "admin_state_up": True, + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 + "created_at": "2016-10-22T13:46:26", + "tags": [], + "updated_at": "2016-10-22T13:46:26", + "ipv6_address_scope": None, + "router:external": False, + "ipv4_address_scope": None, + "shared": False, + "mtu": 1450, + "id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", + "description": "", + }, + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={ + "subnets": [ + 
{ + "description": "", + "enable_dhcp": True, + "network_id": "2c9adcb5-c123-4c5a-a2ba-1ad4c4e1481f", # noqa: E501 + "tenant_id": "65222a4d09ea4c68934fa1028c77f394", # noqa: E501 + "created_at": "2016-10-22T13:46:26", + "dns_nameservers": [ + "89.36.90.101", + "89.36.90.102", + ], + "updated_at": "2016-10-22T13:46:26", + "gateway_ip": "10.4.0.1", + "ipv6_ra_mode": None, + "allocation_pools": [ + {"start": "10.4.0.2", "end": "10.4.0.200"} + ], + "host_routes": [], + "ip_version": 4, + "ipv6_address_mode": None, + "cidr": "10.4.0.0/24", + "id": "f0ad1df5-53ee-473f-b86b-3604ea5591e9", + "subnetpool_id": None, + "name": "private-subnet-ipv4", + } + ] + }, + ), + ] + ) + + self.assertEqual('my-network', self.cloud.get_nat_source()['name']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_floating_ip_nova.py b/openstack/tests/unit/cloud/test_floating_ip_nova.py new file mode 100644 index 0000000000..a1a1d390a3 --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_nova.py @@ -0,0 +1,440 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +test_floating_ip_nova +---------------------------------- + +Tests Floating IP resource methods for nova-network +""" + +from openstack.tests import fakes +from openstack.tests.unit import base + + +def get_fake_has_service(has_service): + def fake_has_service(s): + if s == 'network': + return False + return has_service(s) + + return fake_has_service + + +class TestFloatingIP(base.TestCase): + mock_floating_ip_list_rep = [ + { + 'fixed_ip': None, + 'id': 1, + 'instance_id': None, + 'ip': '203.0.113.1', + 'pool': 'nova', + }, + { + 'fixed_ip': None, + 'id': 2, + 'instance_id': None, + 'ip': '203.0.113.2', + 'pool': 'nova', + }, + { + 'fixed_ip': '192.0.2.3', + 'id': 29, + 'instance_id': 'myself', + 'ip': '198.51.100.29', + 'pool': 'black_hole', + }, + ] + + mock_floating_ip_pools = [ + {'id': 'pool1_id', 'name': 'nova'}, + {'id': 'pool2_id', 'name': 'pool2'}, + ] + + def assertAreInstances(self, elements, elem_type): + for e in elements: + self.assertIsInstance(e, elem_type) + + def setUp(self): + super().setUp() + + self.fake_server = fakes.make_fake_server( + 'server-id', + '', + 'ACTIVE', + addresses={ + 'test_pnztt_net': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': '192.0.2.129', + 'version': 4, + 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', + } + ] + }, + ) + + self.cloud.has_service = get_fake_has_service(self.cloud.has_service) + + def test_list_floating_ips(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + ] + ) + floating_ips = self.cloud.list_floating_ips() + + self.assertIsInstance(floating_ips, list) + self.assertEqual(3, len(floating_ips)) + self.assertAreInstances(floating_ips, dict) + + self.assert_calls() + + def test_list_floating_ips_with_filters(self): + self.assertRaisesRegex( + ValueError, + "nova-network doesn't support server-side floating IPs filtering. 
" + "Use the 'search_floating_ips' method instead", + self.cloud.list_floating_ips, + filters={'Foo': 42}, + ) + + def test_search_floating_ips(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + ] + ) + + floating_ips = self.cloud.search_floating_ips( + filters={'attached': False} + ) + + self.assertIsInstance(floating_ips, list) + self.assertEqual(2, len(floating_ips)) + self.assertAreInstances(floating_ips, dict) + + self.assert_calls() + + def test_get_floating_ip(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + ] + ) + + floating_ip = self.cloud.get_floating_ip(id='29') + + self.assertIsInstance(floating_ip, dict) + self.assertEqual('198.51.100.29', floating_ip['floating_ip_address']) + + self.assert_calls() + + def test_get_floating_ip_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + ] + ) + + floating_ip = self.cloud.get_floating_ip(id='666') + + self.assertIsNone(floating_ip) + + self.assert_calls() + + def test_get_floating_ip_by_id(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips', '1'] + ), + json={'floating_ip': self.mock_floating_ip_list_rep[0]}, + ), + ] + ) + + floating_ip = self.cloud.get_floating_ip_by_id(id='1') + + self.assertIsInstance(floating_ip, dict) + self.assertEqual('203.0.113.1', floating_ip['floating_ip_address']) + self.assert_calls() + + def test_create_floating_ip(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ip': 
self.mock_floating_ip_list_rep[1]}, + validate=dict(json={'pool': 'nova'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips', '2'] + ), + json={'floating_ip': self.mock_floating_ip_list_rep[1]}, + ), + ] + ) + + self.cloud.create_floating_ip(network='nova') + + self.assert_calls() + + def test_available_floating_ip_existing(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep[:1]}, + ), + ] + ) + + ip = self.cloud.available_floating_ip(network='nova') + + self.assertEqual( + self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address'] + ) + self.assert_calls() + + def test_available_floating_ip_new(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': []}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ip': self.mock_floating_ip_list_rep[0]}, + validate=dict(json={'pool': 'nova'}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips', '1'] + ), + json={'floating_ip': self.mock_floating_ip_list_rep[0]}, + ), + ] + ) + + ip = self.cloud.available_floating_ip(network='nova') + + self.assertEqual( + self.mock_floating_ip_list_rep[0]['ip'], ip['floating_ip_address'] + ) + self.assert_calls() + + def test_delete_floating_ip_existing(self): + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + append=['os-floating-ips', 'a-wild-id-appears'], + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': []}, + ), + ] + ) + + ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears') + + self.assertTrue(ret) + self.assert_calls() + + def test_delete_floating_ip_not_found(self): 
+ self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + append=['os-floating-ips', 'a-wild-id-appears'], + ), + status_code=404, + ), + ] + ) + + ret = self.cloud.delete_floating_ip(floating_ip_id='a-wild-id-appears') + + self.assertFalse(ret) + self.assert_calls() + + def test_attach_ip_to_server(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + append=['servers', self.fake_server['id'], 'action'], + ), + validate=dict( + json={ + "addFloatingIp": { + "address": "203.0.113.1", + "fixed_address": "192.0.2.129", + } + } + ), + ), + ] + ) + + self.cloud._attach_ip_to_server( + server=self.fake_server, + floating_ip=self.cloud._normalize_floating_ip( + self.mock_floating_ip_list_rep[0] + ), + fixed_address='192.0.2.129', + ) + + self.assert_calls() + + def test_detach_ip_from_server(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + append=['servers', self.fake_server['id'], 'action'], + ), + validate=dict( + json={ + "removeFloatingIp": { + "address": "203.0.113.1", + } + } + ), + ), + ] + ) + + self.cloud.detach_ip_from_server( + server_id='server-id', floating_ip_id=1 + ) + self.assert_calls() + + def test_add_ip_from_pool(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', append=['os-floating-ips'] + ), + json={'floating_ips': self.mock_floating_ip_list_rep}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 
append=['servers', self.fake_server['id'], 'action'], + ), + validate=dict( + json={ + "addFloatingIp": { + "address": "203.0.113.1", + "fixed_address": "192.0.2.129", + } + } + ), + ), + ] + ) + + server = self.cloud._add_ip_from_pool( + server=self.fake_server, + network='nova', + fixed_address='192.0.2.129', + ) + + self.assertEqual(server, self.fake_server) + self.assert_calls() + + def test_cleanup_floating_ips(self): + # This should not call anything because it's unsafe on nova. + self.assertFalse(self.cloud.delete_unattached_floating_ips()) diff --git a/openstack/tests/unit/cloud/test_floating_ip_pool.py b/openstack/tests/unit/cloud/test_floating_ip_pool.py new file mode 100644 index 0000000000..3f424c4190 --- /dev/null +++ b/openstack/tests/unit/cloud/test_floating_ip_pool.py @@ -0,0 +1,44 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +test_floating_ip_pool +---------------------------------- + +Test floating IP pool resource (managed by nova) +""" + +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFloatingIPPool(base.TestCase): + pools = [{'name': 'public'}] + + def test_list_floating_ip_pools(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-floating-ip-pools', + json={"floating_ip_pools": [{"name": "public"}]}, + ), + ] + ) + + floating_ip_pools = self.cloud.list_floating_ip_pools() + + self.assertCountEqual(floating_ip_pools, self.pools) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_fwaas.py b/openstack/tests/unit/cloud/test_fwaas.py new file mode 100644 index 0000000000..eb67f4953a --- /dev/null +++ b/openstack/tests/unit/cloud/test_fwaas.py @@ -0,0 +1,1723 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from copy import deepcopy +from unittest import mock + +from openstack import exceptions +from openstack.network.v2.firewall_group import FirewallGroup +from openstack.network.v2.firewall_policy import FirewallPolicy +from openstack.network.v2.firewall_rule import FirewallRule +from openstack.tests.unit import base + + +class FirewallTestCase(base.TestCase): + def _make_mock_url(self, *args, **params): + params_list = ['='.join([k, v]) for k, v in params.items()] + return self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'fwaas', *list(args)], + qs_elements=params_list or None, + ) + + +class TestFirewallRule(FirewallTestCase): + firewall_rule_name = 'deny_ssh' + firewall_rule_id = 'd525a9b2-ab28-493d-b988-b824c8c033b1' + _mock_firewall_rule_attrs = { + 'action': 'deny', + 'description': 'Deny SSH access', + 'destination_ip_address': None, + 'destination_port': 22, + 'enabled': True, + 'id': firewall_rule_id, + 'ip_version': 4, + 'name': firewall_rule_name, + 'project_id': 'ef44f1efcb9548d9a441cdc252a979a6', + 'protocol': 'tcp', + 'shared': False, + 'source_ip_address': None, + 'source_port': None, + } + mock_firewall_rule = None + + def setUp(self, cloud_config_fixture='clouds.yaml'): + super().setUp() + self.mock_firewall_rule = FirewallRule( + connection=self.cloud, **self._mock_firewall_rule_attrs + ).to_dict() + + def test_create_firewall_rule(self): + # attributes that are passed to the tested function + passed_attrs = self._mock_firewall_rule_attrs.copy() + del passed_attrs['id'] + + self.register_uris( + [ + # no validate due to added location key + dict( + method='POST', + uri=self._make_mock_url('firewall_rules'), + json={'firewall_rule': self.mock_firewall_rule.copy()}, + ) + ] + ) + r = self.cloud.create_firewall_rule(**passed_attrs) + self.assertDictEqual(self.mock_firewall_rule, r.to_dict()) + self.assert_calls() + + def test_create_firewall_rule_bad_protocol(self): + bad_rule = self._mock_firewall_rule_attrs.copy() + del bad_rule['id'] 
# id not allowed + bad_rule['ip_version'] = 5 + self.register_uris( + [ + # no validate due to added location key + dict( + method='POST', + uri=self._make_mock_url('firewall_rules'), + status_code=400, + json={}, + ) + ] + ) + self.assertRaises( + exceptions.BadRequestException, + self.cloud.create_firewall_rule, + **bad_rule, + ) + self.assert_calls() + + def test_delete_firewall_rule(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', name=self.firewall_rule_name + ), + json={'firewall_rules': [self.mock_firewall_rule]}, + ), + dict( + method='DELETE', + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_id + ), + json={}, + status_code=204, + ), + ] + ) + self.assertTrue( + self.cloud.delete_firewall_rule(self.firewall_rule_name) + ) + self.assert_calls() + + def test_delete_firewall_rule_filters(self): + filters = {'project_id': self.mock_firewall_rule['project_id']} + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_name, **filters + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', + name=self.firewall_rule_name, + **filters, + ), + json={'firewall_rules': [self.mock_firewall_rule]}, + ), + dict( + method='DELETE', + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_id + ), + json={}, + status_code=204, + ), + ] + ) + self.assertTrue( + self.cloud.delete_firewall_rule(self.firewall_rule_name, filters) + ) + self.assert_calls() + + def test_delete_firewall_rule_not_found(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url('firewall_rules'), + 
json={'firewall_rules': []}, + ), + ] + ) + + with ( + mock.patch.object(self.cloud.network, 'delete_firewall_rule'), + mock.patch.object(self.cloud.log, 'debug'), + ): + self.assertFalse( + self.cloud.delete_firewall_rule(self.firewall_rule_name) + ) + + self.cloud.network.delete_firewall_rule.assert_not_called() + self.cloud.log.debug.assert_called_once() + + def test_delete_firewall_multiple_matches(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', name=self.firewall_rule_name + ), + json={ + 'firewall_rules': [ + self.mock_firewall_rule, + self.mock_firewall_rule, + ] + }, + ), + ] + ) + self.assertRaises( + exceptions.DuplicateResource, + self.cloud.delete_firewall_rule, + self.firewall_rule_name, + ) + self.assert_calls() + + def test_get_firewall_rule(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', name=self.firewall_rule_name + ), + json={'firewall_rules': [self.mock_firewall_rule]}, + ), + ] + ) + r = self.cloud.get_firewall_rule(self.firewall_rule_name) + self.assertDictEqual(self.mock_firewall_rule, r) + self.assert_calls() + + def test_get_firewall_rule_not_found(self): + name = 'not_found' + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', name), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url('firewall_rules', name=name), + json={'firewall_rules': []}, + ), + ] + ) + self.assertIsNone(self.cloud.get_firewall_rule(name)) + self.assert_calls() + + def test_list_firewall_rules(self): + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url('firewall_rules'), + 
json={'firewall_rules': [self.mock_firewall_rule]}, + ) + ] + ) + self.assertDictEqual( + self.mock_firewall_rule, self.cloud.list_firewall_rules()[0] + ) + self.assert_calls() + + def test_update_firewall_rule(self): + params = {'description': 'UpdatedDescription'} + updated = self.mock_firewall_rule.copy() + updated.update(params) + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', name=self.firewall_rule_name + ), + json={'firewall_rules': [self.mock_firewall_rule]}, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_id + ), + json={'firewall_rule': updated}, + validate=dict(json={'firewall_rule': params}), + ), + ] + ) + self.assertDictEqual( + updated, + self.cloud.update_firewall_rule(self.firewall_rule_name, **params), + ) + self.assert_calls() + + def test_update_firewall_rule_filters(self): + params = {'description': 'Updated!'} + filters = {'project_id': self.mock_firewall_rule['project_id']} + updated = self.mock_firewall_rule.copy() + updated.update(params) + updated_dict = self._mock_firewall_rule_attrs.copy() + updated_dict.update(params) + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_name, **filters + ), + json={'firewall_rule': self._mock_firewall_rule_attrs}, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_rules', self.firewall_rule_id + ), + json={'firewall_rule': updated_dict}, + validate={ + 'json': {'firewall_rule': params}, + }, + ), + ] + ) + updated_rule = self.cloud.update_firewall_rule( + self.firewall_rule_name, filters, **params + ) + self.assertDictEqual(updated, updated_rule) + self.assert_calls() + + +class TestFirewallPolicy(FirewallTestCase): + firewall_policy_id = '78d05d20-d406-41ec-819d-06b65c2684e4' + 
firewall_policy_name = 'block_popular_services' + _mock_firewall_policy_attrs = { + 'audited': True, + 'description': 'block ports of well-known services', + 'firewall_rules': ['deny_ssh'], + 'id': firewall_policy_id, + 'name': firewall_policy_name, + 'project_id': 'b64238cb-a25d-41af-9ee1-42deb4587d20', + 'shared': False, + } + mock_firewall_policy = None + + def setUp(self, cloud_config_fixture='clouds.yaml'): + super().setUp() + self.mock_firewall_policy = FirewallPolicy( + connection=self.cloud, **self._mock_firewall_policy_attrs + ).to_dict() + + def test_create_firewall_policy(self): + # attributes that are passed to the tested method + passed_attrs = deepcopy(self._mock_firewall_policy_attrs) + del passed_attrs['id'] + + # policy that is returned by the POST request + created_attrs = deepcopy(self._mock_firewall_policy_attrs) + created_attrs['firewall_rules'][0] = TestFirewallRule.firewall_rule_id + created_policy = FirewallPolicy(connection=self.cloud, **created_attrs) + + # attributes used to validate the request inside register_uris() + validate_attrs = deepcopy(created_attrs) + del validate_attrs['id'] + + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', TestFirewallRule.firewall_rule_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', + name=TestFirewallRule.firewall_rule_name, + ), + json={ + 'firewall_rules': [ + TestFirewallRule._mock_firewall_rule_attrs + ] + }, + ), + dict( + method='POST', + uri=self._make_mock_url('firewall_policies'), + json={'firewall_policy': created_attrs}, + validate=dict(json={'firewall_policy': validate_attrs}), + ), + ] + ) + res = self.cloud.create_firewall_policy(**passed_attrs) + self.assertDictEqual(created_policy, res.to_dict()) + self.assert_calls() + + def test_create_firewall_policy_rule_not_found(self): + posted_policy = deepcopy(self._mock_firewall_policy_attrs) + del posted_policy['id'] + 
self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_rules', posted_policy['firewall_rules'][0] + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', + name=posted_policy['firewall_rules'][0], + ), + json={'firewall_rules': []}, + ), + ] + ) + + with mock.patch.object(self.cloud.network, 'create_firewall_policy'): + self.assertRaises( + exceptions.NotFoundException, + self.cloud.create_firewall_policy, + **posted_policy, + ) + self.cloud.network.create_firewall_policy.assert_not_called() + self.assert_calls() + + def test_delete_firewall_policy(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={'firewall_policies': [self.mock_firewall_policy]}, + ), + dict( + method='DELETE', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={}, + status_code=204, + ), + ] + ) + + with mock.patch.object(self.cloud.log, 'debug'): + self.assertTrue( + self.cloud.delete_firewall_policy(self.firewall_policy_name) + ) + self.assert_calls() + self.cloud.log.debug.assert_not_called() + + def test_delete_firewall_policy_filters(self): + filters = {'project_id': self.mock_firewall_policy['project_id']} + self.register_uris( + [ + dict( + method='DELETE', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={}, + status_code=204, + ) + ] + ) + + with ( + mock.patch.object( + self.cloud.network, + 'find_firewall_policy', + return_value=self.mock_firewall_policy, + ), + mock.patch.object(self.cloud.log, 'debug'), + ): + self.assertTrue( + self.cloud.delete_firewall_policy( + self.firewall_policy_name, filters + ) + ) + self.assert_calls() + 
self.cloud.network.find_firewall_policy.assert_called_once_with( + self.firewall_policy_name, ignore_missing=False, **filters + ) + self.cloud.log.debug.assert_not_called() + + def test_delete_firewall_policy_not_found(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={'firewall_policies': []}, + ), + ] + ) + + with mock.patch.object(self.cloud.log, 'debug'): + self.assertFalse( + self.cloud.delete_firewall_policy(self.firewall_policy_name) + ) + self.assert_calls() + self.cloud.log.debug.assert_called_once() + + def test_get_firewall_policy(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={'firewall_policies': [self.mock_firewall_policy]}, + ), + ] + ) + self.assertDictEqual( + self.mock_firewall_policy, + self.cloud.get_firewall_policy(self.firewall_policy_name), + ) + self.assert_calls() + + def test_get_firewall_policy_not_found(self): + name = 'not_found' + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_policies', name), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url('firewall_policies', name=name), + json={'firewall_policies': []}, + ), + ] + ) + self.assertIsNone(self.cloud.get_firewall_policy(name)) + self.assert_calls() + + def test_list_firewall_policies(self): + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url('firewall_policies'), + json={ + 'firewall_policies': [ + self.mock_firewall_policy.copy(), + self.mock_firewall_policy.copy(), + ] + }, + ) + ] + ) + policy = 
FirewallPolicy( + connection=self.cloud, **self.mock_firewall_policy + ) + self.assertListEqual( + self.cloud.list_firewall_policies(), [policy, policy] + ) + self.assert_calls() + + def test_list_firewall_policies_filters(self): + filters = {'project_id': self.mock_firewall_policy['project_id']} + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url('firewall_policies', **filters), + json={'firewall_policies': [self.mock_firewall_policy]}, + ) + ] + ) + self.assertListEqual( + self.cloud.list_firewall_policies(filters), + [ + FirewallPolicy( + connection=self.cloud, **self.mock_firewall_policy + ) + ], + ) + self.assert_calls() + + def test_update_firewall_policy(self): + lookup_rule = FirewallRule( + connection=self.cloud, **TestFirewallRule._mock_firewall_rule_attrs + ).to_dict() + params = { + 'firewall_rules': [lookup_rule['id']], + 'description': 'updated!', + } + retrieved_policy = deepcopy(self.mock_firewall_policy) + del retrieved_policy['firewall_rules'][0] + updated_policy = deepcopy(self.mock_firewall_policy) + updated_policy.update(params) + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={'firewall_policies': [retrieved_policy]}, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', lookup_rule['id'] + ), + json={'firewall_rule': lookup_rule}, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={'firewall_policy': updated_policy}, + validate=dict(json={'firewall_policy': params}), + ), + ] + ) + self.assertDictEqual( + updated_policy, + self.cloud.update_firewall_policy( + self.firewall_policy_name, **params + ), + ) + self.assert_calls() + + def test_update_firewall_policy_no_rules(self): + params = 
{'description': 'updated!'} + updated_policy = deepcopy(self.mock_firewall_policy) + updated_policy.update(params) + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={ + 'firewall_policies': [ + deepcopy(self.mock_firewall_policy) + ] + }, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={'firewall_policy': updated_policy}, + validate=dict(json={'firewall_policy': params}), + ), + ] + ) + self.assertDictEqual( + updated_policy, + self.cloud.update_firewall_policy( + self.firewall_policy_name, **params + ), + ) + self.assert_calls() + + def test_update_firewall_policy_filters(self): + filters = {'project_id': self.mock_firewall_policy['project_id']} + params = {'description': 'updated!'} + updated_policy = deepcopy(self.mock_firewall_policy) + updated_policy.update(params) + + self.register_uris( + [ + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={'firewall_policy': updated_policy}, + validate=dict(json={'firewall_policy': params}), + ), + ] + ) + + with mock.patch.object( + self.cloud.network, + 'find_firewall_policy', + return_value=deepcopy(self.mock_firewall_policy), + ): + self.assertDictEqual( + updated_policy, + self.cloud.update_firewall_policy( + self.firewall_policy_name, filters, **params + ), + ) + self.assert_calls() + self.cloud.network.find_firewall_policy.assert_called_once_with( + self.firewall_policy_name, ignore_missing=False, **filters + ) + + def test_insert_rule_into_policy(self): + rule0 = FirewallRule( + connection=self.cloud, **TestFirewallRule._mock_firewall_rule_attrs + ) + + _rule1_attrs = deepcopy(TestFirewallRule._mock_firewall_rule_attrs) + _rule1_attrs.update( + 
id='8068fc06-0e72-43f2-a76f-a51a33b46e08', name='after_rule' + ) + rule1 = FirewallRule(**_rule1_attrs) + + _rule2_attrs = deepcopy(TestFirewallRule._mock_firewall_rule_attrs) + _rule2_attrs.update( + id='c716382d-183b-475d-b500-dcc762f45ce3', name='before_rule' + ) + rule2 = FirewallRule(**_rule2_attrs) + retrieved_policy = deepcopy(self.mock_firewall_policy) + retrieved_policy['firewall_rules'] = [rule1['id'], rule2['id']] + updated_policy = deepcopy(self.mock_firewall_policy) + updated_policy['firewall_rules'] = [ + rule0['id'], + rule1['id'], + rule2['id'], + ] + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', # get policy + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={'firewall_policies': [retrieved_policy]}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', rule0['name']), + status_code=404, + ), + dict( + method='GET', # get rule to add + uri=self._make_mock_url( + 'firewall_rules', name=rule0['name'] + ), + json={'firewall_rules': [rule0]}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', rule1['name']), + status_code=404, + ), + dict( + method='GET', # get after rule + uri=self._make_mock_url( + 'firewall_rules', name=rule1['name'] + ), + json={'firewall_rules': [rule1]}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', rule2['name']), + status_code=404, + ), + dict( + method='GET', # get before rule + uri=self._make_mock_url( + 'firewall_rules', name=rule2['name'] + ), + json={'firewall_rules': [rule2]}, + ), + dict( + method='PUT', # add rule + uri=self._make_mock_url( + 'firewall_policies', + self.firewall_policy_id, + 'insert_rule', + ), + json=updated_policy, + validate=dict( + json={ + 'firewall_rule_id': rule0['id'], + 'insert_after': 
rule1['id'], + 'insert_before': rule2['id'], + } + ), + ), + ] + ) + r = self.cloud.insert_rule_into_policy( + name_or_id=self.firewall_policy_name, + rule_name_or_id=rule0['name'], + insert_after=rule1['name'], + insert_before=rule2['name'], + ) + self.assertDictEqual(updated_policy, r.to_dict()) + self.assert_calls() + + def test_insert_rule_into_policy_compact(self): + """ + Tests without insert_after and insert_before + """ + rule = FirewallRule(**TestFirewallRule._mock_firewall_rule_attrs) + retrieved_policy = deepcopy(self.mock_firewall_policy) + retrieved_policy['firewall_rules'] = [] + updated_policy = deepcopy(retrieved_policy) + updated_policy['firewall_rules'].append(rule['id']) + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={'firewall_policies': [retrieved_policy]}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', rule['name']), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', name=rule['name'] + ), + json={'firewall_rules': [rule]}, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_policies', + retrieved_policy['id'], + 'insert_rule', + ), + json=updated_policy, + validate=dict( + json={ + 'firewall_rule_id': rule['id'], + 'insert_after': None, + 'insert_before': None, + } + ), + ), + ] + ) + r = self.cloud.insert_rule_into_policy( + self.firewall_policy_name, rule['name'] + ) + self.assertDictEqual(updated_policy, r.to_dict()) + self.assert_calls() + + def test_insert_rule_into_policy_not_found(self): + policy_name = 'bogus_policy' + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_policies', policy_name), + status_code=404, + ), + dict( + method='GET', + 
uri=self._make_mock_url( + 'firewall_policies', name=policy_name + ), + json={'firewall_policies': []}, + ), + ] + ) + + with mock.patch.object(self.cloud.network, 'find_firewall_rule'): + self.assertRaises( + exceptions.NotFoundException, + self.cloud.insert_rule_into_policy, + policy_name, + 'bogus_rule', + ) + self.assert_calls() + self.cloud.network.find_firewall_rule.assert_not_called() + + def test_insert_rule_into_policy_rule_not_found(self): + rule_name = 'unknown_rule' + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={'firewall_policy': self.mock_firewall_policy}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', rule_name), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url('firewall_rules', name=rule_name), + json={'firewall_rules': []}, + ), + ] + ) + self.assertRaises( + exceptions.NotFoundException, + self.cloud.insert_rule_into_policy, + self.firewall_policy_id, + rule_name, + ) + self.assert_calls() + + def test_insert_rule_into_policy_already_associated(self): + rule = FirewallRule( + **TestFirewallRule._mock_firewall_rule_attrs + ).to_dict() + policy = deepcopy(self.mock_firewall_policy) + policy['firewall_rules'] = [rule['id']] + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={'firewall_policy': policy}, + ), + dict( + method='GET', + uri=self._make_mock_url('firewall_rules', rule['id']), + json={'firewall_rule': rule}, + ), + ] + ) + + with mock.patch.object(self.cloud.log, 'debug'): + r = self.cloud.insert_rule_into_policy(policy['id'], rule['id']) + self.assertDictEqual(policy, r.to_dict()) + self.assert_calls() + self.cloud.log.debug.assert_called() + + def test_remove_rule_from_policy(self): + policy_name = self.firewall_policy_name + rule = FirewallRule(**TestFirewallRule._mock_firewall_rule_attrs) + + 
retrieved_policy = deepcopy(self.mock_firewall_policy) + retrieved_policy['firewall_rules'][0] = rule['id'] + + updated_policy = deepcopy(self.mock_firewall_policy) + del updated_policy['firewall_rules'][0] + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_policies', policy_name), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=policy_name + ), + json={'firewall_policies': [retrieved_policy]}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', rule['name']), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', name=rule['name'] + ), + json={'firewall_rules': [rule]}, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_policies', + self.firewall_policy_id, + 'remove_rule', + ), + json=updated_policy, + validate=dict(json={'firewall_rule_id': rule['id']}), + ), + ] + ) + r = self.cloud.remove_rule_from_policy(policy_name, rule['name']) + self.assertDictEqual(updated_policy, r.to_dict()) + self.assert_calls() + + def test_remove_rule_from_policy_not_found(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', name=self.firewall_policy_name + ), + json={'firewall_policies': []}, + ), + ] + ) + + with mock.patch.object(self.cloud.network, 'find_firewall_rule'): + self.assertRaises( + exceptions.NotFoundException, + self.cloud.remove_rule_from_policy, + self.firewall_policy_name, + TestFirewallRule.firewall_rule_name, + ) + self.assert_calls() + self.cloud.network.find_firewall_rule.assert_not_called() + + def test_remove_rule_from_policy_rule_not_found(self): + retrieved_policy = deepcopy(self.mock_firewall_policy) + rule = 
FirewallRule(**TestFirewallRule._mock_firewall_rule_attrs) + retrieved_policy['firewall_rules'][0] = rule['id'] + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', self.firewall_policy_id + ), + json={'firewall_policy': retrieved_policy}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_rules', rule['name']), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_rules', name=rule['name'] + ), + json={'firewall_rules': []}, + ), + ] + ) + r = self.cloud.remove_rule_from_policy( + self.firewall_policy_id, rule['name'] + ) + self.assertDictEqual(retrieved_policy, r.to_dict()) + self.assert_calls() + + def test_remove_rule_from_policy_not_associated(self): + rule = FirewallRule( + **TestFirewallRule._mock_firewall_rule_attrs + ).to_dict() + policy = deepcopy(self.mock_firewall_policy) + del policy['firewall_rules'][0] + + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url('firewall_policies', policy['id']), + json={'firewall_policy': policy}, + ), + dict( + method='GET', + uri=self._make_mock_url('firewall_rules', rule['id']), + json={'firewall_rule': rule}, + ), + ] + ) + + with ( + mock.patch.object(self.cloud.network, 'remove_rule_from_policy'), + mock.patch.object(self.cloud.log, 'debug'), + ): + r = self.cloud.remove_rule_from_policy(policy['id'], rule['id']) + self.assertDictEqual(policy, r.to_dict()) + self.assert_calls() + self.cloud.log.debug.assert_called_once() + self.cloud.network.remove_rule_from_policy.assert_not_called() + + +class TestFirewallGroup(FirewallTestCase): + firewall_group_id = '700eed7a-b979-4b80-a06d-14f000d0f645' + firewall_group_name = 'max_security_group' + mock_port = { + 'name': 'mock_port', + 'id': '7d90977c-45ec-467e-a16d-dcaed772a161', + } + _mock_egress_policy_attrs = { + 'id': '34335e5b-44af-4ffd-9dcf-518133f897c7', + 'name': 'safe_outgoing_data', + } + _mock_ingress_policy_attrs = { + 'id': 
'cd28fb50-85d0-4f36-89af-50fac08ac174', + 'name': 'bad_incoming_data', + } + _mock_firewall_group_attrs = { + 'admin_state_up': True, + 'description': 'Providing max security!', + 'egress_firewall_policy': _mock_egress_policy_attrs['name'], + 'ingress_firewall_policy': _mock_ingress_policy_attrs['name'], + 'id': firewall_group_id, + 'name': firewall_group_name, + 'ports': [mock_port['name']], + 'project_id': 'da347b09-0b4f-4994-a3ef-05d13eaecb2c', + 'shared': False, + } + _mock_returned_firewall_group_attrs = { + 'admin_state_up': True, + 'description': 'Providing max security!', + 'egress_firewall_policy': _mock_egress_policy_attrs['name'], + 'egress_firewall_policy_id': _mock_egress_policy_attrs['id'], + 'ingress_firewall_policy': _mock_ingress_policy_attrs['name'], + 'ingress_firewall_policy_id': _mock_ingress_policy_attrs['id'], + 'id': firewall_group_id, + 'name': firewall_group_name, + 'ports': [mock_port['id']], + 'project_id': 'da347b09-0b4f-4994-a3ef-05d13eaecb2c', + 'shared': False, + } + mock_egress_policy = None + mock_ingress_policy = None + mock_firewall_rule = None + mock_returned_firewall_rule = None + + def setUp(self, cloud_config_fixture='clouds.yaml'): + super().setUp() + self.mock_egress_policy = FirewallPolicy( + connection=self.cloud, **self._mock_egress_policy_attrs + ).to_dict() + self.mock_ingress_policy = FirewallPolicy( + connection=self.cloud, **self._mock_ingress_policy_attrs + ).to_dict() + self.mock_firewall_group = FirewallGroup( + connection=self.cloud, **self._mock_firewall_group_attrs + ).to_dict() + self.mock_returned_firewall_group = FirewallGroup( + connection=self.cloud, **self._mock_returned_firewall_group_attrs + ).to_dict() + + def test_create_firewall_group(self): + create_group_attrs = self._mock_firewall_group_attrs.copy() + del create_group_attrs['id'] + posted_group_attrs = self._mock_returned_firewall_group_attrs.copy() + del posted_group_attrs['egress_firewall_policy'] + del 
posted_group_attrs['ingress_firewall_policy'] + del posted_group_attrs['id'] + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.mock_egress_policy['name'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', + name=self.mock_egress_policy['name'], + ), + json={'firewall_policies': [self.mock_egress_policy]}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.mock_ingress_policy['name'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', + name=self.mock_ingress_policy['name'], + ), + json={'firewall_policies': [self.mock_ingress_policy]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports', self.mock_port['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=['name={}'.format(self.mock_port['name'])], + ), + json={'ports': [self.mock_port]}, + ), + dict( + method='POST', + uri=self._make_mock_url('firewall_groups'), + json={ + 'firewall_group': deepcopy( + self.mock_returned_firewall_group + ) + }, + validate=dict(json={'firewall_group': posted_group_attrs}), + ), + ] + ) + r = self.cloud.create_firewall_group(**create_group_attrs) + self.assertDictEqual(self.mock_returned_firewall_group, r.to_dict()) + self.assert_calls() + + def test_create_firewall_group_compact(self): + """ + Tests firewall group creation without policies or ports + """ + firewall_group = deepcopy(self._mock_firewall_group_attrs) + del firewall_group['ports'] + del firewall_group['egress_firewall_policy'] + del firewall_group['ingress_firewall_policy'] + created_firewall = deepcopy(firewall_group) + created_firewall.update( + egress_firewall_policy_id=None, + ingress_firewall_policy_id=None, + ports=[], + ) + del 
firewall_group['id'] + self.register_uris( + [ + dict( + method='POST', + uri=self._make_mock_url('firewall_groups'), + json={'firewall_group': created_firewall}, + validate=dict(json={'firewall_group': firewall_group}), + ) + ] + ) + r = self.cloud.create_firewall_group(**firewall_group) + self.assertDictEqual( + FirewallGroup(connection=self.cloud, **created_firewall).to_dict(), + r.to_dict(), + ) + self.assert_calls() + + def test_delete_firewall_group(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_groups', name=self.firewall_group_name + ), + json={ + 'firewall_groups': [ + deepcopy(self.mock_returned_firewall_group) + ] + }, + ), + dict( + method='DELETE', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + status_code=204, + ), + ] + ) + self.assertTrue( + self.cloud.delete_firewall_group(self.firewall_group_name) + ) + self.assert_calls() + + def test_delete_firewall_group_filters(self): + filters = {'project_id': self.mock_firewall_group['project_id']} + self.register_uris( + [ + dict( + method='DELETE', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + status_code=204, + ) + ] + ) + + with mock.patch.object( + self.cloud.network, + 'find_firewall_group', + return_value=deepcopy(self.mock_firewall_group), + ): + self.assertTrue( + self.cloud.delete_firewall_group( + self.firewall_group_name, filters + ) + ) + self.assert_calls() + self.cloud.network.find_firewall_group.assert_called_once_with( + self.firewall_group_name, ignore_missing=False, **filters + ) + + def test_delete_firewall_group_not_found(self): + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_name + ), + status_code=404, + ), + dict( + method='GET', + 
uri=self._make_mock_url( + 'firewall_groups', name=self.firewall_group_name + ), + json={'firewall_groups': []}, + ), + ] + ) + + with mock.patch.object(self.cloud.log, 'debug'): + self.assertFalse( + self.cloud.delete_firewall_group(self.firewall_group_name) + ) + self.assert_calls() + self.cloud.log.debug.assert_called_once() + + def test_get_firewall_group(self): + returned_group = deepcopy(self.mock_returned_firewall_group) + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_groups', name=self.firewall_group_name + ), + json={'firewall_groups': [returned_group]}, + ), + ] + ) + self.assertDictEqual( + returned_group, + self.cloud.get_firewall_group(self.firewall_group_name), + ) + self.assert_calls() + + def test_get_firewall_group_not_found(self): + name = 'not_found' + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url('firewall_groups', name), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url('firewall_groups', name=name), + json={'firewall_groups': []}, + ), + ] + ) + self.assertIsNone(self.cloud.get_firewall_group(name)) + self.assert_calls() + + def test_get_firewall_group_by_id(self): + returned_group = deepcopy(self.mock_returned_firewall_group) + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + json={'firewall_group': returned_group}, + ) + ] + ) + r = self.cloud.get_firewall_group(self.firewall_group_id) + self.assertDictEqual(returned_group, r.to_dict()) + self.assert_calls() + + def test_list_firewall_groups(self): + returned_attrs = deepcopy(self.mock_returned_firewall_group) + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url('firewall_groups'), + json={'firewall_groups': [returned_attrs, returned_attrs]}, + ) 
+ ] + ) + group = FirewallGroup(connection=self.cloud, **returned_attrs) + self.assertListEqual([group, group], self.cloud.list_firewall_groups()) + self.assert_calls() + + def test_update_firewall_group(self): + params = { + 'description': 'updated!', + 'egress_firewall_policy': self.mock_egress_policy['name'], + 'ingress_firewall_policy': self.mock_ingress_policy['name'], + 'ports': [self.mock_port['name']], + } + updated_group = deepcopy(self.mock_returned_firewall_group) + updated_group['description'] = params['description'] + + returned_group = deepcopy(self.mock_returned_firewall_group) + # unset attributes that will be updated! + returned_group.update( + ingress_firewall_policy_id=None, + egress_firewall_policy_id=None, + ports=[], + ) + self.register_uris( + [ + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_name + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_groups', name=self.firewall_group_name + ), + json={'firewall_groups': [returned_group]}, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.mock_egress_policy['name'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', + name=self.mock_egress_policy['name'], + ), + json={ + 'firewall_policies': [ + deepcopy(self.mock_egress_policy) + ] + }, + ), + dict( + method='GET', # short-circuit + uri=self._make_mock_url( + 'firewall_policies', self.mock_ingress_policy['name'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_policies', + name=self.mock_ingress_policy['name'], + ), + json={ + 'firewall_policies': [ + deepcopy(self.mock_ingress_policy) + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports', self.mock_port['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 
'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=['name={}'.format(self.mock_port['name'])], + ), + json={'ports': [self.mock_port]}, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + json={'firewall_group': updated_group}, + validate=dict( + json={ + 'firewall_group': { + 'description': params['description'], + 'egress_firewall_policy_id': self.mock_egress_policy[ # noqa: E501 + 'id' + ], + 'ingress_firewall_policy_id': self.mock_ingress_policy[ # noqa: E501 + 'id' + ], + 'ports': [self.mock_port['id']], + } + } + ), + ), + ] + ) + self.assertDictEqual( + updated_group, + self.cloud.update_firewall_group( + self.firewall_group_name, **params + ), + ) + self.assert_calls() + + def test_update_firewall_group_compact(self): + params = {'description': 'updated again!'} + updated_group = deepcopy(self.mock_returned_firewall_group) + updated_group.update(params) + + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + json={ + 'firewall_group': deepcopy( + self.mock_returned_firewall_group + ) + }, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + json={'firewall_group': updated_group}, + validate=dict(json={'firewall_group': params}), + ), + ] + ) + self.assertDictEqual( + updated_group, + self.cloud.update_firewall_group(self.firewall_group_id, **params), + ) + self.assert_calls() + + def test_update_firewall_group_filters(self): + filters = {'project_id': self.mock_firewall_group['project_id']} + params = {'description': 'updated again!'} + updated_group = deepcopy(self.mock_returned_firewall_group) + self.register_uris( + [ + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + json={'firewall_group': updated_group}, + validate=dict(json={'firewall_group': params}), + ) + ] + ) + + with mock.patch.object( + 
self.cloud.network, + 'find_firewall_group', + return_value=deepcopy(self.mock_firewall_group), + ): + r = self.cloud.update_firewall_group( + self.firewall_group_name, filters, **params + ) + self.assertDictEqual(updated_group, r.to_dict()) + self.assert_calls() + self.cloud.network.find_firewall_group.assert_called_once_with( + self.firewall_group_name, ignore_missing=False, **filters + ) + + def test_update_firewall_group_unset_policies(self): + transformed_params = { + 'ingress_firewall_policy_id': None, + 'egress_firewall_policy_id': None, + } + updated_group = deepcopy(self.mock_returned_firewall_group) + updated_group.update(**transformed_params) + returned_group = deepcopy(self.mock_returned_firewall_group) + self.register_uris( + [ + dict( + method='GET', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + json={'firewall_group': returned_group}, + ), + dict( + method='PUT', + uri=self._make_mock_url( + 'firewall_groups', self.firewall_group_id + ), + json={'firewall_group': updated_group}, + validate=dict(json={'firewall_group': transformed_params}), + ), + ] + ) + self.assertDictEqual( + updated_group, + self.cloud.update_firewall_group( + self.firewall_group_id, + ingress_firewall_policy=None, + egress_firewall_policy=None, + ), + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_groups.py b/openstack/tests/unit/cloud/test_groups.py new file mode 100644 index 0000000000..b099658851 --- /dev/null +++ b/openstack/tests/unit/cloud/test_groups.py @@ -0,0 +1,163 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.tests.unit import base + + +class TestGroups(base.TestCase): + def setUp(self, cloud_config_fixture='clouds.yaml'): + super().setUp(cloud_config_fixture=cloud_config_fixture) + self.addCleanup(self.assert_calls) + + def get_mock_url( + self, + service_type='identity', + interface='public', + resource='groups', + append=None, + base_url_append='v3', + ): + return super().get_mock_url( + service_type='identity', + interface=interface, + resource=resource, + append=append, + base_url_append=base_url_append, + ) + + def test_list_groups(self): + group_data = self._get_group_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'groups': [group_data.json_response['group']]}, + ) + ] + ) + self.cloud.list_groups() + + def test_get_group(self): + group_data = self._get_group_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[group_data.group_id]), + status_code=200, + json=group_data.json_response, + ), + ] + ) + self.cloud.get_group(group_data.group_id) + + def test_delete_group(self): + group_data = self._get_group_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[group_data.group_id]), + status_code=200, + json={'group': group_data.json_response['group']}, + ), + dict( + method='DELETE', + uri=self.get_mock_url(append=[group_data.group_id]), + status_code=204, + ), + ] + ) + self.assertTrue(self.cloud.delete_group(group_data.group_id)) + + def test_create_group(self): + domain_data = self._get_domain_data() + group_data = self._get_group_data(domain_id=domain_data.domain_id) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='domains', append=[domain_data.domain_id] + ), + status_code=200, + json=domain_data.json_response, + ), + dict( + method='POST', + uri=self.get_mock_url(), + 
status_code=200, + json=group_data.json_response, + validate=dict(json=group_data.json_request), + ), + ] + ) + self.cloud.create_group( + name=group_data.group_name, + description=group_data.description, + domain=group_data.domain_id, + ) + + def test_update_group(self): + group_data = self._get_group_data() + # Domain ID is not sent + group_data.json_request['group'].pop('domain_id') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[group_data.group_id]), + status_code=200, + json={'group': group_data.json_response['group']}, + ), + dict( + method='PATCH', + uri=self.get_mock_url(append=[group_data.group_id]), + status_code=200, + json=group_data.json_response, + validate=dict( + json={ + 'group': { + 'name': 'new_name', + 'description': 'new_description', + } + } + ), + ), + ] + ) + self.cloud.update_group( + group_data.group_id, 'new_name', 'new_description' + ) + + def test_list_user_groups(self): + user_data = self._get_user_data() + group_data = self._get_group_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='users', + append=[user_data.user_id, 'groups'], + ), + status_code=200, + json={'groups': [group_data.json_response['group']]}, + ) + ] + ) + groups = list(self.cloud.identity.user_groups(user_data.user_id)) + self.assertEqual(1, len(groups)) + self.assertEqual(group_data.group_id, groups[0].id) diff --git a/openstack/tests/unit/cloud/test_identity_roles.py b/openstack/tests/unit/cloud/test_identity_roles.py new file mode 100644 index 0000000000..467e7bec0f --- /dev/null +++ b/openstack/tests/unit/cloud/test_identity_roles.py @@ -0,0 +1,340 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import testtools +from testtools import matchers + +from openstack import exceptions +from openstack.tests.unit import base + + +RAW_ROLE_ASSIGNMENTS = [ + { + "links": {"assignment": "http://example"}, + "role": {"id": "123456"}, + "scope": {"domain": {"id": "161718"}}, + "user": {"id": "313233"}, + }, + { + "links": {"assignment": "http://example"}, + "group": {"id": "101112"}, + "role": {"id": "123456"}, + "scope": {"project": {"id": "456789"}}, + }, +] + + +class TestIdentityRoles(base.TestCase): + def get_mock_url( + self, + service_type='identity', + interface='public', + resource='roles', + append=None, + base_url_append='v3', + qs_elements=None, + ): + return super().get_mock_url( + service_type, + interface, + resource, + append, + base_url_append, + qs_elements, + ) + + def test_list_roles(self): + role_data = self._get_role_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'roles': [role_data.json_response['role']]}, + ) + ] + ) + self.cloud.list_roles() + self.assert_calls() + + def test_list_role_by_name(self): + role_data = self._get_role_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + qs_elements=[f'name={role_data.role_name}'] + ), + status_code=200, + json={'roles': [role_data.json_response['role']]}, + ) + ] + ) + role = self.cloud.list_roles(name=role_data.role_name)[0] + + self.assertIsNotNone(role) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + 
self.assert_calls() + + def test_get_role_by_name(self): + role_data = self._get_role_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[role_data.role_name]), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + qs_elements=[f'name={role_data.role_name}'] + ), + status_code=200, + json={'roles': [role_data.json_response['role']]}, + ), + ] + ) + role = self.cloud.get_role(role_data.role_name) + + self.assertIsNotNone(role) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + self.assert_calls() + + def test_get_role_by_id(self): + role_data = self._get_role_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=200, + json=role_data.json_response, + ) + ] + ) + role = self.cloud.get_role(role_data.role_id) + + self.assertIsNotNone(role) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + self.assert_calls() + + def test_create_role(self): + role_data = self._get_role_data() + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=role_data.json_response, + validate=dict(json=role_data.json_request), + ) + ] + ) + + role = self.cloud.create_role(role_data.role_name) + + self.assertIsNotNone(role) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assert_calls() + + def test_update_role(self): + role_data = self._get_role_data() + req = {'role': {'name': 'new_name'}} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=200, + json=role_data.json_response, + ), + dict( + method='PATCH', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=200, + json=role_data.json_response, + 
validate=dict(json=req), + ), + ] + ) + + role = self.cloud.update_role(role_data.role_id, 'new_name') + + self.assertIsNotNone(role) + self.assertThat(role.name, matchers.Equals(role_data.role_name)) + self.assertThat(role.id, matchers.Equals(role_data.role_id)) + self.assert_calls() + + def test_delete_role_by_id(self): + role_data = self._get_role_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=200, + json=role_data.json_response, + ), + dict( + method='DELETE', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=204, + ), + ] + ) + role = self.cloud.delete_role(role_data.role_id) + self.assertThat(role, matchers.Equals(True)) + self.assert_calls() + + def test_delete_role_by_name(self): + role_data = self._get_role_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[role_data.role_name]), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + qs_elements=[f'name={role_data.role_name}'] + ), + status_code=200, + json={'roles': [role_data.json_response['role']]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url(append=[role_data.role_id]), + status_code=204, + ), + ] + ) + role = self.cloud.delete_role(role_data.role_name) + self.assertThat(role, matchers.Equals(True)) + self.assert_calls() + + def test_list_role_assignments(self): + domain_data = self._get_domain_data() + user_data = self._get_user_data(domain_id=domain_data.domain_id) + group_data = self._get_group_data(domain_id=domain_data.domain_id) + project_data = self._get_project_data(domain_id=domain_data.domain_id) + role_data = self._get_role_data() + response = [ + { + 'links': 'https://example.com', + 'role': {'id': role_data.role_id}, + 'scope': {'domain': {'id': domain_data.domain_id}}, + 'user': {'id': user_data.user_id}, + }, + { + 'links': 'https://example.com', + 'role': {'id': role_data.role_id}, + 'scope': {'project': {'id': 
project_data.project_id}}, + 'group': {'id': group_data.group_id}, + }, + ] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='role_assignments'), + status_code=200, + json={'role_assignments': response}, + complete_qs=True, + ) + ] + ) + ret = self.cloud.list_role_assignments() + self.assertThat(len(ret), matchers.Equals(2)) + self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id)) + self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id)) + self.assertThat( + ret[0].scope['domain']['id'], + matchers.Equals(domain_data.domain_id), + ) + self.assertThat( + ret[1].group['id'], matchers.Equals(group_data.group_id) + ) + self.assertThat(ret[1].role['id'], matchers.Equals(role_data.role_id)) + self.assertThat( + ret[1].scope['project']['id'], + matchers.Equals(project_data.project_id), + ) + + def test_list_role_assignments_filters(self): + domain_data = self._get_domain_data() + user_data = self._get_user_data(domain_id=domain_data.domain_id) + role_data = self._get_role_data() + response = [ + { + 'links': 'https://example.com', + 'role': {'id': role_data.role_id}, + 'scope': {'domain': {'id': domain_data.domain_id}}, + 'user': {'id': user_data.user_id}, + } + ] + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='role_assignments', + qs_elements=[ + f'scope.domain.id={domain_data.domain_id}', + f'user.id={user_data.user_id}', + 'effective=True', + ], + ), + status_code=200, + json={'role_assignments': response}, + complete_qs=True, + ) + ] + ) + params = dict( + user=user_data.user_id, + domain=domain_data.domain_id, + effective=True, + ) + ret = self.cloud.list_role_assignments(filters=params) + self.assertThat(len(ret), matchers.Equals(1)) + self.assertThat(ret[0].user['id'], matchers.Equals(user_data.user_id)) + self.assertThat(ret[0].role['id'], matchers.Equals(role_data.role_id)) + self.assertThat( + ret[0].scope['domain']['id'], + 
matchers.Equals(domain_data.domain_id), + ) + + def test_list_role_assignments_exception(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(resource='role_assignments'), + status_code=403, + ) + ] + ) + with testtools.ExpectedException(exceptions.ForbiddenException): + self.cloud.list_role_assignments() + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_identity_users.py b/openstack/tests/unit/cloud/test_identity_users.py new file mode 100644 index 0000000000..97e6ca4337 --- /dev/null +++ b/openstack/tests/unit/cloud/test_identity_users.py @@ -0,0 +1,92 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+from testtools import matchers + +from openstack.tests.unit import base + + +class TestIdentityUsers(base.TestCase): + def get_mock_url( + self, + service_type='identity', + interface='public', + resource='users', + append=None, + base_url_append='v3', + qs_elements=None, + ): + return super().get_mock_url( + service_type, + interface, + resource, + append, + base_url_append, + qs_elements, + ) + + def test_create_user(self): + domain_data = self._get_domain_data() + user_data = self._get_user_data( + "myusername", "mypassword", domain_id=domain_data.domain_id + ) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=user_data.json_response, + validate=dict(json=user_data.json_request), + ) + ] + ) + + user = self.cloud.create_user( + user_data.name, + password=user_data.password, + domain_id=domain_data.domain_id, + ) + + self.assertIsNotNone(user) + self.assertThat(user.name, matchers.Equals(user_data.name)) + self.assert_calls() + + def test_create_user_without_password(self): + domain_data = self._get_domain_data() + user_data = self._get_user_data( + "myusername", domain_id=domain_data.domain_id + ) + user_data._replace( + password=None, + json_request=user_data.json_request["user"].pop("password"), + ) + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=user_data.json_response, + validate=dict(json=user_data.json_request), + ) + ] + ) + + user = self.cloud.create_user( + user_data.name, domain_id=domain_data.domain_id + ) + + self.assertIsNotNone(user) + self.assertThat(user.name, matchers.Equals(user_data.name)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_image.py b/openstack/tests/unit/cloud/test_image.py new file mode 100644 index 0000000000..e72c39c987 --- /dev/null +++ b/openstack/tests/unit/cloud/test_image.py @@ -0,0 +1,1973 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import io +import operator +import tempfile +from unittest import mock +import uuid + +from openstack.cloud import meta +from openstack import exceptions +from openstack.image.v1 import image as image_v1 +from openstack.image.v2 import image +from openstack.tests import fakes +from openstack.tests.unit import base + + +IMPORT_METHODS = 'glance-direct,web-download' + + +class BaseTestImage(base.TestCase): + def setUp(self): + super().setUp() + self.image_id = str(uuid.uuid4()) + self.image_name = self.getUniqueString('image') + self.object_name = f'images/{self.image_name}' + self.imagefile = tempfile.NamedTemporaryFile(delete=False) + data = b'\2\0' + self.imagefile.write(data) + self.imagefile.close() + self.output = data + self.fake_image_dict = fakes.make_fake_image( + image_id=self.image_id, + image_name=self.image_name, + data=self.imagefile.name, + ) + self.fake_search_return = {'images': [self.fake_image_dict]} + self.container_name = self.getUniqueString('container') + + def _compare_images(self, exp, real): + self.assertDictEqual( + image.Image(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def _compare_images_v1(self, exp, real): + self.assertDictEqual( + image_v1.Image(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + +class TestImage(BaseTestImage): + def setUp(self): + super().setUp() + self.use_glance() + + def test_download_image_no_output(self): + 
self.assertRaises( + exceptions.SDKException, + self.cloud.download_image, + self.image_name, + ) + + def test_download_image_two_outputs(self): + fake_fd = io.BytesIO() + self.assertRaises( + exceptions.SDKException, + self.cloud.download_image, + self.image_name, + output_path='fake_path', + output_file=fake_fd, + ) + + def test_download_image_no_images_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{self.image_name}', + status_code=404, + ), + dict( + method='GET', + uri=f'https://image.example.com/v2/images?name={self.image_name}', + json=dict(images=[]), + ), + dict( + method='GET', + uri='https://image.example.com/v2/images?os_hidden=True', + json=dict(images=[]), + ), + ] + ) + self.assertRaises( + exceptions.NotFoundException, + self.cloud.download_image, + self.image_name, + output_path='fake_path', + ) + self.assert_calls() + + def _register_image_mocks(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{self.image_name}', + status_code=404, + ), + dict( + method='GET', + uri=f'https://image.example.com/v2/images?name={self.image_name}', + json=self.fake_search_return, + ), + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{self.image_id}', + json=self.fake_image_dict, + ), + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{self.image_id}/file', + content=self.output, + headers={ + 'Content-Type': 'application/octet-stream', + 'Content-MD5': self.fake_image_dict['checksum'], + }, + ), + ] + ) + + def test_download_image_with_fd(self): + self._register_image_mocks() + output_file = io.BytesIO() + self.cloud.download_image(self.image_name, output_file=output_file) + output_file.seek(0) + self.assertEqual(output_file.read(), self.output) + self.assert_calls() + + def test_download_image_with_path(self): + self._register_image_mocks() + output_file = tempfile.NamedTemporaryFile() + self.cloud.download_image( + 
self.image_name, output_path=output_file.name + ) + output_file.seek(0) + self.assertEqual(output_file.read(), self.output) + self.assert_calls() + + @mock.patch('openstack.image.v2._proxy.Proxy.find_image') + def test_get_images(self, mock_find): + image1 = dict(id='123', name='mickey') + mock_find.return_value = image1 + r = self.cloud.get_image('mickey') + self.assertIsNotNone(r) + self.assertDictEqual(image1, r) + + @mock.patch('openstack.image.v2._proxy.Proxy.find_image') + def test_get_image_not_found(self, mock_find): + mock_find.return_value = None + r = self.cloud.get_image('doesNotExist') + self.assertIsNone(r) + + def test_get_image_name(self, cloud=None): + cloud = cloud or self.cloud + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_search_return, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_search_return, + ), + ] + ) + + self.assertEqual(self.image_name, cloud.get_image_name(self.image_id)) + self.assertEqual( + self.image_name, cloud.get_image_name(self.image_name) + ) + + self.assert_calls() + + def test_get_image_by_id(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=self.fake_image_dict, + ) + ] + ) + self._compare_images( + self.fake_image_dict, self.cloud.get_image_by_id(self.image_id) + ) + self.assert_calls() + + def test_get_image_id(self, cloud=None): + cloud = cloud or self.cloud + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_search_return, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_search_return, + ), + ] + ) + + self.assertEqual(self.image_id, 
cloud.get_image_id(self.image_id)) + self.assertEqual(self.image_id, cloud.get_image_id(self.image_name)) + + self.assert_calls() + + def test_get_image_name_operator(self): + # This should work the same as non-operator, just verifying it does. + self.test_get_image_name(cloud=self.cloud) + + def test_get_image_id_operator(self): + # This should work the same as the other test, just verifying it does. + self.test_get_image_id(cloud=self.cloud) + + def test_empty_list_images(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json={'images': []}, + ) + ] + ) + self.assertEqual([], self.cloud.list_images()) + self.assert_calls() + + def test_list_images(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_search_return, + ) + ] + ) + [ + self._compare_images(a, b) + for a, b in zip([self.fake_image_dict], self.cloud.list_images()) + ] + self.assert_calls() + + def test_list_images_show_all(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['member_status=all'], + ), + json=self.fake_search_return, + ) + ] + ) + [ + self._compare_images(a, b) + for a, b in zip( + [self.fake_image_dict], self.cloud.list_images(show_all=True) + ) + ] + self.assert_calls() + + def test_list_images_show_all_deleted(self): + deleted_image = self.fake_image_dict.copy() + deleted_image['status'] = 'deleted' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['member_status=all'], + ), + json={'images': [self.fake_image_dict, deleted_image]}, + ) + ] + ) + [ + self._compare_images(a, b) + for a, b in zip( + [self.fake_image_dict], self.cloud.list_images(show_all=True) + ) + ] + self.assert_calls() + + def 
test_list_images_no_filter_deleted(self): + deleted_image = self.fake_image_dict.copy() + deleted_image['status'] = 'deleted' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json={'images': [self.fake_image_dict, deleted_image]}, + ) + ] + ) + [ + self._compare_images(a, b) + for a, b in zip( + [self.fake_image_dict], + self.cloud.list_images(filter_deleted=False), + ) + ] + self.assert_calls() + + def test_list_images_filter_deleted(self): + deleted_image = self.fake_image_dict.copy() + deleted_image['status'] = 'deleted' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json={'images': [self.fake_image_dict, deleted_image]}, + ) + ] + ) + [ + self._compare_images(a, b) + for a, b in zip([self.fake_image_dict], self.cloud.list_images()) + ] + self.assert_calls() + + def test_list_images_string_properties(self): + image_dict = self.fake_image_dict.copy() + image_dict['properties'] = 'list,of,properties' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json={'images': [image_dict]}, + ), + ] + ) + images = self.cloud.list_images() + [self._compare_images(a, b) for a, b in zip([image_dict], images)] + + self.assertEqual( + images[0]['properties']['properties'], 'list,of,properties' + ) + self.assert_calls() + + def test_list_images_paginated(self): + marker = str(uuid.uuid4()) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json={ + 'images': [self.fake_image_dict], + 'next': f'/v2/images?marker={marker}', + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=[f'marker={marker}'], + ), + json=self.fake_search_return, + ), + ] + ) + [ + self._compare_images(a, b) 
+ for a, b in zip([self.fake_image_dict], self.cloud.list_images()) + ] + self.assert_calls() + + def test_create_image_put_v2_no_import(self): + self.cloud.image_api_use_tasks = False + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_image_dict, + validate=dict( + json={ + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'name': self.image_name, + 'owner_specified.openstack.md5': self.fake_image_dict[ # noqa: E501 + 'owner_specified.openstack.md5' + ], + 'owner_specified.openstack.object': self.object_name, # noqa: E501 + 'owner_specified.openstack.sha256': self.fake_image_dict[ # noqa: E501 + 'owner_specified.openstack.sha256' + ], + 'visibility': 'private', + 'tags': ['tag1', 'tag2'], + } + ), + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id, 'file'], + base_url_append='v2', + ), + request_headers={ + 'Content-Type': 'application/octet-stream' + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.fake_image_dict['id']], + base_url_append='v2', + ), + json=self.fake_image_dict, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.fake_image_dict['id']], + base_url_append='v2', + ), + complete_qs=True, + json=self.fake_image_dict, + ), + ] + ) + + self.cloud.create_image( + self.image_name, + self.imagefile.name, + 
wait=True, + timeout=1, + tags=['tag1', 'tag2'], + is_public=False, + validate_checksum=True, + ) + + self.assert_calls() + self.assertEqual( + self.adapter.request_history[7].text.read(), self.output + ) + + def test_create_image_put_v2_import_supported(self): + self.cloud.image_api_use_tasks = False + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_image_dict, + headers={ + 'OpenStack-image-import-methods': IMPORT_METHODS, + }, + validate=dict( + json={ + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'name': self.image_name, + 'owner_specified.openstack.md5': self.fake_image_dict[ # noqa: E501 + 'owner_specified.openstack.md5' + ], + 'owner_specified.openstack.object': self.object_name, # noqa: E501 + 'owner_specified.openstack.sha256': self.fake_image_dict[ # noqa: E501 + 'owner_specified.openstack.sha256' + ], + 'visibility': 'private', + 'tags': ['tag1', 'tag2'], + } + ), + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id, 'file'], + base_url_append='v2', + ), + request_headers={ + 'Content-Type': 'application/octet-stream' + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.fake_image_dict['id']], + base_url_append='v2', + ), + json=self.fake_image_dict, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', 
self.fake_image_dict['id']], + base_url_append='v2', + ), + complete_qs=True, + json=self.fake_image_dict, + ), + ] + ) + + self.cloud.create_image( + self.image_name, + self.imagefile.name, + wait=True, + timeout=1, + tags=['tag1', 'tag2'], + is_public=False, + validate_checksum=True, + ) + + self.assert_calls() + self.assertEqual( + self.adapter.request_history[7].text.read(), self.output + ) + + def test_create_image_use_import(self): + self.cloud.image_api_use_tasks = False + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_image_dict, + headers={ + 'OpenStack-image-import-methods': IMPORT_METHODS, + }, + validate=dict( + json={ + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'name': self.image_name, + 'owner_specified.openstack.md5': self.fake_image_dict[ # noqa: E501 + 'owner_specified.openstack.md5' + ], + 'owner_specified.openstack.object': self.object_name, # noqa: E501 + 'owner_specified.openstack.sha256': self.fake_image_dict[ # noqa: E501 + 'owner_specified.openstack.sha256' + ], + 'visibility': 'private', + 'tags': ['tag1', 'tag2'], + } + ), + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id, 'stage'], + base_url_append='v2', + ), + request_headers={ + 'Content-Type': 'application/octet-stream' + }, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'image', + append=['images', 
self.image_id, 'import'], + base_url_append='v2', + ), + json={'method': {'name': 'glance-direct'}}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.fake_image_dict['id']], + base_url_append='v2', + ), + json=self.fake_image_dict, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.fake_image_dict['id']], + base_url_append='v2', + ), + complete_qs=True, + json=self.fake_image_dict, + ), + ] + ) + + self.cloud.create_image( + self.image_name, + self.imagefile.name, + wait=True, + timeout=1, + tags=['tag1', 'tag2'], + is_public=False, + validate_checksum=True, + use_import=True, + import_method='glance-direct', + ) + + self.assert_calls() + self.assertEqual( + self.adapter.request_history[7].text.read(), self.output + ) + + def test_create_image_task(self): + self.cloud.image_api_use_tasks = True + endpoint = self.cloud.object_store.get_endpoint() + + task_id = str(uuid.uuid4()) + args = dict( + id=task_id, + status='success', + type='import', + result={ + 'image_id': self.image_id, + }, + ) + + image_no_checksums = self.fake_image_dict.copy() + del image_no_checksums['owner_specified.openstack.md5'] + del image_no_checksums['owner_specified.openstack.sha256'] + del image_no_checksums['owner_specified.openstack.object'] + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='HEAD', + uri=f'{endpoint}/{self.container_name}', + status_code=404, + ), + dict( + method='PUT', + 
uri=f'{endpoint}/{self.container_name}', + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }, + ), + dict( + method='HEAD', + uri=f'{endpoint}/{self.container_name}', + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8', + }, + ), + dict( + method='GET', + # This is explicitly not using get_mock_url because that + # gets us a project-id oriented URL. + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': 1000}, + slo={'min_segment_size': 500}, + ), + ), + dict( + method='HEAD', + uri=f'{endpoint}/{self.container_name}/{self.image_name}', + status_code=404, + ), + dict( + method='PUT', + uri=f'{endpoint}/{self.container_name}/{self.image_name}', + status_code=201, + validate=dict( + headers={ + 'X-Object-Meta-x-sdk-md5': self.fake_image_dict[ + 'owner_specified.openstack.md5' + ], + 'X-Object-Meta-x-sdk-sha256': self.fake_image_dict[ + 'owner_specified.openstack.sha256' + ], + } + ), + ), + dict( + method='POST', + uri=self.get_mock_url( + 'image', append=['tasks'], base_url_append='v2' + ), + json={'id': task_id, 'status': 'processing'}, + validate=dict( + json=dict( + type='import', + input={ + 'import_from': f'{self.container_name}/{self.image_name}', # noqa: E501 + 'image_properties': {'name': self.image_name}, + }, + ) + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['tasks', task_id], + base_url_append='v2', + ), + status_code=503, + text='Random error', + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['tasks', task_id], + base_url_append='v2', + ), + json=args, + ), + dict( + 
method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=image_no_checksums, + ), + dict( + method='PATCH', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + validate=dict( + json=sorted( + [ + { + 'op': 'add', + 'value': f'{self.container_name}/{self.image_name}', # noqa: E501 + 'path': '/owner_specified.openstack.object', # noqa: E501 + }, + { + 'op': 'add', + 'value': self.fake_image_dict[ + 'owner_specified.openstack.md5' + ], + 'path': '/owner_specified.openstack.md5', + }, + { + 'op': 'add', + 'value': self.fake_image_dict[ + 'owner_specified.openstack.sha256' + ], + 'path': '/owner_specified.openstack.sha256', # noqa: E501 + }, + ], + key=operator.itemgetter('path'), + ), + headers={ + 'Content-Type': 'application/openstack-images-v2.1-json-patch' # noqa: E501 + }, + ), + json=self.fake_search_return, + ), + dict( + method='HEAD', + uri=f'{endpoint}/{self.container_name}/{self.image_name}', + headers={ + 'X-Timestamp': '1429036140.50253', + 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', + 'Content-Length': '1290170880', + 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', + 'X-Object-Meta-X-Sdk-Sha256': self.fake_image_dict[ + 'owner_specified.openstack.sha256' + ], + 'X-Object-Meta-X-Sdk-Md5': self.fake_image_dict[ + 'owner_specified.openstack.md5' + ], + 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', + 'Accept-Ranges': 'bytes', + 'Content-Type': 'application/octet-stream', + 'Etag': fakes.NO_MD5, + }, + ), + dict( + method='DELETE', + uri=f'{endpoint}/{self.container_name}/{self.image_name}', + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + complete_qs=True, + json=image_no_checksums, + ), + ] + ) + + self.cloud.create_image( + self.image_name, + self.imagefile.name, + wait=True, + timeout=1, + disk_format='vhd', + container_format='ovf', + is_public=False, 
+ validate_checksum=True, + container=self.container_name, + ) + + self.assert_calls() + + def test_delete_autocreated_no_tasks(self): + self.use_keystone_v3() + self.cloud.image_api_use_tasks = False + deleted = self.cloud.delete_autocreated_image_objects( + container=self.container_name + ) + self.assertFalse(deleted) + self.assert_calls([]) + + def test_delete_image_task(self): + self.cloud.image_api_use_tasks = True + endpoint = self.cloud.object_store.get_endpoint() + + object_path = self.fake_image_dict['owner_specified.openstack.object'] + + image_no_checksums = self.fake_image_dict.copy() + del image_no_checksums['owner_specified.openstack.md5'] + del image_no_checksums['owner_specified.openstack.sha256'] + del image_no_checksums['owner_specified.openstack.object'] + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=self.fake_image_dict, + ), + dict( + method='DELETE', + uri=f'https://image.example.com/v2/images/{self.image_id}', + ), + dict( + method='HEAD', + uri=f'{endpoint}/{object_path}', + headers={ + 'X-Timestamp': '1429036140.50253', + 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', + 'Content-Length': '1290170880', + 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', + 'X-Object-Meta-X-Sdk-Sha256': self.fake_image_dict[ + 'owner_specified.openstack.sha256' + ], + 'X-Object-Meta-X-Sdk-Md5': self.fake_image_dict[ + 'owner_specified.openstack.md5' + ], + 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', + 'Accept-Ranges': 'bytes', + 'Content-Type': 'application/octet-stream', + 'Etag': fakes.NO_MD5, + }, + ), + dict( + method='DELETE', + uri=f'{endpoint}/{object_path}', + ), + ] + ) + + self.cloud.delete_image(self.image_id) + + self.assert_calls() + + def test_delete_autocreated_image_objects(self): + self.use_keystone_v3() + self.cloud.image_api_use_tasks = True + endpoint = self.cloud.object_store.get_endpoint() + other_image = 
self.getUniqueString('no-delete') + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + service_type='object-store', + resource=self.container_name, + qs_elements=['format=json'], + ), + json=[ + { + 'content_type': 'application/octet-stream', + 'bytes': 1437258240, + 'hash': '249219347276c331b87bf1ac2152d9af', + 'last_modified': '2015-02-16T17:50:05.289600', + 'name': other_image, + }, + { + 'content_type': 'application/octet-stream', + 'bytes': 1290170880, + 'hash': fakes.NO_MD5, + 'last_modified': '2015-04-14T18:29:00.502530', + 'name': self.image_name, + }, + ], + ), + dict( + method='HEAD', + uri=self.get_mock_url( + service_type='object-store', + resource=self.container_name, + append=[other_image], + ), + headers={ + 'X-Timestamp': '1429036140.50253', + 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', + 'Content-Length': '1290170880', + 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', + 'X-Object-Meta-X-Shade-Sha256': 'does not matter', + 'X-Object-Meta-X-Shade-Md5': 'does not matter', + 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', + 'Accept-Ranges': 'bytes', + 'Content-Type': 'application/octet-stream', + 'Etag': '249219347276c331b87bf1ac2152d9af', + }, + ), + dict( + method='HEAD', + uri=self.get_mock_url( + service_type='object-store', + resource=self.container_name, + append=[self.image_name], + ), + headers={ + 'X-Timestamp': '1429036140.50253', + 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', + 'Content-Length': '1290170880', + 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', + 'X-Object-Meta-X-Shade-Sha256': fakes.NO_SHA256, + 'X-Object-Meta-X-Shade-Md5': fakes.NO_MD5, + 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', + 'Accept-Ranges': 'bytes', + 'Content-Type': 'application/octet-stream', + ( + 'X-Object-Meta-' + + self.cloud._OBJECT_AUTOCREATE_KEY + ): 'true', + 'Etag': fakes.NO_MD5, + 'X-Static-Large-Object': 'false', + }, + ), + dict( + method='DELETE', + uri=f'{endpoint}/{self.container_name}/{self.image_name}', + 
), + ] + ) + + deleted = self.cloud.delete_autocreated_image_objects( + container=self.container_name + ) + self.assertTrue(deleted) + + self.assert_calls() + + def _image_dict(self, fake_image): + return self.cloud._normalize_image(meta.obj_to_munch(fake_image)) + + def _call_create_image(self, name, **kwargs): + imagefile = tempfile.NamedTemporaryFile(delete=False) + imagefile.write(b'\0') + imagefile.close() + self.cloud.create_image( + name, + imagefile.name, + wait=True, + timeout=1, + is_public=False, + validate_checksum=True, + **kwargs, + ) + + def test_create_image_put_v1(self): + self.cloud.config.config['image_api_version'] = '1' + + args = { + 'name': self.image_name, + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'properties': { + 'owner_specified.openstack.md5': fakes.NO_MD5, + 'owner_specified.openstack.sha256': fakes.NO_SHA256, + 'owner_specified.openstack.object': f'images/{self.image_name}', # noqa: E501 + 'is_public': False, + }, + } + + ret = args.copy() + ret['id'] = self.image_id + ret['status'] = 'success' + + self.register_uris( + [ + dict( + method='GET', + uri='https://image.example.com/v1/images/' + + self.image_name, + status_code=404, + ), + dict( + method='GET', + uri='https://image.example.com/v1/images/detail?name=' + + self.image_name, + json={'images': []}, + ), + dict( + method='POST', + uri='https://image.example.com/v1/images', + json={'image': ret}, + validate=dict(json=args), + ), + dict( + method='PUT', + uri=f'https://image.example.com/v1/images/{self.image_id}', + json=ret, + validate=dict( + headers={ + 'x-image-meta-checksum': fakes.NO_MD5, + 'x-glance-registry-purge-props': 'false', + } + ), + ), + dict( + method='GET', + uri='https://image.example.com/v1/images/detail', + json={'images': [ret]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v1', + ), + complete_qs=True, + json=ret, + ), + ] + ) + self._call_create_image(self.image_name) 
+ [ + self._compare_images_v1(b, a) + for a, b in zip(self.cloud.list_images(), [ret]) + ] + + def test_create_image_put_v1_bad_delete(self): + self.cloud.config.config['image_api_version'] = '1' + + args = { + 'name': self.image_name, + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'properties': { + 'owner_specified.openstack.md5': fakes.NO_MD5, + 'owner_specified.openstack.sha256': fakes.NO_SHA256, + 'owner_specified.openstack.object': f'images/{self.image_name}', # noqa: E501 + 'is_public': False, + }, + 'validate_checksum': True, + } + + ret = args.copy() + ret['id'] = self.image_id + ret['status'] = 'success' + + self.register_uris( + [ + dict( + method='GET', + uri='https://image.example.com/v1/images/' + + self.image_name, + status_code=404, + ), + dict( + method='GET', + uri='https://image.example.com/v1/images/detail?name=' + + self.image_name, + json={'images': []}, + ), + dict( + method='POST', + uri='https://image.example.com/v1/images', + json={'image': ret}, + validate=dict(json=args), + ), + dict( + method='PUT', + uri=f'https://image.example.com/v1/images/{self.image_id}', + status_code=400, + validate=dict( + headers={ + 'x-image-meta-checksum': fakes.NO_MD5, + 'x-glance-registry-purge-props': 'false', + } + ), + ), + dict( + method='DELETE', + uri=f'https://image.example.com/v1/images/{self.image_id}', + json={'images': [ret]}, + ), + ] + ) + + self.assertRaises( + exceptions.HttpException, + self._call_create_image, + self.image_name, + ) + + self.assert_calls() + + def test_update_image_no_patch(self): + self.cloud.image_api_use_tasks = False + + args = { + 'name': self.image_name, + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'owner_specified.openstack.md5': fakes.NO_MD5, + 'owner_specified.openstack.sha256': fakes.NO_SHA256, + 'owner_specified.openstack.object': f'images/{self.image_name}', + 'visibility': 'private', + } + + ret = args.copy() + ret['id'] = self.image_id + ret['status'] = 'success' + + 
self.cloud.update_image_properties( + image=image.Image.existing(**ret), + **{ + 'owner_specified.openstack.object': f'images/{self.image_name}' + }, + ) + + self.assert_calls() + + def test_create_image_put_v2_bad_delete(self): + self.cloud.image_api_use_tasks = False + + args = { + 'name': self.image_name, + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'owner_specified.openstack.md5': fakes.NO_MD5, + 'owner_specified.openstack.sha256': fakes.NO_SHA256, + 'owner_specified.openstack.object': f'images/{self.image_name}', + 'visibility': 'private', + } + + ret = args.copy() + ret['id'] = self.image_id + ret['status'] = 'success' + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='POST', + uri='https://image.example.com/v2/images', + json=ret, + validate=dict(json=args), + ), + dict( + method='PUT', + uri=f'https://image.example.com/v2/images/{self.image_id}/file', + status_code=400, + validate=dict( + headers={ + 'Content-Type': 'application/octet-stream', + }, + ), + ), + dict( + method='DELETE', + uri=f'https://image.example.com/v2/images/{self.image_id}', + ), + ] + ) + + self.assertRaises( + exceptions.HttpException, + self._call_create_image, + self.image_name, + ) + + self.assert_calls() + + def test_create_image_put_v2_wrong_checksum_delete(self): + self.cloud.image_api_use_tasks = False + + fake_image = self.fake_image_dict + + fake_image['owner_specified.openstack.md5'] = 'a' + fake_image['owner_specified.openstack.sha256'] = 'b' + + 
self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_image_dict, + validate=dict( + json={ + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'name': self.image_name, + 'owner_specified.openstack.md5': fake_image[ + 'owner_specified.openstack.md5' + ], + 'owner_specified.openstack.object': self.object_name, # noqa: E501 + 'owner_specified.openstack.sha256': fake_image[ + 'owner_specified.openstack.sha256' + ], + 'visibility': 'private', + } + ), + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id, 'file'], + base_url_append='v2', + ), + request_headers={ + 'Content-Type': 'application/octet-stream' + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.fake_image_dict['id']], + base_url_append='v2', + ), + json=fake_image, + ), + dict( + method='DELETE', + uri=f'https://image.example.com/v2/images/{self.image_id}', + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.create_image, + self.image_name, + self.imagefile.name, + is_public=False, + md5='a', + sha256='b', + allow_duplicates=True, + validate_checksum=True, + ) + + self.assert_calls() + + def test_create_image_put_bad_int(self): + self.cloud.image_api_use_tasks = False + + self.assertRaises( + exceptions.SDKException, + self._call_create_image, + self.image_name, + allow_duplicates=True, + min_disk='fish', + min_ram=0, + ) + + self.assert_calls() + + def test_create_image_put_user_int(self): + self.cloud.image_api_use_tasks = False + + args = { + 'name': self.image_name, + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'owner_specified.openstack.md5': fakes.NO_MD5, + 'owner_specified.openstack.sha256': fakes.NO_SHA256, + 'owner_specified.openstack.object': f'images/{self.image_name}', + 'int_v': '12345', + 'visibility': 'private', + 'min_disk': 0, + 'min_ram': 0, + } + + ret = args.copy() + 
ret['id'] = self.image_id + ret['status'] = 'success' + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='POST', + uri='https://image.example.com/v2/images', + json=ret, + validate=dict(json=args), + ), + dict( + method='PUT', + uri=f'https://image.example.com/v2/images/{self.image_id}/file', + validate=dict( + headers={ + 'Content-Type': 'application/octet-stream', + }, + ), + ), + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{self.image_id}', + json=ret, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + complete_qs=True, + json=ret, + ), + ] + ) + + self._call_create_image( + self.image_name, min_disk='0', min_ram=0, int_v=12345 + ) + + self.assert_calls() + + def test_create_image_put_meta_int(self): + self.cloud.image_api_use_tasks = False + + args = { + 'name': self.image_name, + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'owner_specified.openstack.md5': fakes.NO_MD5, + 'owner_specified.openstack.sha256': fakes.NO_SHA256, + 'owner_specified.openstack.object': f'images/{self.image_name}', + 'int_v': 12345, + 'visibility': 'private', + 'min_disk': 0, + 'min_ram': 0, + } + + ret = args.copy() + ret['id'] = self.image_id + ret['status'] = 'success' + ret['checksum'] = fakes.NO_MD5 + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + 
status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='POST', + uri='https://image.example.com/v2/images', + json=ret, + validate=dict(json=args), + ), + dict( + method='PUT', + uri=f'https://image.example.com/v2/images/{self.image_id}/file', + validate=dict( + headers={ + 'Content-Type': 'application/octet-stream', + }, + ), + ), + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{self.image_id}', + json=ret, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + complete_qs=True, + json=ret, + ), + ] + ) + + self._call_create_image( + self.image_name, min_disk='0', min_ram=0, meta={'int_v': 12345} + ) + + self.assert_calls() + + def test_create_image_put_protected(self): + self.cloud.image_api_use_tasks = False + + args = { + 'name': self.image_name, + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'owner_specified.openstack.md5': fakes.NO_MD5, + 'owner_specified.openstack.sha256': fakes.NO_SHA256, + 'owner_specified.openstack.object': f'images/{self.image_name}', + 'int_v': '12345', + 'protected': False, + 'visibility': 'private', + 'min_disk': 0, + 'min_ram': 0, + } + + ret = args.copy() + ret['id'] = self.image_id + ret['status'] = 'success' + ret['checksum'] = fakes.NO_MD5 + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_name], + base_url_append='v2', + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['name=' + self.image_name], + ), + 
validate=dict(), + json={'images': []}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + qs_elements=['os_hidden=True'], + ), + json={'images': []}, + ), + dict( + method='POST', + uri='https://image.example.com/v2/images', + json=ret, + validate=dict(json=args), + ), + dict( + method='PUT', + uri=f'https://image.example.com/v2/images/{self.image_id}/file', + validate=dict( + headers={ + 'Content-Type': 'application/octet-stream', + }, + ), + ), + dict( + method='GET', + uri=f'https://image.example.com/v2/images/{self.image_id}', + json=ret, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + complete_qs=True, + json=ret, + ), + ] + ) + + self._call_create_image( + self.image_name, + min_disk='0', + min_ram=0, + properties={'int_v': 12345}, + is_protected=False, + ) + + self.assert_calls() + + +class TestImageSuburl(BaseTestImage): + def setUp(self): + super().setUp() + self.os_fixture.use_suburl() + self.os_fixture.build_tokens() + self.use_keystone_v3() + self.use_glance( + image_version_json='image-version-suburl.json', + image_discovery_url='https://example.com/image', + ) + + def test_list_images(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json=self.fake_search_return, + ) + ] + ) + [ + self._compare_images(b, a) + for a, b in zip(self.cloud.list_images(), [self.fake_image_dict]) + ] + self.assert_calls() + + def test_list_images_paginated(self): + marker = str(uuid.uuid4()) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'image', append=['images'], base_url_append='v2' + ), + json={ + 'images': [self.fake_image_dict], + 'next': f'/v2/images?marker={marker}', + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images'], + base_url_append='v2', + 
qs_elements=[f'marker={marker}'], + ), + json=self.fake_search_return, + ), + ] + ) + [ + self._compare_images(b, a) + for a, b in zip( + self.cloud.list_images(), + [self.fake_image_dict, self.fake_image_dict], + ) + ] + self.assert_calls() + + +class TestImageVolume(BaseTestImage): + def setUp(self): + super().setUp() + self.volume_id = str(uuid.uuid4()) + + def test_create_image_volume(self): + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + append=['volumes', self.volume_id, 'action'], + ), + json={ + 'os-volume_upload_image': {'image_id': self.image_id} + }, + validate=dict( + json={ + 'os-volume_upload_image': { + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'force': False, + 'image_name': 'fake_image', + } + } + ), + ), + # NOTE(notmorgan): Glance discovery happens here, insert the + # glance discovery mock at this point, DO NOT use the + # .use_glance() method, that is intended only for use in + # .setUp + self.get_glance_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=self.fake_image_dict, + ), + ] + ) + + self.cloud.create_image( + 'fake_image', + self.imagefile.name, + wait=True, + timeout=1, + volume={'id': self.volume_id}, + ) + + self.assert_calls() + + def test_create_image_volume_duplicate(self): + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + append=['volumes', self.volume_id, 'action'], + ), + json={ + 'os-volume_upload_image': {'image_id': self.image_id} + }, + validate=dict( + json={ + 'os-volume_upload_image': { + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'force': True, + 'image_name': 'fake_image', + } + } + ), + ), + # NOTE(notmorgan): Glance discovery happens here, insert the + # glance discovery mock at this point, DO NOT use the + # 
.use_glance() method, that is intended only for use in + # .setUp + self.get_glance_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=self.fake_image_dict, + ), + ] + ) + + self.cloud.create_image( + 'fake_image', + self.imagefile.name, + wait=True, + timeout=1, + volume={'id': self.volume_id}, + allow_duplicates=True, + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_image_snapshot.py b/openstack/tests/unit/cloud/test_image_snapshot.py new file mode 100644 index 0000000000..5d7c64c63a --- /dev/null +++ b/openstack/tests/unit/cloud/test_image_snapshot.py @@ -0,0 +1,133 @@ +# Copyright 2016 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestImageSnapshot(base.TestCase): + def setUp(self): + super().setUp() + self.server_id = str(uuid.uuid4()) + self.image_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name + ) + + def test_create_image_snapshot_wait_until_active_never_active(self): + snapshot_name = 'test-snapshot' + pending_image = fakes.make_fake_image(self.image_id, status='pending') + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', + headers=dict( + Location='{endpoint}/images/{image_id}'.format( + endpoint='https://images.example.com', + image_id=self.image_id, + ) + ), + validate=dict( + json={ + "createImage": { + "name": snapshot_name, + "metadata": {}, + } + } + ), + ), + self.get_glance_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=pending_image, + ), + ] + ) + + self.assertRaises( + exceptions.ResourceTimeout, + self.cloud.create_image_snapshot, + snapshot_name, + dict(id=self.server_id), + wait=True, + timeout=0.01, + ) + + # After the fifth call, we just keep polling get images for status. + # Due to mocking sleep, we have no clue how many times we'll call it. 
+ self.assert_calls(stop_after=5, do_count=False) + + def test_create_image_snapshot_wait_active(self): + snapshot_name = 'test-snapshot' + pending_image = fakes.make_fake_image(self.image_id, status='pending') + fake_image = fakes.make_fake_image(self.image_id) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', + headers=dict( + Location='{endpoint}/images/{image_id}'.format( + endpoint='https://images.example.com', + image_id=self.image_id, + ) + ), + validate=dict( + json={ + "createImage": { + "name": snapshot_name, + "metadata": {}, + } + } + ), + ), + self.get_glance_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=pending_image, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'image', + append=['images', self.image_id], + base_url_append='v2', + ), + json=fake_image, + ), + ] + ) + image = self.cloud.create_image_snapshot( + 'test-snapshot', dict(id=self.server_id), wait=True, timeout=2 + ) + self.assertEqual(image['id'], self.image_id) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_inventory.py b/openstack/tests/unit/cloud/test_inventory.py new file mode 100644 index 0000000000..b2c65a5e25 --- /dev/null +++ b/openstack/tests/unit/cloud/test_inventory.py @@ -0,0 +1,147 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack.cloud import inventory +import openstack.config +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestInventory(base.TestCase): + def setUp(self): + super().setUp() + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.connection.Connection") + def test__init(self, mock_cloud, mock_config): + mock_config.return_value.get_all.return_value = [{}] + + inv = inventory.OpenStackInventory() + + mock_config.assert_called_once_with( + config_files=openstack.config.loader.CONFIG_FILES + ) + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + self.assertTrue(mock_config.return_value.get_all.called) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.connection.Connection") + def test__init_one_cloud(self, mock_cloud, mock_config): + mock_config.return_value.get_one.return_value = [{}] + + inv = inventory.OpenStackInventory(cloud='supercloud') + + mock_config.assert_called_once_with( + config_files=openstack.config.loader.CONFIG_FILES + ) + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + self.assertFalse(mock_config.return_value.get_all.called) + mock_config.return_value.get_one.assert_called_once_with('supercloud') + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.connection.Connection") + def test_list_hosts(self, mock_cloud, mock_config): + mock_config.return_value.get_all.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = dict(id='server_id', name='server_name') + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + inv.clouds[0].get_openstack_vars.return_value = server + + ret = inv.list_hosts() + + inv.clouds[0].list_servers.assert_called_once_with( + detailed=True, all_projects=False + ) + 
self.assertFalse(inv.clouds[0].get_openstack_vars.called) + self.assertEqual([server], ret) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.connection.Connection") + def test_list_hosts_no_detail(self, mock_cloud, mock_config): + mock_config.return_value.get_all.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = self.cloud._normalize_server( + fakes.make_fake_server('1234', 'test', 'ACTIVE', addresses={}) + ) + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + + inv.list_hosts(expand=False) + + inv.clouds[0].list_servers.assert_called_once_with( + detailed=False, all_projects=False + ) + self.assertFalse(inv.clouds[0].get_openstack_vars.called) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.connection.Connection") + def test_list_hosts_all_projects(self, mock_cloud, mock_config): + mock_config.return_value.get_all.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = dict(id='server_id', name='server_name') + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + inv.clouds[0].get_openstack_vars.return_value = server + + ret = inv.list_hosts(all_projects=True) + + inv.clouds[0].list_servers.assert_called_once_with( + detailed=True, all_projects=True + ) + self.assertFalse(inv.clouds[0].get_openstack_vars.called) + self.assertEqual([server], ret) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.connection.Connection") + def test_search_hosts(self, mock_cloud, mock_config): + mock_config.return_value.get_all.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = dict(id='server_id', name='server_name') + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + 
inv.clouds[0].get_openstack_vars.return_value = server + + ret = inv.search_hosts('server_id') + self.assertEqual([server], ret) + + @mock.patch("openstack.config.loader.OpenStackConfig") + @mock.patch("openstack.connection.Connection") + def test_get_host(self, mock_cloud, mock_config): + mock_config.return_value.get_all.return_value = [{}] + + inv = inventory.OpenStackInventory() + + server = dict(id='server_id', name='server_name') + self.assertIsInstance(inv.clouds, list) + self.assertEqual(1, len(inv.clouds)) + inv.clouds[0].list_servers.return_value = [server] + inv.clouds[0].get_openstack_vars.return_value = server + + ret = inv.get_host('server_id') + self.assertEqual(server, ret) diff --git a/openstack/tests/unit/cloud/test_keypair.py b/openstack/tests/unit/cloud/test_keypair.py new file mode 100644 index 0000000000..79694f4046 --- /dev/null +++ b/openstack/tests/unit/cloud/test_keypair.py @@ -0,0 +1,199 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import fixtures + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestKeypair(base.TestCase): + def setUp(self): + super().setUp() + self.keyname = self.getUniqueString('key') + self.key = fakes.make_fake_keypair(self.keyname) + self.useFixture( + fixtures.MonkeyPatch( + 'openstack.utils.maximum_supported_microversion', + lambda *args, **kwargs: '2.10', + ) + ) + + def test_create_keypair(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs'] + ), + json={'keypair': self.key}, + validate=dict( + json={ + 'keypair': { + 'name': self.key['name'], + 'public_key': self.key['public_key'], + } + } + ), + ), + ] + ) + + new_key = self.cloud.create_keypair( + self.keyname, self.key['public_key'] + ) + new_key_cmp = new_key.to_dict(ignore_none=True) + new_key_cmp.pop('location') + new_key_cmp.pop('id') + self.assertEqual(new_key_cmp, self.key) + + self.assert_calls() + + def test_create_keypair_exception(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs'] + ), + status_code=400, + validate=dict( + json={ + 'keypair': { + 'name': self.key['name'], + 'public_key': self.key['public_key'], + } + } + ), + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.create_keypair, + self.keyname, + self.key['public_key'], + ) + + self.assert_calls() + + def test_delete_keypair(self): + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-keypairs', self.keyname], + ), + status_code=202, + ), + ] + ) + self.assertTrue(self.cloud.delete_keypair(self.keyname)) + + self.assert_calls() + + def test_delete_keypair_not_found(self): + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-keypairs', self.keyname], + ), + 
status_code=404, + ), + ] + ) + self.assertFalse(self.cloud.delete_keypair(self.keyname)) + + self.assert_calls() + + def test_list_keypairs(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs'] + ), + json={'keypairs': [{'keypair': self.key}]}, + ), + ] + ) + keypairs = self.cloud.list_keypairs() + self.assertEqual(len(keypairs), 1) + self.assertEqual(keypairs[0].name, self.key['name']) + self.assert_calls() + + def test_list_keypairs_empty_filters(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs'] + ), + json={'keypairs': [{'keypair': self.key}]}, + ), + ] + ) + keypairs = self.cloud.list_keypairs(filters=None) + self.assertEqual(len(keypairs), 1) + self.assertEqual(keypairs[0].name, self.key['name']) + self.assert_calls() + + def test_list_keypairs_notempty_filters(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-keypairs'], + qs_elements=['user_id=b'], + ), + json={'keypairs': [{'keypair': self.key}]}, + ), + ] + ) + keypairs = self.cloud.list_keypairs( + filters={'user_id': 'b', 'fake': 'dummy'} + ) + self.assertEqual(len(keypairs), 1) + self.assertEqual(keypairs[0].name, self.key['name']) + self.assert_calls() + + def test_list_keypairs_exception(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['os-keypairs'] + ), + status_code=400, + ), + ] + ) + self.assertRaises(exceptions.SDKException, self.cloud.list_keypairs) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_limits.py b/openstack/tests/unit/cloud/test_limits.py new file mode 100644 index 0000000000..6fbf8a5319 --- /dev/null +++ b/openstack/tests/unit/cloud/test_limits.py @@ -0,0 +1,105 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance 
with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.tests.unit import base + + +class TestLimits(base.TestCase): + def test_get_compute_limits(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['limits'] + ), + json={ + "limits": { + "absolute": { + "maxImageMeta": 128, + "maxPersonality": 5, + "maxPersonalitySize": 10240, + "maxSecurityGroupRules": 20, + "maxSecurityGroups": 10, + "maxServerMeta": 128, + "maxTotalCores": 20, + "maxTotalFloatingIps": 10, + "maxTotalInstances": 10, + "maxTotalKeypairs": 100, + "maxTotalRAMSize": 51200, + "maxServerGroups": 10, + "maxServerGroupMembers": 10, + "totalCoresUsed": 0, + "totalInstancesUsed": 0, + "totalRAMUsed": 0, + "totalSecurityGroupsUsed": 0, + "totalFloatingIpsUsed": 0, + "totalServerGroupsUsed": 0, + }, + "rate": [], + } + }, + ), + ] + ) + + self.cloud.get_compute_limits() + + self.assert_calls() + + def test_other_get_compute_limits(self): + project = self.mock_for_keystone_projects( + project_count=1, id_get=True + )[0] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['limits'], + qs_elements=[f'tenant_id={project.project_id}'], + ), + json={ + "limits": { + "absolute": { + "maxImageMeta": 128, + "maxPersonality": 5, + "maxPersonalitySize": 10240, + "maxSecurityGroupRules": 20, + "maxSecurityGroups": 10, + "maxServerMeta": 128, + "maxTotalCores": 20, + "maxTotalFloatingIps": 10, + "maxTotalInstances": 10, + "maxTotalKeypairs": 100, + "maxTotalRAMSize": 51200, + "maxServerGroups": 10, + 
"maxServerGroupMembers": 10, + "totalCoresUsed": 0, + "totalInstancesUsed": 0, + "totalRAMUsed": 0, + "totalSecurityGroupsUsed": 0, + "totalFloatingIpsUsed": 0, + "totalServerGroupsUsed": 0, + }, + "rate": [], + } + }, + ), + ] + ) + + self.cloud.get_compute_limits(project.project_id) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_magnum_services.py b/openstack/tests/unit/cloud/test_magnum_services.py new file mode 100644 index 0000000000..d30531d4ba --- /dev/null +++ b/openstack/tests/unit/cloud/test_magnum_services.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.container_infrastructure_management.v1 import service +from openstack.tests.unit import base + + +magnum_service_obj = dict( + binary='fake-service', + created_at='2015-08-27T09:49:58-05:00', + disabled_reason=None, + host='fake-host', + id=1, + report_count=1, + state='up', + updated_at=None, +) + + +class TestMagnumServices(base.TestCase): + def test_list_magnum_services(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + service_type='container-infrastructure-management', + resource='mservices', + ), + json=dict(mservices=[magnum_service_obj]), + ) + ] + ) + mservices_list = self.cloud.list_magnum_services() + self.assertEqual( + mservices_list[0].to_dict(computed=False), + service.Service(**magnum_service_obj).to_dict(computed=False), + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_meta.py b/openstack/tests/unit/cloud/test_meta.py new file mode 100644 index 0000000000..58cbd162e3 --- /dev/null +++ b/openstack/tests/unit/cloud/test_meta.py @@ -0,0 +1,1420 @@ +# Copyrigh +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from unittest import mock + +from openstack.cloud import meta +from openstack.compute.v2 import server as _server +from openstack import connection +from openstack.tests import fakes +from openstack.tests.unit import base + + +PRIVATE_V4 = '198.51.100.3' +PUBLIC_V4 = '192.0.2.99' +PUBLIC_V6 = '2001:0db8:face:0da0:face::0b00:1c' # rfc3849 + + +class FakeConfig: + def get_region_name(self, service_type=None): + # TODO(efried): Validate service_type? + return 'test-region' + + +class FakeCloud: + config = FakeConfig() + name = 'test-name' + private = False + force_ipv4 = False + service_val = True + _unused = "useless" + _local_ipv6 = True + + def get_flavor_name(self, id): + return 'test-flavor-name' + + def get_image_name(self, id): + return 'test-image-name' + + def get_volumes(self, server): + return [] + + def has_service(self, service_name): + return self.service_val + + def use_internal_network(self): + return True + + def use_external_network(self): + return True + + def get_internal_networks(self): + return [] + + def get_external_networks(self): + return [] + + def get_internal_ipv4_networks(self): + return [] + + def get_external_ipv4_networks(self): + return [] + + def get_internal_ipv6_networks(self): + return [] + + def get_external_ipv6_networks(self): + return [] + + def list_server_security_groups(self, server): + return [] + + def get_default_network(self): + return None + + +standard_fake_server = fakes.make_fake_server( + server_id='test-id-0', + name='test-id-0', + status='ACTIVE', + addresses={ + 'private': [ + {'OS-EXT-IPS:type': 'fixed', 'addr': PRIVATE_V4, 'version': 4} + ], + 'public': [ + {'OS-EXT-IPS:type': 'floating', 'addr': PUBLIC_V4, 'version': 4} + ], + }, + flavor={'id': '101'}, + image={'id': '471c2475-da2f-47ac-aba5-cb4aa3d546f5'}, +) +standard_fake_server['metadata'] = {'group': 'test-group'} + +SUBNETS_WITH_NAT = [ + { + 'name': '', + 'enable_dhcp': True, + 'network_id': '5ef0358f-9403-4f7b-9151-376ca112abf7', + 'tenant_id': 
'29c79f394b2946f1a0f8446d715dc301', + 'dns_nameservers': [], + 'ipv6_ra_mode': None, + 'allocation_pools': [{'start': '10.10.10.2', 'end': '10.10.10.254'}], + 'gateway_ip': '10.10.10.1', + 'ipv6_address_mode': None, + 'ip_version': 4, + 'host_routes': [], + 'cidr': '10.10.10.0/24', + 'id': '14025a85-436e-4418-b0ee-f5b12a50f9b4', + }, +] + +OSIC_NETWORKS = [ + { + 'admin_state_up': True, + 'id': '7004a83a-13d3-4dcd-8cf5-52af1ace4cae', + 'mtu': 0, + 'name': 'GATEWAY_NET', + 'router:external': True, + 'shared': True, + 'status': 'ACTIVE', + 'subnets': ['cf785ee0-6cc9-4712-be3d-0bf6c86cf455'], + 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', + }, + { + 'admin_state_up': True, + 'id': '405abfcc-77dc-49b2-a271-139619ac9b26', + 'mtu': 0, + 'name': 'openstackjenkins-network1', + 'router:external': False, + 'shared': False, + 'status': 'ACTIVE', + 'subnets': ['a47910bc-f649-45db-98ec-e2421c413f4e'], + 'tenant_id': '7e9c4d5842b3451d94417bd0af03a0f4', + }, + { + 'admin_state_up': True, + 'id': '54753d2c-0a58-4928-9b32-084c59dd20a6', + 'mtu': 0, + 'name': 'GATEWAY_NET_V6', + 'router:external': True, + 'shared': True, + 'status': 'ACTIVE', + 'subnets': [ + '9c21d704-a8b9-409a-b56d-501cb518d380', + '7cb0ce07-64c3-4a3d-92d3-6f11419b45b9', + ], + 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', + }, +] + +OSIC_SUBNETS = [ + { + 'allocation_pools': [ + {'end': '172.99.106.254', 'start': '172.99.106.5'} + ], + 'cidr': '172.99.106.0/24', + 'dns_nameservers': ['69.20.0.164', '69.20.0.196'], + 'enable_dhcp': True, + 'gateway_ip': '172.99.106.1', + 'host_routes': [], + 'id': 'cf785ee0-6cc9-4712-be3d-0bf6c86cf455', + 'ip_version': 4, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': None, + 'name': 'GATEWAY_NET', + 'network_id': '7004a83a-13d3-4dcd-8cf5-52af1ace4cae', + 'subnetpool_id': None, + 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', + }, + { + 'allocation_pools': [{'end': '10.0.1.254', 'start': '10.0.1.2'}], + 'cidr': '10.0.1.0/24', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 
'enable_dhcp': True, + 'gateway_ip': '10.0.1.1', + 'host_routes': [], + 'id': 'a47910bc-f649-45db-98ec-e2421c413f4e', + 'ip_version': 4, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': None, + 'name': 'openstackjenkins-subnet1', + 'network_id': '405abfcc-77dc-49b2-a271-139619ac9b26', + 'subnetpool_id': None, + 'tenant_id': '7e9c4d5842b3451d94417bd0af03a0f4', + }, + { + 'allocation_pools': [{'end': '10.255.255.254', 'start': '10.0.0.2'}], + 'cidr': '10.0.0.0/8', + 'dns_nameservers': ['8.8.8.8', '8.8.4.4'], + 'enable_dhcp': True, + 'gateway_ip': '10.0.0.1', + 'host_routes': [], + 'id': '9c21d704-a8b9-409a-b56d-501cb518d380', + 'ip_version': 4, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': None, + 'name': 'GATEWAY_SUBNET_V6V4', + 'network_id': '54753d2c-0a58-4928-9b32-084c59dd20a6', + 'subnetpool_id': None, + 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', + }, + { + 'allocation_pools': [ + { + 'end': '2001:4800:1ae1:18:ffff:ffff:ffff:ffff', + 'start': '2001:4800:1ae1:18::2', + } + ], + 'cidr': '2001:4800:1ae1:18::/64', + 'dns_nameservers': ['2001:4860:4860::8888'], + 'enable_dhcp': True, + 'gateway_ip': '2001:4800:1ae1:18::1', + 'host_routes': [], + 'id': '7cb0ce07-64c3-4a3d-92d3-6f11419b45b9', + 'ip_version': 6, + 'ipv6_address_mode': 'dhcpv6-stateless', + 'ipv6_ra_mode': None, + 'name': 'GATEWAY_SUBNET_V6V6', + 'network_id': '54753d2c-0a58-4928-9b32-084c59dd20a6', + 'subnetpool_id': None, + 'tenant_id': '7a1ca9f7cc4e4b13ac0ed2957f1e8c32', + }, +] + + +class TestMeta(base.TestCase): + def test_find_nova_addresses_key_name(self): + # Note 198.51.100.0/24 is TEST-NET-2 from rfc5737 + addrs = { + 'public': [{'addr': '198.51.100.1', 'version': 4}], + 'private': [{'addr': '192.0.2.5', 'version': 4}], + } + self.assertEqual( + ['198.51.100.1'], + meta.find_nova_addresses(addrs, key_name='public'), + ) + self.assertEqual([], meta.find_nova_addresses(addrs, key_name='foo')) + + def test_find_nova_addresses_ext_tag(self): + addrs = { + 'public': [ + { + 'OS-EXT-IPS:type': 
'fixed', + 'addr': '198.51.100.2', + 'version': 4, + } + ] + } + self.assertEqual( + ['198.51.100.2'], meta.find_nova_addresses(addrs, ext_tag='fixed') + ) + self.assertEqual([], meta.find_nova_addresses(addrs, ext_tag='foo')) + + def test_find_nova_addresses_key_name_and_ext_tag(self): + addrs = { + 'public': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': '198.51.100.2', + 'version': 4, + } + ] + } + self.assertEqual( + ['198.51.100.2'], + meta.find_nova_addresses( + addrs, key_name='public', ext_tag='fixed' + ), + ) + self.assertEqual( + [], + meta.find_nova_addresses(addrs, key_name='public', ext_tag='foo'), + ) + self.assertEqual( + [], + meta.find_nova_addresses(addrs, key_name='bar', ext_tag='fixed'), + ) + + def test_find_nova_addresses_all(self): + addrs = { + 'public': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': '198.51.100.2', + 'version': 4, + } + ] + } + self.assertEqual( + ['198.51.100.2'], + meta.find_nova_addresses( + addrs, key_name='public', ext_tag='fixed', version=4 + ), + ) + self.assertEqual( + [], + meta.find_nova_addresses( + addrs, key_name='public', ext_tag='fixed', version=6 + ), + ) + + def test_find_nova_addresses_floating_first(self): + # Note 198.51.100.0/24 is TEST-NET-2 from rfc5737 + addrs = { + 'private': [ + {'addr': '192.0.2.5', 'version': 4, 'OS-EXT-IPS:type': 'fixed'} + ], + 'public': [ + { + 'addr': '198.51.100.1', + 'version': 4, + 'OS-EXT-IPS:type': 'floating', + } + ], + } + self.assertEqual( + ['198.51.100.1', '192.0.2.5'], meta.find_nova_addresses(addrs) + ) + + def test_get_server_ip(self): + srv = meta.obj_to_munch(standard_fake_server) + self.assertEqual(PRIVATE_V4, meta.get_server_ip(srv, ext_tag='fixed')) + self.assertEqual( + PUBLIC_V4, meta.get_server_ip(srv, ext_tag='floating') + ) + + def test_get_server_private_ip(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + {'id': 'test-net-id', 'name': 'test-net-name'} + ] + }, + ), + 
dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + ] + ) + + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={ + 'private': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': PRIVATE_V4, + 'version': 4, + } + ], + 'public': [ + { + 'OS-EXT-IPS:type': 'floating', + 'addr': PUBLIC_V4, + 'version': 4, + } + ], + }, + ) + + self.assertEqual( + PRIVATE_V4, meta.get_server_private_ip(srv, self.cloud) + ) + self.assert_calls() + + def test_get_server_multiple_private_ip(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [{'id': 'test-net-id', 'name': 'test-net'}] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + ] + ) + + shared_mac = '11:22:33:44:55:66' + distinct_mac = '66:55:44:33:22:11' + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={ + 'test-net': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'OS-EXT-IPS-MAC:mac_addr': distinct_mac, + 'addr': '10.0.0.100', + 'version': 4, + }, + { + 'OS-EXT-IPS:type': 'fixed', + 'OS-EXT-IPS-MAC:mac_addr': shared_mac, + 'addr': '10.0.0.101', + 'version': 4, + }, + ], + 'public': [ + { + 'OS-EXT-IPS:type': 'floating', + 'OS-EXT-IPS-MAC:mac_addr': shared_mac, + 'addr': PUBLIC_V4, + 'version': 4, + } + ], + }, + ) + + self.assertEqual( + '10.0.0.101', meta.get_server_private_ip(srv, self.cloud) + ) + self.assert_calls() + + @mock.patch.object(connection.Connection, 'has_service') + @mock.patch.object(connection.Connection, 'get_volumes') + @mock.patch.object(connection.Connection, 'get_image_name') + @mock.patch.object(connection.Connection, 'get_flavor_name') + def test_get_server_private_ip_devstack( + self, + mock_get_flavor_name, + mock_get_image_name, + mock_get_volumes, + mock_has_service, + ): + 
mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + mock_has_service.return_value = True + + fake_server = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + flavor={'id': '1'}, + image={ + 'name': 'cirros-0.3.4-x86_64-uec', + 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', + }, + addresses={ + 'test_pnztt_net': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': PRIVATE_V4, + 'version': 4, + 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', + } + ] + }, + ) + + self.register_uris( + [ + dict( + method='GET', + uri=( + 'https://network.example.com/v2.0/ports?' + 'device_id=test-id' + ), + json={ + 'ports': [ + { + 'id': 'test_port_id', + 'mac_address': 'fa:16:3e:ae:7d:42', + 'device_id': 'test-id', + } + ] + }, + ), + dict( + method='GET', + uri=( + 'https://network.example.com/v2.0/' + 'floatingips?port_id=test_port_id' + ), + json={'floatingips': []}, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False, + }, + {'id': 'private', 'name': 'private'}, + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', fake_server['id']], + ), + json=fake_server, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', + json={'security_groups': []}, + ), + ] + ) + + srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) + + self.assertEqual(PRIVATE_V4, srv['private_v4']) + self.assert_calls() + + @mock.patch.object(connection.Connection, 'get_volumes') + @mock.patch.object(connection.Connection, 'get_image_name') + @mock.patch.object(connection.Connection, 
'get_flavor_name') + def test_get_server_private_ip_no_fip( + self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes + ): + self.cloud._floating_ip_source = None + + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + + fake_server = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + flavor={'id': '1'}, + image={ + 'name': 'cirros-0.3.4-x86_64-uec', + 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', + }, + addresses={ + 'test_pnztt_net': [ + { + 'OS-EXT-IPS:type': 'fixed', + 'addr': PRIVATE_V4, + 'version': 4, + 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', + } + ] + }, + ) + + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False, + }, + {'id': 'private', 'name': 'private'}, + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', fake_server['id']], + ), + json=fake_server, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', + json={'security_groups': []}, + ), + ] + ) + + srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) + + self.assertEqual(PRIVATE_V4, srv['private_v4']) + self.assert_calls() + + @mock.patch.object(connection.Connection, 'get_volumes') + @mock.patch.object(connection.Connection, 'get_image_name') + @mock.patch.object(connection.Connection, 'get_flavor_name') + def test_get_server_cloud_no_fips( + self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes + ): + self.cloud._floating_ip_source = None + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + 
mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + + fake_server = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + flavor={'id': '1'}, + image={ + 'name': 'cirros-0.3.4-x86_64-uec', + 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', + }, + addresses={ + 'test_pnztt_net': [ + { + 'addr': PRIVATE_V4, + 'version': 4, + } + ] + }, + ) + + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False, + }, + {'id': 'private', 'name': 'private'}, + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', fake_server['id']], + ), + json=fake_server, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', + json={'security_groups': []}, + ), + ] + ) + + srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) + + self.assertEqual(PRIVATE_V4, srv['private_v4']) + self.assert_calls() + + @mock.patch.object(connection.Connection, 'has_service') + @mock.patch.object(connection.Connection, 'get_volumes') + @mock.patch.object(connection.Connection, 'get_image_name') + @mock.patch.object(connection.Connection, 'get_flavor_name') + def test_get_server_cloud_missing_fips( + self, + mock_get_flavor_name, + mock_get_image_name, + mock_get_volumes, + mock_has_service, + ): + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + mock_has_service.return_value = True + + fake_server = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + flavor={'id': '1'}, + image={ + 'name': 
'cirros-0.3.4-x86_64-uec', + 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', + }, + addresses={ + 'test_pnztt_net': [ + { + 'addr': PRIVATE_V4, + 'version': 4, + 'OS-EXT-IPS-MAC:mac_addr': 'fa:16:3e:ae:7d:42', + } + ] + }, + ) + + self.register_uris( + [ + # self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=( + 'https://network.example.com/v2.0/ports?' + 'device_id=test-id' + ), + json={ + 'ports': [ + { + 'id': 'test_port_id', + 'mac_address': 'fa:16:3e:ae:7d:42', + 'device_id': 'test-id', + } + ] + }, + ), + dict( + method='GET', + uri=( + 'https://network.example.com/v2.0/floatingips' + '?port_id=test_port_id' + ), + json={ + 'floatingips': [ + { + 'id': 'floating-ip-id', + 'port_id': 'test_port_id', + 'fixed_ip_address': PRIVATE_V4, + 'floating_ip_address': PUBLIC_V4, + } + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test_pnztt_net', + 'name': 'test_pnztt_net', + 'router:external': False, + }, + { + 'id': 'private', + 'name': 'private', + }, + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', fake_server['id']], + ), + json=fake_server, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', + json={'security_groups': []}, + ), + ] + ) + + srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) + + self.assertEqual(PUBLIC_V4, srv['public_v4']) + self.assert_calls() + + @mock.patch.object(connection.Connection, 'get_volumes') + @mock.patch.object(connection.Connection, 'get_image_name') + @mock.patch.object(connection.Connection, 'get_flavor_name') + def test_get_server_cloud_rackspace_v6( + self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes + ): + self.cloud.config.config['has_network'] = 
False + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + fake_server = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + flavor={'id': '1'}, + image={ + 'name': 'cirros-0.3.4-x86_64-uec', + 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', + }, + addresses={ + 'private': [{'addr': "10.223.160.141", 'version': 4}], + 'public': [ + {'addr': "104.130.246.91", 'version': 4}, + { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6, + }, + ], + }, + ) + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', fake_server['id']], + ), + json=fake_server, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', + json={'security_groups': []}, + ), + ] + ) + + srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) + + self.assertEqual("10.223.160.141", srv['private_v4']) + self.assertEqual("104.130.246.91", srv['public_v4']) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['public_v6'] + ) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['interface_ip'] + ) + self.assert_calls() + + @mock.patch.object(connection.Connection, 'get_volumes') + @mock.patch.object(connection.Connection, 'get_image_name') + @mock.patch.object(connection.Connection, 'get_flavor_name') + def test_get_server_cloud_osic_split( + self, mock_get_flavor_name, mock_get_image_name, mock_get_volumes + ): + self.cloud._floating_ip_source = None + self.cloud.force_ipv4 = False + self.cloud._local_ipv6 = True + self.cloud._external_ipv4_names = ['GATEWAY_NET'] + self.cloud._external_ipv6_names = ['GATEWAY_NET_V6'] + self.cloud._internal_ipv4_names = ['GATEWAY_NET_V6'] + 
self.cloud._internal_ipv6_names = [] + mock_get_image_name.return_value = 'cirros-0.3.4-x86_64-uec' + mock_get_flavor_name.return_value = 'm1.tiny' + mock_get_volumes.return_value = [] + + fake_server = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + flavor={'id': '1'}, + image={ + 'name': 'cirros-0.3.4-x86_64-uec', + 'id': 'f93d000b-7c29-4489-b375-3641a1758fe1', + }, + addresses={ + 'private': [{'addr': "10.223.160.141", 'version': 4}], + 'public': [ + {'addr': "104.130.246.91", 'version': 4}, + { + 'addr': "2001:4800:7819:103:be76:4eff:fe05:8525", + 'version': 6, + }, + ], + }, + ) + + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={'networks': OSIC_NETWORKS}, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': OSIC_SUBNETS}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', fake_server['id']], + ), + json=fake_server, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/test-id/os-security-groups', + json={'security_groups': []}, + ), + ] + ) + + srv = self.cloud.get_openstack_vars(_server.Server(**fake_server)) + + self.assertEqual("10.223.160.141", srv['private_v4']) + self.assertEqual("104.130.246.91", srv['public_v4']) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['public_v6'] + ) + self.assertEqual( + "2001:4800:7819:103:be76:4eff:fe05:8525", srv['interface_ip'] + ) + self.assert_calls() + + def test_get_server_external_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test-net-id', + 'name': 'test-net', + 'router:external': True, + } + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': 
SUBNETS_WITH_NAT}, + ), + ] + ) + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={'test-net': [{'addr': PUBLIC_V4, 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + self.assert_calls() + + def test_get_server_external_provider_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test-net-id', + 'name': 'test-net', + 'provider:network_type': 'vlan', + 'provider:physical_network': 'vlan', + } + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + ] + ) + + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={'test-net': [{'addr': PUBLIC_V4, 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + self.assert_calls() + + def test_get_server_internal_provider_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test-net-id', + 'name': 'test-net', + 'router:external': False, + 'provider:network_type': 'vxlan', + 'provider:physical_network': None, + } + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + ] + ) + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={'test-net': [{'addr': PRIVATE_V4, 'version': 4}]}, + ) + self.assertIsNone( + meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + ) + int_ip = meta.get_server_private_ip(cloud=self.cloud, server=srv) + + self.assertEqual(PRIVATE_V4, int_ip) + self.assert_calls() + + def 
test_get_server_external_none_ipv4_neutron(self): + # Testing Clouds with Neutron + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + json={ + 'networks': [ + { + 'id': 'test-net-id', + 'name': 'test-net', + 'router:external': False, + } + ] + }, + ), + dict( + method='GET', + uri='https://network.example.com/v2.0/subnets', + json={'subnets': SUBNETS_WITH_NAT}, + ), + ] + ) + + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={'test-net': [{'addr': PUBLIC_V4, 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertIsNone(ip) + self.assert_calls() + + def test_get_server_external_ipv4_neutron_accessIPv4(self): + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE' + ) + srv['accessIPv4'] = PUBLIC_V4 + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + + def test_get_server_external_ipv4_neutron_accessIPv6(self): + srv = fakes.make_fake_server( + server_id='test-id', name='test-name', status='ACTIVE' + ) + srv['accessIPv6'] = PUBLIC_V6 + ip = meta.get_server_external_ipv6(server=srv) + + self.assertEqual(PUBLIC_V6, ip) + + def test_get_server_external_ipv4_neutron_exception(self): + # Testing Clouds with a non working Neutron + self.register_uris( + [ + dict( + method='GET', + uri='https://network.example.com/v2.0/networks', + status_code=404, + ) + ] + ) + + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={'public': [{'addr': PUBLIC_V4, 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + self.assert_calls() + + def test_get_server_external_ipv4_nova_public(self): + # Testing Clouds w/o Neutron and a network named public + self.cloud.config.config['has_network'] = False + + srv = fakes.make_fake_server( + 
server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={'public': [{'addr': PUBLIC_V4, 'version': 4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertEqual(PUBLIC_V4, ip) + + def test_get_server_external_ipv4_nova_none(self): + # Testing Clouds w/o Neutron or a globally routable IP + self.cloud.config.config['has_network'] = False + + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={'test-net': [{'addr': PRIVATE_V4}]}, + ) + ip = meta.get_server_external_ipv4(cloud=self.cloud, server=srv) + + self.assertIsNone(ip) + + def test_get_server_external_ipv6(self): + srv = fakes.make_fake_server( + server_id='test-id', + name='test-name', + status='ACTIVE', + addresses={ + 'test-net': [ + {'addr': PUBLIC_V4, 'version': 4}, + {'addr': PUBLIC_V6, 'version': 6}, + ] + }, + ) + ip = meta.get_server_external_ipv6(srv) + self.assertEqual(PUBLIC_V6, ip) + + def test_get_groups_from_server(self): + server_vars = { + 'flavor': 'test-flavor', + 'image': 'test-image', + 'az': 'test-az', + } + self.assertEqual( + [ + 'test-name', + 'test-region', + 'test-name_test-region', + 'test-group', + 'instance-test-id-0', + 'meta-group_test-group', + 'test-az', + 'test-region_test-az', + 'test-name_test-region_test-az', + ], + meta.get_groups_from_server( + FakeCloud(), + meta.obj_to_munch(standard_fake_server), + server_vars, + ), + ) + + def test_obj_list_to_munch(self): + """Test conversion of a list of objects to a list of dictonaries""" + + class obj0: + value = 0 + + class obj1: + value = 1 + + list = [obj0, obj1] + new_list = meta.obj_list_to_munch(list) + self.assertEqual(new_list[0]['value'], 0) + self.assertEqual(new_list[1]['value'], 1) + + @mock.patch.object(FakeCloud, 'list_server_security_groups') + def test_get_security_groups(self, mock_list_server_security_groups): + '''This test verifies that calling get_hostvars_froms_server + ultimately calls 
list_server_security_groups, and that the return + value from list_server_security_groups ends up in + server['security_groups'].''' + mock_list_server_security_groups.return_value = [ + {'name': 'testgroup', 'id': '1'} + ] + + server = meta.obj_to_munch(standard_fake_server) + hostvars = meta.get_hostvars_from_server(FakeCloud(), server) + + mock_list_server_security_groups.assert_called_once_with(server) + self.assertEqual('testgroup', hostvars['security_groups'][0]['name']) + + @mock.patch.object(meta, 'get_server_external_ipv6') + @mock.patch.object(meta, 'get_server_external_ipv4') + def test_basic_hostvars( + self, mock_get_server_external_ipv4, mock_get_server_external_ipv6 + ): + mock_get_server_external_ipv4.return_value = PUBLIC_V4 + mock_get_server_external_ipv6.return_value = PUBLIC_V6 + + hostvars = meta.get_hostvars_from_server( + FakeCloud(), + self.cloud._normalize_server( + meta.obj_to_munch(standard_fake_server) + ), + ) + self.assertNotIn('links', hostvars) + self.assertEqual(PRIVATE_V4, hostvars['private_v4']) + self.assertEqual(PUBLIC_V4, hostvars['public_v4']) + self.assertEqual(PUBLIC_V6, hostvars['public_v6']) + self.assertEqual(PUBLIC_V6, hostvars['interface_ip']) + self.assertEqual('RegionOne', hostvars['region']) + self.assertEqual('_test_cloud_', hostvars['cloud']) + self.assertIn('location', hostvars) + self.assertEqual('_test_cloud_', hostvars['location']['cloud']) + self.assertEqual('RegionOne', hostvars['location']['region_name']) + self.assertEqual( + fakes.PROJECT_ID, hostvars['location']['project']['id'] + ) + self.assertEqual("test-image-name", hostvars['image']['name']) + self.assertEqual( + standard_fake_server['image']['id'], hostvars['image']['id'] + ) + self.assertNotIn('links', hostvars['image']) + self.assertEqual( + standard_fake_server['flavor']['id'], hostvars['flavor']['id'] + ) + self.assertEqual("test-flavor-name", hostvars['flavor']['name']) + self.assertNotIn('links', hostvars['flavor']) + # test having volumes + # 
test volume exception + self.assertEqual([], hostvars['volumes']) + + @mock.patch.object(meta, 'get_server_external_ipv6') + @mock.patch.object(meta, 'get_server_external_ipv4') + def test_ipv4_hostvars( + self, mock_get_server_external_ipv4, mock_get_server_external_ipv6 + ): + mock_get_server_external_ipv4.return_value = PUBLIC_V4 + mock_get_server_external_ipv6.return_value = PUBLIC_V6 + + fake_cloud = FakeCloud() + fake_cloud.force_ipv4 = True + hostvars = meta.get_hostvars_from_server( + fake_cloud, meta.obj_to_munch(standard_fake_server) + ) + self.assertEqual(PUBLIC_V4, hostvars['interface_ip']) + self.assertEqual('', hostvars['public_v6']) + + @mock.patch.object(meta, 'get_server_external_ipv4') + def test_private_interface_ip(self, mock_get_server_external_ipv4): + mock_get_server_external_ipv4.return_value = PUBLIC_V4 + + cloud = FakeCloud() + cloud.private = True + hostvars = meta.get_hostvars_from_server( + cloud, meta.obj_to_munch(standard_fake_server) + ) + self.assertEqual(PRIVATE_V4, hostvars['interface_ip']) + + @mock.patch.object(meta, 'get_server_external_ipv4') + def test_image_string(self, mock_get_server_external_ipv4): + mock_get_server_external_ipv4.return_value = PUBLIC_V4 + + server = standard_fake_server + server['image'] = 'fake-image-id' + hostvars = meta.get_hostvars_from_server( + FakeCloud(), meta.obj_to_munch(server) + ) + self.assertEqual('fake-image-id', hostvars['image']['id']) + + def test_az(self): + server = standard_fake_server + server['OS-EXT-AZ:availability_zone'] = 'az1' + + hostvars = self.cloud._normalize_server(meta.obj_to_munch(server)) + self.assertEqual('az1', hostvars['az']) + + def test_current_location(self): + self.assertEqual( + { + 'cloud': '_test_cloud_', + 'project': { + 'id': mock.ANY, + 'name': 'admin', + 'domain_id': None, + 'domain_name': 'default', + }, + 'region_name': 'RegionOne', + 'zone': None, + }, + self.cloud.current_location, + ) + + def test_current_project(self): + self.assertEqual( + { + 
'id': mock.ANY, + 'name': 'admin', + 'domain_id': None, + 'domain_name': 'default', + }, + self.cloud.current_project, + ) + + def test_has_volume(self): + mock_cloud = mock.MagicMock() + + fake_volume = fakes.FakeVolume( + id='volume1', + status='available', + name='Volume 1 Display Name', + attachments=[{'device': '/dev/sda0'}], + ) + fake_volume_dict = meta.obj_to_munch(fake_volume) + mock_cloud.get_volumes.return_value = [fake_volume_dict] + hostvars = meta.get_hostvars_from_server( + mock_cloud, meta.obj_to_munch(standard_fake_server) + ) + self.assertEqual('volume1', hostvars['volumes'][0]['id']) + self.assertEqual('/dev/sda0', hostvars['volumes'][0]['device']) + + def test_has_no_volume_service(self): + fake_cloud = FakeCloud() + fake_cloud.service_val = False + hostvars = meta.get_hostvars_from_server( + fake_cloud, meta.obj_to_munch(standard_fake_server) + ) + self.assertEqual([], hostvars['volumes']) + + def test_unknown_volume_exception(self): + mock_cloud = mock.MagicMock() + + class FakeException(Exception): + pass + + def side_effect(*args): + raise FakeException("No Volumes") + + mock_cloud.get_volumes.side_effect = side_effect + self.assertRaises( + FakeException, + meta.get_hostvars_from_server, + mock_cloud, + meta.obj_to_munch(standard_fake_server), + ) + + def test_obj_to_munch(self): + cloud = FakeCloud() + cloud.subcloud = FakeCloud() + cloud_dict = meta.obj_to_munch(cloud) + self.assertEqual(FakeCloud.name, cloud_dict['name']) + self.assertNotIn('_unused', cloud_dict) + self.assertNotIn('get_flavor_name', cloud_dict) + self.assertNotIn('subcloud', cloud_dict) + self.assertTrue(hasattr(cloud_dict, 'name')) + self.assertEqual(cloud_dict.name, cloud_dict['name']) + + def test_obj_to_munch_subclass(self): + class FakeObjDict(dict): + additional = 1 + + obj = FakeObjDict(foo='bar') + obj_dict = meta.obj_to_munch(obj) + self.assertIn('additional', obj_dict) + self.assertIn('foo', obj_dict) + self.assertEqual(obj_dict['additional'], 1) + 
self.assertEqual(obj_dict['foo'], 'bar') diff --git a/openstack/tests/unit/cloud/test_network.py b/openstack/tests/unit/cloud/test_network.py new file mode 100644 index 0000000000..53b3c5740e --- /dev/null +++ b/openstack/tests/unit/cloud/test_network.py @@ -0,0 +1,684 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy +from unittest import mock + +import testtools + +from openstack import exceptions +from openstack.network.v2 import network as _network +from openstack.tests.unit import base + + +class TestNeutronExtensions(base.TestCase): + def test__neutron_extensions(self): + body = [ + { + "updated": "2014-06-1T10:00:00-00:00", + "name": "Distributed Virtual Router", + "links": [], + "alias": "dvr", + "description": "Enables configuration of Distributed Virtual Routers.", # noqa: E501 + }, + { + "updated": "2013-07-23T10:00:00-00:00", + "name": "Allowed Address Pairs", + "links": [], + "alias": "allowed-address-pairs", + "description": "Provides allowed address pairs", + }, + ] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json=dict(extensions=body), + ) + ] + ) + extensions = self.cloud._neutron_extensions() + self.assertEqual({'dvr', 'allowed-address-pairs'}, extensions) + + self.assert_calls() + + def test__neutron_extensions_fails(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 
'extensions'] + ), + status_code=404, + ) + ] + ) + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud._neutron_extensions() + + self.assert_calls() + + def test__has_neutron_extension(self): + body = [ + { + "updated": "2014-06-1T10:00:00-00:00", + "name": "Distributed Virtual Router", + "links": [], + "alias": "dvr", + "description": "Enables configuration of Distributed Virtual Routers.", # noqa: E501 + }, + { + "updated": "2013-07-23T10:00:00-00:00", + "name": "Allowed Address Pairs", + "links": [], + "alias": "allowed-address-pairs", + "description": "Provides allowed address pairs", + }, + ] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json=dict(extensions=body), + ) + ] + ) + self.assertTrue(self.cloud._has_neutron_extension('dvr')) + self.assert_calls() + + def test__has_neutron_extension_missing(self): + body = [ + { + "updated": "2014-06-1T10:00:00-00:00", + "name": "Distributed Virtual Router", + "links": [], + "alias": "dvr", + "description": "Enables configuration of Distributed Virtual Routers.", # noqa: E501 + }, + { + "updated": "2013-07-23T10:00:00-00:00", + "name": "Allowed Address Pairs", + "links": [], + "alias": "allowed-address-pairs", + "description": "Provides allowed address pairs", + }, + ] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json=dict(extensions=body), + ) + ] + ) + self.assertFalse(self.cloud._has_neutron_extension('invalid')) + self.assert_calls() + + +class TestNetworks(base.TestCase): + mock_new_network_rep = { + 'provider:physical_network': None, + 'ipv6_address_scope': None, + 'revision_number': 3, + 'port_security_enabled': True, + 'provider:network_type': 'local', + 'id': '881d1bb7-a663-44c0-8f9f-ee2765b74486', + 'router:external': False, + 'availability_zone_hints': [], + 'availability_zones': [], + 
'provider:segmentation_id': None, + 'ipv4_address_scope': None, + 'shared': False, + 'project_id': '861808a93da0484ea1767967c4df8a23', + 'status': 'ACTIVE', + 'subnets': [], + 'description': '', + 'tags': [], + 'updated_at': '2017-04-22T19:22:53Z', + 'is_default': False, + 'qos_policy_id': None, + 'name': 'netname', + 'admin_state_up': True, + 'created_at': '2017-04-22T19:22:53Z', + 'mtu': 0, + 'dns_domain': 'sample.openstack.org.', + 'vlan_transparent': None, + 'vlan_qinq': None, + 'segments': None, + } + + network_availability_zone_extension = { + "alias": "network_availability_zone", + "updated": "2015-01-01T10:00:00-00:00", + "description": "Availability zone support for router.", + "links": [], + "name": "Network Availability Zone", + } + + enabled_neutron_extensions = [network_availability_zone_extension] + + def _compare_networks(self, exp, real): + self.assertDictEqual( + _network.Network(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_list_networks(self): + net1 = {'id': '1', 'name': 'net1'} + net2 = {'id': '2', 'name': 'net2'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': [net1, net2]}, + ) + ] + ) + nets = self.cloud.list_networks() + self.assertEqual( + [ + _network.Network(**i).to_dict(computed=False) + for i in [net1, net2] + ], + [i.to_dict(computed=False) for i in nets], + ) + self.assert_calls() + + def test_list_networks_filtered(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=["name=test"], + ), + json={'networks': []}, + ) + ] + ) + self.cloud.list_networks(filters={'name': 'test'}) + self.assert_calls() + + def test_list_networks_neutron_not_found(self): + self.use_nothing() + self.cloud.has_service = mock.Mock(return_value=False) + self.assertEqual([], self.cloud.list_networks()) + self.assert_calls() + + 
def test_create_network(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': self.mock_new_network_rep}, + validate=dict( + json={ + 'network': { + 'admin_state_up': True, + 'name': 'netname', + } + } + ), + ) + ] + ) + network = self.cloud.create_network("netname") + self._compare_networks(self.mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_specific_tenant(self): + project_id = "project_id_value" + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep['project_id'] = project_id + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': mock_new_network_rep}, + validate=dict( + json={ + 'network': { + 'admin_state_up': True, + 'name': 'netname', + 'project_id': project_id, + } + } + ), + ) + ] + ) + network = self.cloud.create_network("netname", project_id=project_id) + self._compare_networks(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_external(self): + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep['router:external'] = True + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': mock_new_network_rep}, + validate=dict( + json={ + 'network': { + 'admin_state_up': True, + 'name': 'netname', + 'router:external': True, + } + } + ), + ) + ] + ) + network = self.cloud.create_network("netname", external=True) + self._compare_networks(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_provider(self): + provider_opts = { + 'physical_network': 'mynet', + 'network_type': 'vlan', + 'segmentation_id': 'vlan1', + } + new_network_provider_opts = { + 'provider:physical_network': 'mynet', + 'provider:network_type': 'vlan', + 
'provider:segmentation_id': 'vlan1', + } + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep.update(new_network_provider_opts) + expected_send_params = {'admin_state_up': True, 'name': 'netname'} + expected_send_params.update(new_network_provider_opts) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': mock_new_network_rep}, + validate=dict(json={'network': expected_send_params}), + ) + ] + ) + network = self.cloud.create_network("netname", provider=provider_opts) + self._compare_networks(mock_new_network_rep, network) + self.assert_calls() + + def test_update_network_provider(self): + network_id = "test-net-id" + network_name = "network" + network = {'id': network_id, 'name': network_name} + provider_opts = { + 'physical_network': 'mynet', + 'network_type': 'vlan', + 'segmentation_id': 'vlan1', + 'should_not_be_passed': 1, + } + update_network_provider_opts = { + 'provider:physical_network': 'mynet', + 'provider:network_type': 'vlan', + 'provider:segmentation_id': 'vlan1', + } + mock_update_rep = copy.copy(self.mock_new_network_rep) + mock_update_rep.update(update_network_provider_opts) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={network_name}'], + ), + json={'networks': [network]}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', network_id], + ), + json={'network': mock_update_rep}, + validate=dict( + json={'network': update_network_provider_opts} + ), + ), + ] + ) + network = self.cloud.update_network( + network_name, provider=provider_opts + ) + self._compare_networks(mock_update_rep, network) + self.assert_calls() + 
+ def test_create_network_with_availability_zone_hints(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': self.mock_new_network_rep}, + validate=dict( + json={ + 'network': { + 'admin_state_up': True, + 'name': 'netname', + 'availability_zone_hints': ['nova'], + } + } + ), + ), + ] + ) + network = self.cloud.create_network( + "netname", availability_zone_hints=['nova'] + ) + self._compare_networks(self.mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_provider_ignored_value(self): + provider_opts = { + 'physical_network': 'mynet', + 'network_type': 'vlan', + 'segmentation_id': 'vlan1', + 'should_not_be_passed': 1, + } + new_network_provider_opts = { + 'provider:physical_network': 'mynet', + 'provider:network_type': 'vlan', + 'provider:segmentation_id': 'vlan1', + } + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep.update(new_network_provider_opts) + expected_send_params = {'admin_state_up': True, 'name': 'netname'} + expected_send_params.update(new_network_provider_opts) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': mock_new_network_rep}, + validate=dict(json={'network': expected_send_params}), + ) + ] + ) + network = self.cloud.create_network("netname", provider=provider_opts) + self._compare_networks(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_wrong_availability_zone_hints_type(self): + azh_opts = "invalid" + with testtools.ExpectedException( + exceptions.SDKException, + "Parameter 'availability_zone_hints' must be a list", + ): + self.cloud.create_network( + "netname", 
availability_zone_hints=azh_opts + ) + + def test_create_network_provider_wrong_type(self): + provider_opts = "invalid" + with testtools.ExpectedException( + exceptions.SDKException, + "Parameter 'provider' must be a dict", + ): + self.cloud.create_network("netname", provider=provider_opts) + + def test_create_network_port_security_disabled(self): + port_security_state = False + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep['port_security_enabled'] = port_security_state + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': mock_new_network_rep}, + validate=dict( + json={ + 'network': { + 'admin_state_up': True, + 'name': 'netname', + 'port_security_enabled': port_security_state, + } + } + ), + ) + ] + ) + network = self.cloud.create_network( + "netname", port_security_enabled=port_security_state + ) + self._compare_networks(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_with_mtu(self): + mtu_size = 1500 + mock_new_network_rep = copy.copy(self.mock_new_network_rep) + mock_new_network_rep['mtu'] = mtu_size + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'network': mock_new_network_rep}, + validate=dict( + json={ + 'network': { + 'admin_state_up': True, + 'name': 'netname', + 'mtu': mtu_size, + } + } + ), + ) + ] + ) + network = self.cloud.create_network("netname", mtu_size=mtu_size) + self._compare_networks(mock_new_network_rep, network) + self.assert_calls() + + def test_create_network_with_wrong_mtu_size(self): + with testtools.ExpectedException( + exceptions.SDKException, + "Parameter 'mtu_size' must be greater than 67.", + ): + self.cloud.create_network("netname", mtu_size=42) + + def test_create_network_with_wrong_mtu_type(self): + with testtools.ExpectedException( + exceptions.SDKException, + "Parameter 
'mtu_size' must be an integer.", + ): + self.cloud.create_network("netname", mtu_size="fourty_two") + + def test_delete_network(self): + network_id = "test-net-id" + network_name = "network" + network = {'id': network_id, 'name': network_name} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={network_name}'], + ), + json={'networks': [network]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', network_id], + ), + json={}, + ), + ] + ) + self.assertTrue(self.cloud.delete_network(network_name)) + self.assert_calls() + + def test_delete_network_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', 'test-net'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=['name=test-net'], + ), + json={'networks': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_network('test-net')) + self.assert_calls() + + def test_delete_network_exception(self): + network_id = "test-net-id" + network_name = "network" + network = {'id': network_id, 'name': network_name} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={network_name}'], + ), + json={'networks': [network]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', network_id], + ), + status_code=503, + ), + ] + ) + 
self.assertRaises( + exceptions.SDKException, + self.cloud.delete_network, + network_name, + ) + self.assert_calls() + + def test_get_network_by_id(self): + network_id = "test-net-id" + network_name = "network" + network = {'id': network_id, 'name': network_name} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', network_id], + ), + json={'network': network}, + ) + ] + ) + self.assertTrue(self.cloud.get_network_by_id(network_id)) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_object.py b/openstack/tests/unit/cloud/test_object.py new file mode 100644 index 0000000000..0ea67d8b14 --- /dev/null +++ b/openstack/tests/unit/cloud/test_object.py @@ -0,0 +1,1625 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import tempfile +from unittest import mock + +import testtools + +from openstack.cloud import _object_store +from openstack import exceptions +from openstack.object_store.v1 import _proxy +from openstack.object_store.v1 import container +from openstack.object_store.v1 import obj +from openstack.tests.unit import base +from openstack import utils + + +class BaseTestObject(base.TestCase): + def setUp(self): + super().setUp() + + self.container = self.getUniqueString() + self.object = self.getUniqueString() + self.endpoint = self.cloud.object_store.get_endpoint() + self.container_endpoint = f'{self.endpoint}/{self.container}' + self.object_endpoint = f'{self.container_endpoint}/{self.object}' + + def _compare_containers(self, exp, real): + self.assertDictEqual( + container.Container(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def _compare_objects(self, exp, real): + self.assertDictEqual( + obj.Object(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + +class TestObject(BaseTestObject): + def test_create_container(self): + """Test creating a (private) container""" + self.register_uris( + [ + dict( + method='HEAD', uri=self.container_endpoint, status_code=404 + ), + dict( + method='PUT', + uri=self.container_endpoint, + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + }, + ), + dict( + method='HEAD', + uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8', + }, + ), + ] + ) + + self.cloud.create_container(self.container) + self.assert_calls() + + def test_create_container_public(self): + """Test creating a 
public container""" + self.register_uris( + [ + dict( + method='HEAD', uri=self.container_endpoint, status_code=404 + ), + dict( + method='PUT', + uri=self.container_endpoint, + status_code=201, + headers={ + 'Date': 'Fri, 16 Dec 2016 18:21:20 GMT', + 'Content-Length': '0', + 'Content-Type': 'text/html; charset=UTF-8', + 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS[ # noqa: E501 + 'public' + ], + }, + ), + dict( + method='HEAD', + uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8', + }, + ), + ] + ) + + self.cloud.create_container(self.container, public=True) + self.assert_calls() + + def test_create_container_exists(self): + """Test creating a container that exists.""" + self.register_uris( + [ + dict( + method='HEAD', + uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8', + }, + ) + ] + ) + + container = self.cloud.create_container(self.container) + + self.assert_calls() + self.assertIsNotNone(container) + + def test_delete_container(self): + self.register_uris( + [dict(method='DELETE', uri=self.container_endpoint)] + ) + + self.assertTrue(self.cloud.delete_container(self.container)) + self.assert_calls() + + def test_delete_container_404(self): + """No exception when deleting a container that does not exist""" + self.register_uris( + [ + dict( + method='DELETE', + uri=self.container_endpoint, + 
status_code=404, + ) + ] + ) + + self.assertFalse(self.cloud.delete_container(self.container)) + self.assert_calls() + + def test_delete_container_error(self): + """Non-404 swift error re-raised as OSCE""" + # 409 happens if the container is not empty + self.register_uris( + [ + dict( + method='DELETE', + uri=self.container_endpoint, + status_code=409, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_container, + self.container, + ) + self.assert_calls() + + def test_update_container(self): + headers = { + 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS['public'] + } + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict(headers=headers), + ) + ] + ) + + self.cloud.update_container(self.container, headers) + self.assert_calls() + + def test_update_container_error(self): + """Swift error re-raised as OSCE""" + # This test is of questionable value - the swift API docs do not + # declare error codes (other than 404 for the container) for this + # method, and I cannot make a synthetic failure to validate a real + # error code. So we're really just testing the shade adapter error + # raising logic here, rather than anything specific to swift. 
+ self.register_uris( + [dict(method='POST', uri=self.container_endpoint, status_code=409)] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.update_container, + self.container, + dict(foo='bar'), + ) + self.assert_calls() + + def test_set_container_access_public(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={ + 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS[ # noqa: E501 + 'public' + ] + } + ), + ) + ] + ) + + self.cloud.set_container_access(self.container, 'public') + + self.assert_calls() + + def test_set_container_access_private(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={ + 'x-container-read': _object_store.OBJECT_CONTAINER_ACLS[ # noqa: E501 + 'private' + ] + } + ), + ) + ] + ) + + self.cloud.set_container_access(self.container, 'private') + + self.assert_calls() + + def test_set_container_access_invalid(self): + self.assertRaises( + exceptions.SDKException, + self.cloud.set_container_access, + self.container, + 'invalid', + ) + + def test_get_container_access(self): + self.register_uris( + [ + dict( + method='HEAD', + uri=self.container_endpoint, + headers={ + 'x-container-read': str( + _object_store.OBJECT_CONTAINER_ACLS['public'] + ) + }, + ) + ] + ) + access = self.cloud.get_container_access(self.container) + self.assertEqual('public', access) + + def test_get_container_invalid(self): + self.register_uris( + [ + dict( + method='HEAD', + uri=self.container_endpoint, + headers={'x-container-read': 'invalid'}, + ) + ] + ) + + with testtools.ExpectedException( + exceptions.SDKException, + "Could not determine container access for ACL: invalid", + ): + self.cloud.get_container_access(self.container) + + def test_get_container_access_not_found(self): + self.register_uris( + [dict(method='HEAD', uri=self.container_endpoint, status_code=404)] + ) + with 
testtools.ExpectedException( + exceptions.SDKException, + f"Container not found: {self.container}", + ): + self.cloud.get_container_access(self.container) + + def test_list_containers(self): + endpoint = f'{self.endpoint}/' + containers = [{'count': 0, 'bytes': 0, 'name': self.container}] + + self.register_uris( + [ + dict( + method='GET', + uri=endpoint, + complete_qs=True, + json=containers, + ) + ] + ) + + ret = self.cloud.list_containers() + + self.assert_calls() + for a, b in zip(containers, ret): + self._compare_containers(a, b) + + def test_list_containers_exception(self): + endpoint = f'{self.endpoint}/' + self.register_uris( + [ + dict( + method='GET', + uri=endpoint, + complete_qs=True, + status_code=416, + ) + ] + ) + + self.assertRaises(exceptions.SDKException, self.cloud.list_containers) + self.assert_calls() + + @mock.patch.object(_proxy, '_get_expiration', return_value=13345) + def test_generate_form_signature_container_key(self, mock_expiration): + self.register_uris( + [ + dict( + method='HEAD', + uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'X-Container-Meta-Temp-Url-Key': 'amazingly-secure-key', # noqa: E501 + 'Content-Type': 'text/plain; charset=utf-8', + }, + ) + ] + ) + self.assertEqual( + (13345, '60731fb66d46c97cdcb79b6154363179c500b9d9'), + self.cloud.object_store.generate_form_signature( + self.container, + object_prefix='prefix/location', + redirect_url='https://example.com/location', + max_file_size=1024 * 1024 * 1024, + max_upload_count=10, + timeout=1000, + temp_url_key=None, + ), + ) + self.assert_calls() + + @mock.patch.object(_proxy, '_get_expiration', return_value=13345) + def test_generate_form_signature_account_key(self, mock_expiration): + 
self.register_uris( + [ + dict( + method='HEAD', + uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8', + }, + ), + dict( + method='HEAD', + uri=self.endpoint + '/', + headers={ + 'X-Account-Meta-Temp-Url-Key': 'amazingly-secure-key' + }, + ), + ] + ) + self.assertEqual( + (13345, '3cb9bc83d5a4136421bb2c1f58b963740566646f'), + self.cloud.object_store.generate_form_signature( + self.container, + object_prefix='prefix/location', + redirect_url='https://example.com/location', + max_file_size=1024 * 1024 * 1024, + max_upload_count=10, + timeout=1000, + temp_url_key=None, + ), + ) + self.assert_calls() + + @mock.patch.object(_proxy, '_get_expiration', return_value=13345) + def test_generate_form_signature_key_argument(self, mock_expiration): + self.assertEqual( + (13345, '1c283a05c6628274b732212d9a885265e6f67b63'), + self.cloud.object_store.generate_form_signature( + self.container, + object_prefix='prefix/location', + redirect_url='https://example.com/location', + max_file_size=1024 * 1024 * 1024, + max_upload_count=10, + timeout=1000, + temp_url_key='amazingly-secure-key', + ), + ) + self.assert_calls() + + def test_generate_form_signature_no_key(self): + self.register_uris( + [ + dict( + method='HEAD', + uri=self.container_endpoint, + headers={ + 'Content-Length': '0', + 'X-Container-Object-Count': '0', + 'Accept-Ranges': 'bytes', + 'X-Storage-Policy': 'Policy-0', + 'Date': 'Fri, 16 Dec 2016 18:29:05 GMT', + 'X-Timestamp': '1481912480.41664', + 'X-Trans-Id': 'tx60ec128d9dbf44b9add68-0058543271dfw1', + 'X-Container-Bytes-Used': '0', + 'Content-Type': 'text/plain; charset=utf-8', + }, + ), + dict(method='HEAD', uri=self.endpoint + '/', 
headers={}), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.object_store.generate_form_signature, + self.container, + object_prefix='prefix/location', + redirect_url='https://example.com/location', + max_file_size=1024 * 1024 * 1024, + max_upload_count=10, + timeout=1000, + temp_url_key=None, + ) + self.assert_calls() + + def test_set_account_temp_url_key(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.endpoint + '/', + status_code=204, + validate=dict( + headers={'x-account-meta-temp-url-key': key} + ), + ), + dict( + method='HEAD', + uri=self.endpoint + '/', + headers={'x-account-meta-temp-url-key': key}, + ), + ] + ) + self.cloud.object_store.set_account_temp_url_key(key) + self.assert_calls() + + def test_set_account_temp_url_key_secondary(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.endpoint + '/', + status_code=204, + validate=dict( + headers={'x-account-meta-temp-url-key-2': key} + ), + ), + dict( + method='HEAD', + uri=self.endpoint + '/', + headers={'x-account-meta-temp-url-key-2': key}, + ), + ] + ) + self.cloud.object_store.set_account_temp_url_key(key, secondary=True) + self.assert_calls() + + def test_set_container_temp_url_key(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={'x-container-meta-temp-url-key': key} + ), + ), + dict( + method='HEAD', + uri=self.container_endpoint, + headers={'x-container-meta-temp-url-key': key}, + ), + ] + ) + self.cloud.object_store.set_container_temp_url_key(self.container, key) + self.assert_calls() + + def test_set_container_temp_url_key_secondary(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={'x-container-meta-temp-url-key-2': key} + ), + ), + dict( + 
method='HEAD', + uri=self.container_endpoint, + headers={'x-container-meta-temp-url-key-2': key}, + ), + ] + ) + self.cloud.object_store.set_container_temp_url_key( + self.container, key, secondary=True + ) + self.assert_calls() + + def test_list_objects(self): + endpoint = f'{self.container_endpoint}?format=json' + + objects = [ + { + 'bytes': 20304400896, + 'last_modified': '2016-12-15T13:34:13.650090', + 'hash': 'daaf9ed2106d09bba96cf193d866445e', + 'name': self.object, + 'content_type': 'application/octet-stream', + } + ] + + self.register_uris( + [dict(method='GET', uri=endpoint, complete_qs=True, json=objects)] + ) + + ret = self.cloud.list_objects(self.container) + + self.assert_calls() + for a, b in zip(objects, ret): + self._compare_objects(a, b) + + def test_list_objects_with_prefix(self): + endpoint = f'{self.container_endpoint}?format=json&prefix=test' + + objects = [ + { + 'bytes': 20304400896, + 'last_modified': '2016-12-15T13:34:13.650090', + 'hash': 'daaf9ed2106d09bba96cf193d866445e', + 'name': self.object, + 'content_type': 'application/octet-stream', + } + ] + + self.register_uris( + [dict(method='GET', uri=endpoint, complete_qs=True, json=objects)] + ) + + ret = self.cloud.list_objects(self.container, prefix='test') + + self.assert_calls() + for a, b in zip(objects, ret): + self._compare_objects(a, b) + + def test_list_objects_exception(self): + endpoint = f'{self.container_endpoint}?format=json' + self.register_uris( + [ + dict( + method='GET', + uri=endpoint, + complete_qs=True, + status_code=416, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.list_objects, + self.container, + ) + self.assert_calls() + + def test_delete_object(self): + self.register_uris( + [ + dict( + method='HEAD', + uri=self.object_endpoint, + headers={'X-Object-Meta': 'foo'}, + ), + dict( + method='DELETE', uri=self.object_endpoint, status_code=204 + ), + ] + ) + + self.assertTrue(self.cloud.delete_object(self.container, self.object)) + + 
self.assert_calls() + + def test_delete_object_not_found(self): + self.register_uris( + [dict(method='HEAD', uri=self.object_endpoint, status_code=404)] + ) + + self.assertFalse(self.cloud.delete_object(self.container, self.object)) + + self.assert_calls() + + def test_get_object(self): + headers = { + 'Content-Length': '20304400896', + 'Content-Type': 'application/octet-stream', + 'Accept-Ranges': 'bytes', + 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', + 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', + 'X-Timestamp': '1481808853.65009', + 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', + 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', + 'X-Static-Large-Object': 'True', + 'X-Object-Meta-Mtime': '1481513709.168512', + } + response_headers = {k.lower(): v for k, v in headers.items()} + text = 'test body' + self.register_uris( + [ + dict( + method='GET', + uri=self.object_endpoint, + headers={ + 'Content-Length': '20304400896', + 'Content-Type': 'application/octet-stream', + 'Accept-Ranges': 'bytes', + 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', + 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', + 'X-Timestamp': '1481808853.65009', + 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', + 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', + 'X-Static-Large-Object': 'True', + 'X-Object-Meta-Mtime': '1481513709.168512', + }, + text='test body', + ) + ] + ) + + resp = self.cloud.get_object(self.container, self.object) + + self.assert_calls() + + self.assertEqual((response_headers, text), resp) + + def test_stream_object(self): + text = b'test body' + self.register_uris( + [ + dict( + method='GET', + uri=self.object_endpoint, + headers={ + 'Content-Length': '20304400896', + 'Content-Type': 'application/octet-stream', + 'Accept-Ranges': 'bytes', + 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', + 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', + 'X-Timestamp': '1481808853.65009', + 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', + 'Date': 'Mon, 19 Dec 2016 14:24:00 
GMT', + 'X-Static-Large-Object': 'True', + 'X-Object-Meta-Mtime': '1481513709.168512', + }, + text='test body', + ) + ] + ) + + response_text = b'' + for data in self.cloud.stream_object(self.container, self.object): + response_text += data + + self.assert_calls() + + self.assertEqual(text, response_text) + + def test_stream_object_not_found(self): + self.register_uris( + [ + dict(method='GET', uri=self.object_endpoint, status_code=404), + ] + ) + + response_text = b'' + for data in self.cloud.stream_object(self.container, self.object): + response_text += data + + self.assert_calls() + + self.assertEqual(b'', response_text) + + def test_get_object_not_found(self): + self.register_uris( + [dict(method='GET', uri=self.object_endpoint, status_code=404)] + ) + + self.assertIsNone(self.cloud.get_object(self.container, self.object)) + + self.assert_calls() + + def test_get_object_exception(self): + self.register_uris( + [dict(method='GET', uri=self.object_endpoint, status_code=416)] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.get_object, + self.container, + self.object, + ) + + self.assert_calls() + + def test_get_object_segment_size_below_min(self): + # Register directly becuase we make multiple calls. The number + # of calls we make isn't interesting - what we do with the return + # values is. Don't run assert_calls for the same reason. 
+ self.register_uris( + [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': 1000}, + slo={'min_segment_size': 500}, + ), + headers={'Content-Type': 'application/json'}, + ) + ] + ) + self.assertEqual(500, self.cloud.get_object_segment_size(400)) + self.assertEqual(900, self.cloud.get_object_segment_size(900)) + self.assertEqual(1000, self.cloud.get_object_segment_size(1000)) + self.assertEqual(1000, self.cloud.get_object_segment_size(1100)) + + def test_get_object_segment_size_http_404(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://object-store.example.com/info', + status_code=404, + reason='Not Found', + ) + ] + ) + self.assertEqual( + _proxy.DEFAULT_OBJECT_SEGMENT_SIZE, + self.cloud.get_object_segment_size(None), + ) + self.assert_calls() + + def test_get_object_segment_size_http_412(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://object-store.example.com/info', + status_code=412, + reason='Precondition failed', + ) + ] + ) + self.assertEqual( + _proxy.DEFAULT_OBJECT_SEGMENT_SIZE, + self.cloud.get_object_segment_size(None), + ) + self.assert_calls() + + def test_update_container_cors(self): + headers = { + 'X-Container-Meta-Web-Index': 'index.html', + 'X-Container-Meta-Access-Control-Allow-Origin': '*', + } + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict(headers=headers), + ) + ] + ) + self.cloud.update_container(self.container, headers=headers) + self.assert_calls() + + +class TestObjectUploads(BaseTestObject): + def setUp(self): + super().setUp() + + self.content = self.getUniqueString().encode('latin-1') + self.object_file = tempfile.NamedTemporaryFile(delete=False) + self.object_file.write(self.content) + self.object_file.close() + self.md5, self.sha256 = utils._get_file_hashes(self.object_file.name) + self.endpoint = self.cloud.object_store.get_endpoint() + + def 
test_create_object(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': 1000}, + slo={'min_segment_size': 500}, + ), + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=404, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict( + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + } + ), + ), + ] + ) + + self.cloud.create_object( + container=self.container, + name=self.object, + filename=self.object_file.name, + ) + + self.assert_calls() + + def test_create_object_index_rax(self): + self.register_uris( + [ + dict( + method='PUT', + uri='{endpoint}/{container}/{object}'.format( + endpoint=self.endpoint, + container=self.container, + object='index.html', + ), + status_code=201, + validate=dict( + headers={ + 'access-control-allow-origin': '*', + 'content-type': 'text/html', + } + ), + ) + ] + ) + + headers = { + 'access-control-allow-origin': '*', + 'content-type': 'text/html', + } + self.cloud.create_object( + self.container, name='index.html', data='', **headers + ) + + self.assert_calls() + + def test_create_directory_marker_object(self): + self.register_uris( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict( + headers={ + 'content-type': 'application/directory', + } + ), + ) + ] + ) + + self.cloud.create_directory_marker_object( + container=self.container, name=self.object + ) + + self.assert_calls() + + def test_create_dynamic_large_object(self): + max_file_size = 2 + min_file_size = 1 + + uris_to_mock = [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size}, + ), + ), + dict( + method='HEAD', + 
uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=404, + ), + ] + + uris_to_mock.extend( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', + status_code=201, + ) + for index, offset in enumerate( + range(0, len(self.content), max_file_size) + ) + ] + ) + + uris_to_mock.append( + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict( + headers={ + 'x-object-manifest': f'{self.container}/{self.object}', + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + } + ), + ) + ) + self.register_uris(uris_to_mock) + self.cloud.create_object( + container=self.container, + name=self.object, + filename=self.object_file.name, + use_slo=False, + ) + + # After call 3, order become indeterminate because of thread pool + self.assert_calls(stop_after=3) + + for key, value in self.calls[-1]['headers'].items(): + self.assertEqual( + value, + self.adapter.request_history[-1].headers[key], + 'header mismatch in manifest call', + ) + + def test_create_static_large_object(self): + max_file_size = 25 + min_file_size = 1 + + uris_to_mock = [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size}, + ), + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=404, + ), + ] + + uris_to_mock.extend( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', + status_code=201, + headers=dict(Etag=f'etag{index}'), + ) + for index, offset in enumerate( + range(0, len(self.content), max_file_size) + ) + ] + ) + + uris_to_mock.append( + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 
'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ) + ) + self.register_uris(uris_to_mock) + + self.cloud.create_object( + container=self.container, + name=self.object, + filename=self.object_file.name, + use_slo=True, + ) + + # After call 3, order become indeterminate because of thread pool + self.assert_calls(stop_after=3) + + for key, value in self.calls[-1]['headers'].items(): + self.assertEqual( + value, + self.adapter.request_history[-1].headers[key], + 'header mismatch in manifest call', + ) + + base_object = f'/{self.container}/{self.object}' + + self.assertEqual( + [ + { + 'path': f"{base_object}/000000", + 'size_bytes': 25, + 'etag': 'etag0', + }, + { + 'path': f"{base_object}/000001", + 'size_bytes': 25, + 'etag': 'etag1', + }, + { + 'path': f"{base_object}/000002", + 'size_bytes': 25, + 'etag': 'etag2', + }, + { + 'path': f"{base_object}/000003", + 'size_bytes': len(self.object) - 75, + 'etag': 'etag3', + }, + ], + self.adapter.request_history[-1].json(), + ) + + def test_slo_manifest_retry(self): + """ + Uploading the SLO manifest file should be retried up to 3 times before + giving up. This test should succeed on the 3rd and final attempt. 
+ """ + max_file_size = 25 + min_file_size = 1 + + uris_to_mock = [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size}, + ), + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=404, + ), + ] + + uris_to_mock.extend( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', + status_code=201, + headers=dict(Etag=f'etag{index}'), + ) + for index, offset in enumerate( + range(0, len(self.content), max_file_size) + ) + ] + ) + + # manifest file upload calls + uris_to_mock.extend( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=400, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=400, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ), + ] + ) + + self.register_uris(uris_to_mock) + + self.cloud.create_object( + container=self.container, + name=self.object, + filename=self.object_file.name, + use_slo=True, + ) + + # After call 3, order become indeterminate because of thread pool + self.assert_calls(stop_after=3) + + for key, value in self.calls[-1]['headers'].items(): + self.assertEqual( + value, + self.adapter.request_history[-1].headers[key], + 'header mismatch in manifest call', + ) + + base_object = 
f'/{self.container}/{self.object}' + + self.assertEqual( + [ + { + 'path': f"{base_object}/000000", + 'size_bytes': 25, + 'etag': 'etag0', + }, + { + 'path': f"{base_object}/000001", + 'size_bytes': 25, + 'etag': 'etag1', + }, + { + 'path': f"{base_object}/000002", + 'size_bytes': 25, + 'etag': 'etag2', + }, + { + 'path': f"{base_object}/000003", + 'size_bytes': len(self.object) - 75, + 'etag': 'etag3', + }, + ], + self.adapter.request_history[-1].json(), + ) + + def test_slo_manifest_fail(self): + """ + Uploading the SLO manifest file should be retried up to 3 times before + giving up. This test fails all 3 attempts and should verify that we + delete uploaded segments that begin with the object prefix. + """ + max_file_size = 25 + min_file_size = 1 + + uris_to_mock = [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size}, + ), + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=404, + ), + ] + + uris_to_mock.extend( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/{index:0>6}', + status_code=201, + headers=dict(Etag=f'etag{index}'), + ) + for index, offset in enumerate( + range(0, len(self.content), max_file_size) + ) + ] + ) + + # manifest file upload calls + uris_to_mock.extend( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=400, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=400, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ), + dict( + method='PUT', + 
uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=400, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ), + ] + ) + + # Cleaning up image upload segments involves calling the + # delete_autocreated_image_objects() API method which will list + # objects (LIST), get the object metadata (HEAD), then delete the + # object (DELETE). + uris_to_mock.extend( + [ + dict( + method='GET', + uri=f'{self.endpoint}/images?format=json&prefix={self.object}', + complete_qs=True, + json=[ + { + 'content_type': 'application/octet-stream', + 'bytes': 1437258240, + 'hash': '249219347276c331b87bf1ac2152d9af', + 'last_modified': '2015-02-16T17:50:05.289600', + 'name': self.object, + } + ], + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/images/{self.object}', + headers={ + 'X-Timestamp': '1429036140.50253', + 'X-Trans-Id': 'txbbb825960a3243b49a36f-005a0dadaedfw1', + 'Content-Length': '1290170880', + 'Last-Modified': 'Tue, 14 Apr 2015 18:29:01 GMT', + 'X-Object-Meta-x-sdk-autocreated': 'true', + 'X-Object-Meta-X-Shade-Sha256': 'does not matter', + 'X-Object-Meta-X-Shade-Md5': 'does not matter', + 'Date': 'Thu, 16 Nov 2017 15:24:30 GMT', + 'Accept-Ranges': 'bytes', + 'X-Static-Large-Object': 'false', + 'Content-Type': 'application/octet-stream', + 'Etag': '249219347276c331b87bf1ac2152d9af', + }, + ), + dict( + method='DELETE', + uri=f'{self.endpoint}/images/{self.object}', + ), + ] + ) + + self.register_uris(uris_to_mock) + + # image_api_use_tasks needs to be set to True in order for the API + # method delete_autocreated_image_objects() to do the cleanup. 
+ self.cloud.image_api_use_tasks = True + + self.assertRaises( + exceptions.SDKException, + self.cloud.create_object, + container=self.container, + name=self.object, + filename=self.object_file.name, + use_slo=True, + ) + + # After call 3, order become indeterminate because of thread pool + self.assert_calls(stop_after=3) + + def test_object_segment_retry_failure(self): + max_file_size = 25 + min_file_size = 1 + + self.register_uris( + [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size}, + ), + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=404, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000000', + status_code=201, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000001', + status_code=201, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000002', + status_code=201, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000003', + status_code=501, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.create_object, + container=self.container, + name=self.object, + filename=self.object_file.name, + use_slo=True, + ) + + # After call 3, order become indeterminate because of thread pool + self.assert_calls(stop_after=3) + + def test_object_segment_retries(self): + max_file_size = 25 + min_file_size = 1 + + self.register_uris( + [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + swift={'max_file_size': max_file_size}, + slo={'min_segment_size': min_file_size}, + ), + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=404, + ), + dict( + method='PUT', + 
uri=f'{self.endpoint}/{self.container}/{self.object}/000000', + headers={'etag': 'etag0'}, + status_code=201, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000001', + headers={'etag': 'etag1'}, + status_code=201, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000002', + headers={'etag': 'etag2'}, + status_code=201, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000003', + status_code=501, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}/000003', + status_code=201, + headers={'etag': 'etag3'}, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict( + params={'multipart-manifest', 'put'}, + headers={ + 'x-object-meta-x-sdk-md5': self.md5, + 'x-object-meta-x-sdk-sha256': self.sha256, + }, + ), + ), + ] + ) + + self.cloud.create_object( + container=self.container, + name=self.object, + filename=self.object_file.name, + use_slo=True, + ) + + # After call 3, order become indeterminate because of thread pool + self.assert_calls(stop_after=3) + + for key, value in self.calls[-1]['headers'].items(): + self.assertEqual( + value, + self.adapter.request_history[-1].headers[key], + 'header mismatch in manifest call', + ) + + base_object = f'/{self.container}/{self.object}' + + self.assertEqual( + [ + { + 'path': f"{base_object}/000000", + 'size_bytes': 25, + 'etag': 'etag0', + }, + { + 'path': f"{base_object}/000001", + 'size_bytes': 25, + 'etag': 'etag1', + }, + { + 'path': f"{base_object}/000002", + 'size_bytes': 25, + 'etag': 'etag2', + }, + { + 'path': f"{base_object}/000003", + 'size_bytes': len(self.object) - 75, + 'etag': 'etag3', + }, + ], + self.adapter.request_history[-1].json(), + ) + + def test_create_object_skip_checksum(self): + self.register_uris( + [ + dict( + method='GET', + uri='https://object-store.example.com/info', + json=dict( + 
swift={'max_file_size': 1000}, + slo={'min_segment_size': 500}, + ), + ), + dict( + method='HEAD', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=200, + ), + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict(headers={}), + ), + ] + ) + + self.cloud.create_object( + container=self.container, + name=self.object, + filename=self.object_file.name, + generate_checksums=False, + ) + + self.assert_calls() + + def test_create_object_data(self): + self.register_uris( + [ + dict( + method='PUT', + uri=f'{self.endpoint}/{self.container}/{self.object}', + status_code=201, + validate=dict( + headers={}, + data=self.content, + ), + ), + ] + ) + + self.cloud.create_object( + container=self.container, name=self.object, data=self.content + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_openstackcloud.py b/openstack/tests/unit/cloud/test_openstackcloud.py new file mode 100644 index 0000000000..c7e3a3dc6e --- /dev/null +++ b/openstack/tests/unit/cloud/test_openstackcloud.py @@ -0,0 +1,115 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack import exceptions +from openstack import proxy +from openstack import resource +from openstack.tests.unit import base + + +class TestSearch(base.TestCase): + class FakeResource(resource.Resource): + allow_fetch = True + allow_list = True + + foo = resource.Body("foo") + + def setUp(self): + super().setUp() + + self.session = proxy.Proxy(self.cloud) + self.session._sdk_connection = self.cloud + self.session._get = mock.Mock() + self.session._list = mock.Mock() + self.session._resource_registry = dict(fake=self.FakeResource) + # Set the mock into the cloud connection + setattr(self.cloud, "mock_session", self.session) + + def test_raises_unknown_service(self): + self.assertRaises( + exceptions.SDKException, + self.cloud.search_resources, + "wrong_service.wrong_resource", + "name", + ) + + def test_raises_unknown_resource(self): + self.assertRaises( + exceptions.SDKException, + self.cloud.search_resources, + "mock_session.wrong_resource", + "name", + ) + + def test_search_resources_get_finds(self): + self.session._get.return_value = self.FakeResource(foo="bar") + + ret = self.cloud.search_resources("mock_session.fake", "fake_name") + self.session._get.assert_called_with(self.FakeResource, "fake_name") + + self.assertEqual(1, len(ret)) + self.assertEqual( + self.FakeResource(foo="bar").to_dict(), ret[0].to_dict() + ) + + def test_search_resources_list(self): + self.session._get.side_effect = exceptions.NotFoundException + self.session._list.return_value = [self.FakeResource(foo="bar")] + + ret = self.cloud.search_resources("mock_session.fake", "fake_name") + self.session._get.assert_called_with(self.FakeResource, "fake_name") + self.session._list.assert_called_with( + self.FakeResource, name="fake_name" + ) + + self.assertEqual(1, len(ret)) + self.assertEqual( + self.FakeResource(foo="bar").to_dict(), ret[0].to_dict() + ) + + def test_search_resources_args(self): + self.session._get.side_effect = 
exceptions.NotFoundException + self.session._list.return_value = [] + + self.cloud.search_resources( + "mock_session.fake", + "fake_name", + get_args=["getarg1"], + get_kwargs={"getkwarg1": "1"}, + list_args=["listarg1"], + list_kwargs={"listkwarg1": "1"}, + filter1="foo", + ) + self.session._get.assert_called_with( + self.FakeResource, "fake_name", "getarg1", getkwarg1="1" + ) + self.session._list.assert_called_with( + self.FakeResource, + "listarg1", + listkwarg1="1", + name="fake_name", + filter1="foo", + ) + + def test_search_resources_name_empty(self): + self.session._list.return_value = [self.FakeResource(foo="bar")] + + ret = self.cloud.search_resources("mock_session.fake", None, foo="bar") + self.session._get.assert_not_called() + self.session._list.assert_called_with(self.FakeResource, foo="bar") + + self.assertEqual(1, len(ret)) + self.assertEqual( + self.FakeResource(foo="bar").to_dict(), ret[0].to_dict() + ) diff --git a/openstack/tests/unit/cloud/test_operator.py b/openstack/tests/unit/cloud/test_operator.py new file mode 100644 index 0000000000..d828437447 --- /dev/null +++ b/openstack/tests/unit/cloud/test_operator.py @@ -0,0 +1,197 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock +import uuid + +import testtools + +from openstack.config import cloud_region +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestOperatorCloud(base.TestCase): + def test_get_image_name(self): + self.use_glance() + + image_id = self.getUniqueString() + fake_image = fakes.make_fake_image(image_id=image_id) + list_return = {'images': [fake_image]} + + self.register_uris( + [ + dict( + method='GET', + uri='https://image.example.com/v2/images', + json=list_return, + ), + dict( + method='GET', + uri='https://image.example.com/v2/images', + json=list_return, + ), + ] + ) + + self.assertEqual('fake_image', self.cloud.get_image_name(image_id)) + self.assertEqual('fake_image', self.cloud.get_image_name('fake_image')) + + self.assert_calls() + + def test_get_image_id(self): + self.use_glance() + + image_id = self.getUniqueString() + fake_image = fakes.make_fake_image(image_id=image_id) + list_return = {'images': [fake_image]} + + self.register_uris( + [ + dict( + method='GET', + uri='https://image.example.com/v2/images', + json=list_return, + ), + dict( + method='GET', + uri='https://image.example.com/v2/images', + json=list_return, + ), + ] + ) + + self.assertEqual(image_id, self.cloud.get_image_id(image_id)) + self.assertEqual(image_id, self.cloud.get_image_id('fake_image')) + + self.assert_calls() + + @mock.patch.object(cloud_region.CloudRegion, 'get_session') + def test_get_session_endpoint_exception(self, get_session_mock): + class FakeException(Exception): + pass + + def side_effect(*args, **kwargs): + raise FakeException("No service") + + session_mock = mock.Mock() + session_mock.get_endpoint.side_effect = side_effect + get_session_mock.return_value = session_mock + self.cloud.name = 'testcloud' + self.cloud.config.config['region_name'] = 'testregion' + with testtools.ExpectedException( + exceptions.SDKException, + "Error getting image endpoint on testcloud:testregion: 
No service", + ): + self.cloud.get_session_endpoint("image") + + @mock.patch.object(cloud_region.CloudRegion, 'get_session') + def test_get_session_endpoint_unavailable(self, get_session_mock): + session_mock = mock.Mock() + session_mock.get_endpoint.return_value = None + get_session_mock.return_value = session_mock + image_endpoint = self.cloud.get_session_endpoint("image") + self.assertIsNone(image_endpoint) + + @mock.patch.object(cloud_region.CloudRegion, 'get_session') + def test_get_session_endpoint_identity(self, get_session_mock): + session_mock = mock.Mock() + get_session_mock.return_value = session_mock + self.cloud.get_session_endpoint('identity') + kwargs = dict( + service_type='identity', + region_name='RegionOne', + interface='public', + service_name=None, + min_version=None, + max_version=None, + ) + + session_mock.get_endpoint.assert_called_with(**kwargs) + + @mock.patch.object(cloud_region.CloudRegion, 'get_session') + def test_has_service_no(self, get_session_mock): + session_mock = mock.Mock() + session_mock.get_endpoint.return_value = None + get_session_mock.return_value = session_mock + self.assertFalse(self.cloud.has_service("image")) + + @mock.patch.object(cloud_region.CloudRegion, 'get_session') + def test_has_service_yes(self, get_session_mock): + session_mock = mock.Mock() + session_mock.get_endpoint.return_value = 'http://fake.url' + get_session_mock.return_value = session_mock + self.assertTrue(self.cloud.has_service("image")) + + def test_list_hypervisors(self): + '''This test verifies that calling list_hypervisors results in a call + to nova client.''' + uuid1 = uuid.uuid4().hex + uuid2 = uuid.uuid4().hex + self.use_compute_discovery() + self.register_uris( + [ + dict( + method='GET', + uri='https://compute.example.com/v2.1/os-hypervisors/detail', + json={ + 'hypervisors': [ + fakes.make_fake_hypervisor(uuid1, 'testserver1'), + fakes.make_fake_hypervisor(uuid2, 'testserver2'), + ] + }, + validate={ + 'headers': {'OpenStack-API-Version': 
'compute 2.53'} + }, + ), + ] + ) + + r = self.cloud.list_hypervisors() + + self.assertEqual(2, len(r)) + self.assertEqual('testserver1', r[0]['name']) + self.assertEqual(uuid1, r[0]['id']) + self.assertEqual('testserver2', r[1]['name']) + self.assertEqual(uuid2, r[1]['id']) + + self.assert_calls() + + def test_list_old_hypervisors(self): + '''This test verifies that calling list_hypervisors on a pre-2.53 cloud + calls the old version.''' + self.use_compute_discovery( + compute_version_json='old-compute-version.json' + ) + self.register_uris( + [ + dict( + method='GET', + uri='https://compute.example.com/v2.1/os-hypervisors/detail', + json={ + 'hypervisors': [ + fakes.make_fake_hypervisor('1', 'testserver1'), + fakes.make_fake_hypervisor('2', 'testserver2'), + ] + }, + ), + ] + ) + + r = self.cloud.list_hypervisors() + + self.assertEqual(2, len(r)) + self.assertEqual('testserver1', r[0]['name']) + self.assertEqual('testserver2', r[1]['name']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_operator_noauth.py b/openstack/tests/unit/cloud/test_operator_noauth.py new file mode 100644 index 0000000000..3d1e65f3e2 --- /dev/null +++ b/openstack/tests/unit/cloud/test_operator_noauth.py @@ -0,0 +1,262 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openstack.cloud +from openstack.tests.unit import base + + +class TestOpenStackCloudOperatorNoAuth(base.TestCase): + def setUp(self): + """Setup Noauth OpenStackCloud tests + + Setup the test to utilize no authentication and an endpoint + URL in the auth data. This is permits testing of the basic + mechanism that enables Ironic noauth mode to be utilized with + Shade. + + Uses base.TestCase instead of IronicTestCase because + we need to do completely different things with discovery. + """ + super().setUp() + # By clearing the URI registry, we remove all calls to a keystone + # catalog or getting a token + self._uri_registry.clear() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + service_type='baremetal', base_url_append='v1' + ), + json={ + 'id': 'v1', + 'links': [ + { + "href": "https://baremetal.example.com/v1", + "rel": "self", + } + ], + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + service_type='baremetal', + base_url_append='v1', + resource='nodes', + ), + json={'nodes': []}, + ), + ] + ) + + def test_ironic_noauth_none_auth_type(self): + """Test noauth selection for Ironic in OpenStackCloud + + The new way of doing this is with the keystoneauth none plugin. + """ + # NOTE(TheJulia): When we are using the python-ironicclient + # library, the library will automatically prepend the URI path + # with 'v1'. As such, since we are overriding the endpoint, + # we must explicitly do the same as we move away from the + # client library. 
+ self.cloud_noauth = openstack.connect( + auth_type='none', + baremetal_endpoint_override="https://baremetal.example.com/v1", + ) + + self.cloud_noauth.list_machines() + + self.assert_calls() + + def test_ironic_noauth_auth_endpoint(self): + """Test noauth selection for Ironic in OpenStackCloud + + Sometimes people also write clouds.yaml files that look like this: + + :: + clouds: + bifrost: + auth_type: "none" + endpoint: https://baremetal.example.com + """ + self.cloud_noauth = openstack.connect( + auth_type='none', + endpoint='https://baremetal.example.com/v1', + ) + + self.cloud_noauth.list_machines() + + self.assert_calls() + + def test_ironic_noauth_admin_token_auth_type(self): + """Test noauth selection for Ironic in OpenStackCloud + + The old way of doing this was to abuse admin_token. + """ + self.cloud_noauth = openstack.connect( + auth_type='admin_token', + auth=dict( + endpoint='https://baremetal.example.com/v1', token='ignored' + ), + ) + + self.cloud_noauth.list_machines() + + self.assert_calls() + + +class TestOpenStackCloudOperatorNoAuthUnversioned(base.TestCase): + def setUp(self): + """Setup Noauth OpenStackCloud tests for unversioned endpoints + + Setup the test to utilize no authentication and an endpoint + URL in the auth data. This is permits testing of the basic + mechanism that enables Ironic noauth mode to be utilized with + Shade. + + Uses base.TestCase instead of IronicTestCase because + we need to do completely different things with discovery. 
+ """ + super().setUp() + # By clearing the URI registry, we remove all calls to a keystone + # catalog or getting a token + self._uri_registry.clear() + self.register_uris( + [ + dict( + method='GET', + uri='https://baremetal.example.com/', + json={ + "default_version": { + "status": "CURRENT", + "min_version": "1.1", + "version": "1.46", + "id": "v1", + "links": [ + { + "href": "https://baremetal.example.com/v1", + "rel": "self", + } + ], + }, + "versions": [ + { + "status": "CURRENT", + "min_version": "1.1", + "version": "1.46", + "id": "v1", + "links": [ + { + "href": "https://baremetal.example.com/v1", + "rel": "self", + } + ], + } + ], + "name": "OpenStack Ironic API", + "description": "Ironic is an OpenStack project.", + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + service_type='baremetal', base_url_append='v1' + ), + json={ + "media_types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.ironic.v1+json", # noqa: E501 + } + ], + "links": [ + { + "href": "https://baremetal.example.com/v1", + "rel": "self", + } + ], + "ports": [ + { + "href": "https://baremetal.example.com/v1/ports/", + "rel": "self", + }, + { + "href": "https://baremetal.example.com/ports/", + "rel": "bookmark", + }, + ], + "nodes": [ + { + "href": "https://baremetal.example.com/v1/nodes/", + "rel": "self", + }, + { + "href": "https://baremetal.example.com/nodes/", + "rel": "bookmark", + }, + ], + "id": "v1", + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + service_type='baremetal', + base_url_append='v1', + resource='nodes', + ), + json={'nodes': []}, + ), + ] + ) + + def test_ironic_noauth_none_auth_type(self): + """Test noauth selection for Ironic in OpenStackCloud + + The new way of doing this is with the keystoneauth none plugin. + """ + # NOTE(TheJulia): When we are using the python-ironicclient + # library, the library will automatically prepend the URI path + # with 'v1'. 
As such, since we are overriding the endpoint, + # we must explicitly do the same as we move away from the + # client library. + self.cloud_noauth = openstack.connect( + auth_type='none', + baremetal_endpoint_override="https://baremetal.example.com", + ) + + self.cloud_noauth.list_machines() + + self.assert_calls() + + def test_ironic_noauth_auth_endpoint(self): + """Test noauth selection for Ironic in OpenStackCloud + + Sometimes people also write clouds.yaml files that look like this: + + :: + clouds: + bifrost: + auth_type: "none" + endpoint: https://baremetal.example.com + """ + self.cloud_noauth = openstack.connect( + auth_type='none', + endpoint='https://baremetal.example.com/', + ) + + self.cloud_noauth.list_machines() + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_port.py b/openstack/tests/unit/cloud/test_port.py new file mode 100644 index 0000000000..7be6f693ff --- /dev/null +++ b/openstack/tests/unit/cloud/test_port.py @@ -0,0 +1,568 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +""" +test_port +---------------------------------- + +Test port resource (managed by neutron) +""" + +from openstack import exceptions +from openstack.network.v2 import port as _port +from openstack.tests.unit import base + + +class TestPort(base.TestCase): + mock_neutron_port_create_rep = { + 'port': { + 'status': 'DOWN', + 'binding:host_id': '', + 'name': 'test-port-name', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': 'test-net-id', + 'project_id': 'test-project-id', + 'binding:vif_details': {}, + 'binding:vnic_type': 'normal', + 'binding:vif_type': 'unbound', + 'extra_dhcp_opts': [], + 'device_owner': '', + 'mac_address': '50:1c:0d:e4:f0:0d', + 'binding:profile': {}, + 'fixed_ips': [ + {'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'} + ], + 'id': 'test-port-id', + 'security_groups': [], + 'device_id': '', + } + } + + mock_neutron_port_update_rep = { + 'port': { + 'status': 'DOWN', + 'binding:host_id': '', + 'name': 'test-port-name-updated', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': 'test-net-id', + 'project_id': 'test-project-id', + 'binding:vif_details': {}, + 'extra_dhcp_opts': [], + 'binding:vnic_type': 'normal', + 'binding:vif_type': 'unbound', + 'device_owner': '', + 'mac_address': '50:1c:0d:e4:f0:0d', + 'binding:profile': {}, + 'fixed_ips': [ + {'subnet_id': 'test-subnet-id', 'ip_address': '29.29.29.29'} + ], + 'id': 'test-port-id', + 'security_groups': [], + 'device_id': '', + } + } + + mock_neutron_port_list_rep = { + 'ports': [ + { + 'status': 'ACTIVE', + 'binding:host_id': 'devstack', + 'name': 'first-port', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': '70c1db1f-b701-45bd-96e0-a313ee3430b3', + 'project_id': '', + 'extra_dhcp_opts': [], + 'binding:vif_details': { + 'port_filter': True, + 'ovs_hybrid_plug': True, + }, + 'binding:vif_type': 'ovs', + 'device_owner': 'network:router_gateway', + 'mac_address': 'fa:16:3e:58:42:ed', + 'binding:profile': {}, + 
'binding:vnic_type': 'normal', + 'fixed_ips': [ + { + 'subnet_id': '008ba151-0b8c-4a67-98b5-0d2b87666062', + 'ip_address': '172.24.4.2', + } + ], + 'id': 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', + 'security_groups': [], + 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824', + }, + { + 'status': 'ACTIVE', + 'binding:host_id': 'devstack', + 'name': '', + 'allowed_address_pairs': [], + 'admin_state_up': True, + 'network_id': 'f27aa545-cbdd-4907-b0c6-c9e8b039dcc2', + 'project_id': 'd397de8a63f341818f198abb0966f6f3', + 'extra_dhcp_opts': [], + 'binding:vif_details': { + 'port_filter': True, + 'ovs_hybrid_plug': True, + }, + 'binding:vif_type': 'ovs', + 'device_owner': 'network:router_interface', + 'mac_address': 'fa:16:3e:bb:3c:e4', + 'binding:profile': {}, + 'binding:vnic_type': 'normal', + 'fixed_ips': [ + { + 'subnet_id': '288bf4a1-51ba-43b6-9d0a-520e9005db17', + 'ip_address': '10.0.0.1', + } + ], + 'id': 'f71a6703-d6de-4be1-a91a-a570ede1d159', + 'security_groups': [], + 'device_id': '9ae135f4-b6e0-4dad-9e91-3c223e385824', + }, + ] + } + + def _compare_ports(self, exp, real): + self.assertDictEqual( + _port.Port(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_create_port(self): + self.register_uris( + [ + dict( + method="POST", + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + json=self.mock_neutron_port_create_rep, + validate=dict( + json={ + 'port': { + 'network_id': 'test-net-id', + 'name': 'test-port-name', + 'admin_state_up': True, + } + } + ), + ) + ] + ) + port = self.cloud.create_port( + network_id='test-net-id', + name='test-port-name', + admin_state_up=True, + ) + self._compare_ports(self.mock_neutron_port_create_rep['port'], port) + self.assert_calls() + + def test_create_port_parameters(self): + """Test that we detect invalid arguments passed to create_port""" + self.assertRaises( + TypeError, + self.cloud.create_port, + network_id='test-net-id', + nome='test-port-name', + 
stato_amministrativo_porta=True, + ) + + def test_create_port_exception(self): + self.register_uris( + [ + dict( + method="POST", + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + status_code=500, + validate=dict( + json={ + 'port': { + 'network_id': 'test-net-id', + 'name': 'test-port-name', + 'admin_state_up': True, + } + } + ), + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_port, + network_id='test-net-id', + name='test-port-name', + admin_state_up=True, + ) + self.assert_calls() + + def test_create_port_with_project(self): + self.mock_neutron_port_create_rep["port"].update( + { + 'project_id': 'test-project-id', + } + ) + self.register_uris( + [ + dict( + method="POST", + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + json=self.mock_neutron_port_create_rep, + validate=dict( + json={ + 'port': { + 'network_id': 'test-net-id', + 'project_id': 'test-project-id', + 'name': 'test-port-name', + 'admin_state_up': True, + } + } + ), + ) + ] + ) + port = self.cloud.create_port( + network_id='test-net-id', + name='test-port-name', + admin_state_up=True, + project_id='test-project-id', + ) + self._compare_ports(self.mock_neutron_port_create_rep['port'], port) + self.assert_calls() + + def test_update_port(self): + port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports', port_id] + ), + json=dict( + port=self.mock_neutron_port_list_rep['ports'][0] + ), + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports', port_id] + ), + json=self.mock_neutron_port_update_rep, + validate=dict( + json={'port': {'name': 'test-port-name-updated'}} + ), + ), + ] + ) + port = self.cloud.update_port( + name_or_id=port_id, name='test-port-name-updated' + ) + + self._compare_ports(self.mock_neutron_port_update_rep['port'], port) + 
self.assert_calls() + + def test_update_port_parameters(self): + """Test that we detect invalid arguments passed to update_port""" + self.assertRaises( + TypeError, + self.cloud.update_port, + name_or_id='test-port-id', + nome='test-port-name-updated', + ) + + def test_update_port_exception(self): + port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports', port_id] + ), + json=self.mock_neutron_port_list_rep, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports', port_id] + ), + status_code=500, + validate=dict( + json={'port': {'name': 'test-port-name-updated'}} + ), + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.update_port, + name_or_id='d80b1a3b-4fc1-49f3-952e-1e2ab7081d8b', + name='test-port-name-updated', + ) + self.assert_calls() + + def test_list_ports(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + json=self.mock_neutron_port_list_rep, + ) + ] + ) + ports = self.cloud.list_ports() + for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports): + self._compare_ports(a, b) + self.assert_calls() + + def test_list_ports_filtered(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=['status=DOWN'], + ), + json=self.mock_neutron_port_list_rep, + ) + ] + ) + ports = self.cloud.list_ports(filters={'status': 'DOWN'}) + for a, b in zip(self.mock_neutron_port_list_rep['ports'], ports): + self._compare_ports(a, b) + self.assert_calls() + + def test_list_ports_exception(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + status_code=500, + ) + ] + ) + self.assertRaises(exceptions.SDKException, self.cloud.list_ports) 
+ + def test_search_ports_by_id(self): + port_id = 'f71a6703-d6de-4be1-a91a-a570ede1d159' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + json=self.mock_neutron_port_list_rep, + ) + ] + ) + ports = self.cloud.search_ports(name_or_id=port_id) + + self.assertEqual(1, len(ports)) + self.assertEqual('fa:16:3e:bb:3c:e4', ports[0]['mac_address']) + self.assert_calls() + + def test_search_ports_by_name(self): + port_name = "first-port" + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + json=self.mock_neutron_port_list_rep, + ) + ] + ) + ports = self.cloud.search_ports(name_or_id=port_name) + + self.assertEqual(1, len(ports)) + self.assertEqual('fa:16:3e:58:42:ed', ports[0]['mac_address']) + self.assert_calls() + + def test_search_ports_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports'] + ), + json=self.mock_neutron_port_list_rep, + ) + ] + ) + ports = self.cloud.search_ports(name_or_id='non-existent') + self.assertEqual(0, len(ports)) + self.assert_calls() + + def test_delete_port(self): + port_id = 'd80b1a3b-4fc1-49f3-952e-1e2ab7081d8b' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports', 'first-port'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=['name=first-port'], + ), + json=self.mock_neutron_port_list_rep, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'ports', port_id] + ), + json={}, + ), + ] + ) + + self.assertTrue(self.cloud.delete_port(name_or_id='first-port')) + + def test_delete_port_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 
'public', + append=['v2.0', 'ports', 'non-existent'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=['name=non-existent'], + ), + json={'ports': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_port(name_or_id='non-existent')) + self.assert_calls() + + def test_delete_subnet_multiple_found(self): + port_name = "port-name" + port1 = dict(id='123', name=port_name) + port2 = dict(id='456', name=port_name) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports', port_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=[f'name={port_name}'], + ), + json={'ports': [port1, port2]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, self.cloud.delete_port, port_name + ) + self.assert_calls() + + def test_delete_subnet_multiple_using_id(self): + port_name = "port-name" + port1 = dict(id='123', name=port_name) + port2 = dict(id='456', name=port_name) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports', port1['id']], + ), + json={'ports': [port1, port2]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports', port1['id']], + ), + json={}, + ), + ] + ) + self.assertTrue(self.cloud.delete_port(name_or_id=port1['id'])) + self.assert_calls() + + def test_get_port_by_id(self): + fake_port = dict(id='123', name='456') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports', fake_port['id']], + ), + json={'port': fake_port}, + ) + ] + ) + r = self.cloud.get_port_by_id(fake_port['id']) + self.assertIsNotNone(r) + self._compare_ports(fake_port, r) + self.assert_calls() diff --git 
a/openstack/tests/unit/cloud/test_project.py b/openstack/tests/unit/cloud/test_project.py new file mode 100644 index 0000000000..8afa40b674 --- /dev/null +++ b/openstack/tests/unit/cloud/test_project.py @@ -0,0 +1,266 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +import testtools +from testtools import matchers + +from openstack import exceptions +from openstack.tests.unit import base + + +class TestProject(base.TestCase): + def get_mock_url( + self, + service_type='identity', + interface='public', + resource=None, + append=None, + base_url_append=None, + v3=True, + qs_elements=None, + ): + if v3 and resource is None: + resource = 'projects' + elif not v3 and resource is None: + resource = 'tenants' + if base_url_append is None and v3: + base_url_append = 'v3' + return super().get_mock_url( + service_type=service_type, + interface=interface, + resource=resource, + append=append, + base_url_append=base_url_append, + qs_elements=qs_elements, + ) + + def test_create_project_v3( + self, + ): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc'), + parent_id=uuid.uuid4().hex, + ) + reference_req = project_data.json_request.copy() + reference_req['project']['enabled'] = True + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=project_data.json_response, + validate=dict(json=reference_req), + ) + ] + ) + project = self.cloud.create_project( + 
name=project_data.project_name, + description=project_data.description, + domain_id=project_data.domain_id, + parent_id=project_data.parent_id, + ) + self.assertThat(project.id, matchers.Equals(project_data.project_id)) + self.assertThat( + project.name, matchers.Equals(project_data.project_name) + ) + self.assertThat( + project.description, matchers.Equals(project_data.description) + ) + self.assertThat( + project.domain_id, matchers.Equals(project_data.domain_id) + ) + self.assert_calls() + + def test_delete_project_v3(self): + project_data = self._get_project_data(v3=False) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[project_data.project_id]), + status_code=200, + json=project_data.json_response, + ), + dict( + method='DELETE', + uri=self.get_mock_url(append=[project_data.project_id]), + status_code=204, + ), + ] + ) + self.cloud.delete_project(project_data.project_id) + self.assert_calls() + + def test_update_project_not_found(self): + project_data = self._get_project_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[project_data.project_id]), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + qs_elements=['name=' + project_data.project_id] + ), + status_code=200, + json={'projects': []}, + ), + ] + ) + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.update_project(project_data.project_id) + self.assert_calls() + + def test_update_project_v3(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc') + ) + reference_req = project_data.json_request.copy() + # Remove elements not actually sent in the update + reference_req['project'].pop('domain_id') + reference_req['project'].pop('name') + reference_req['project'].pop('enabled') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + append=[project_data.project_id], + qs_elements=['domain_id=' + project_data.domain_id], + ), 
+ status_code=200, + json={'projects': [project_data.json_response['project']]}, + ), + dict( + method='PATCH', + uri=self.get_mock_url(append=[project_data.project_id]), + status_code=200, + json=project_data.json_response, + validate=dict(json=reference_req), + ), + ] + ) + project = self.cloud.update_project( + project_data.project_id, + description=project_data.description, + domain_id=project_data.domain_id, + ) + self.assertThat(project.id, matchers.Equals(project_data.project_id)) + self.assertThat( + project.name, matchers.Equals(project_data.project_name) + ) + self.assertThat( + project.description, matchers.Equals(project_data.description) + ) + self.assert_calls() + + def test_list_projects_v3(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc') + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource=( + f'projects?domain_id={project_data.domain_id}' + ) + ), + status_code=200, + json={'projects': [project_data.json_response['project']]}, + ) + ] + ) + projects = self.cloud.list_projects(project_data.domain_id) + self.assertThat(len(projects), matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id) + ) + self.assert_calls() + + def test_list_projects_v3_kwarg(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc') + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource=( + f'projects?domain_id={project_data.domain_id}' + ) + ), + status_code=200, + json={'projects': [project_data.json_response['project']]}, + ) + ] + ) + projects = self.cloud.list_projects(domain_id=project_data.domain_id) + self.assertThat(len(projects), matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id) + ) + self.assert_calls() + + def test_list_projects_search_compat(self): + project_data = self._get_project_data( + 
description=self.getUniqueString('projectDesc') + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={'projects': [project_data.json_response['project']]}, + ) + ] + ) + projects = self.cloud.search_projects(project_data.project_id) + self.assertThat(len(projects), matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id) + ) + self.assert_calls() + + def test_list_projects_search_compat_v3(self): + project_data = self._get_project_data( + description=self.getUniqueString('projectDesc') + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource=( + f'projects?domain_id={project_data.domain_id}' + ) + ), + status_code=200, + json={'projects': [project_data.json_response['project']]}, + ) + ] + ) + projects = self.cloud.search_projects(domain_id=project_data.domain_id) + self.assertThat(len(projects), matchers.Equals(1)) + self.assertThat( + projects[0].id, matchers.Equals(project_data.project_id) + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py b/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py new file mode 100644 index 0000000000..3cdffd4e5d --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_bandwidth_limit_rule.py @@ -0,0 +1,620 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy + +from openstack import exceptions +from openstack.network.v2 import qos_bandwidth_limit_rule +from openstack.tests.unit import base + + +class TestQosBandwidthLimitRule(base.TestCase): + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' + rule_max_kbps = 1000 + rule_max_burst = 100 + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False, + } + + mock_rule = { + 'id': rule_id, + 'max_kbps': rule_max_kbps, + 'max_burst_kbps': rule_max_burst, + 'direction': 'egress', + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension.", + } + + qos_bw_limit_direction_extension = { + "updated": "2017-04-10T10:00:00-00:00", + "name": "Direction for QoS bandwidth limit rule", + "links": [], + "alias": "qos-bw-limit-direction", + "description": ( + "Allow to configure QoS bandwidth limit rule with " + "specific direction: ingress or egress" + ), + } + + enabled_neutron_extensions = [ + qos_extension, + qos_bw_limit_direction_extension, + ] + + def _compare_rules(self, exp, real): + self.assertDictEqual( + qos_bandwidth_limit_rule.QoSBandwidthLimitRule(**exp).to_dict( + computed=False + ), + real.to_dict(computed=False), + ) + + def test_get_qos_bandwidth_limit_rule(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( 
+ 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'bandwidth_limit_rules', + self.rule_id, + ], + ), + json={'bandwidth_limit_rule': self.mock_rule}, + ), + ] + ) + r = self.cloud.get_qos_bandwidth_limit_rule( + self.policy_name, self.rule_id + ) + self._compare_rules(self.mock_rule, r) + self.assert_calls() + + def test_get_qos_bandwidth_limit_rule_no_qos_policy_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': []}, + ), + ] + ) + self.assertRaises( + exceptions.NotFoundException, + self.cloud.get_qos_bandwidth_limit_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_get_qos_bandwidth_limit_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.get_qos_bandwidth_limit_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_create_qos_bandwidth_limit_rule(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'bandwidth_limit_rules', + ], + ), + json={'bandwidth_limit_rule': self.mock_rule}, + ), + ] + ) + rule = self.cloud.create_qos_bandwidth_limit_rule( + self.policy_name, max_kbps=self.rule_max_kbps + ) + self._compare_rules(self.mock_rule, rule) + self.assert_calls() + + def test_create_qos_bandwidth_limit_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_qos_bandwidth_limit_rule, + self.policy_name, + max_kbps=100, + ) + self.assert_calls() + + def test_create_qos_bandwidth_limit_rule_no_qos_direction_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', + 
'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'bandwidth_limit_rules', + ], + ), + json={'bandwidth_limit_rule': self.mock_rule}, + ), + ] + ) + rule = self.cloud.create_qos_bandwidth_limit_rule( + self.policy_name, max_kbps=self.rule_max_kbps, direction="ingress" + ) + self._compare_rules(self.mock_rule, rule) + self.assert_calls() + + def test_update_qos_bandwidth_limit_rule(self): + expected_rule = copy.copy(self.mock_rule) + expected_rule['max_kbps'] = self.rule_max_kbps + 100 + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json=self.mock_policy, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'bandwidth_limit_rules', + self.rule_id, + ], + ), + json={'bandwidth_limit_rule': self.mock_rule}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'bandwidth_limit_rules', + self.rule_id, + ], + ), + json={'bandwidth_limit_rule': expected_rule}, + validate=dict( + json={ + 'bandwidth_limit_rule': { + 'max_kbps': self.rule_max_kbps + 100 + } + } + ), + ), + ] + ) + rule = self.cloud.update_qos_bandwidth_limit_rule( + self.policy_id, self.rule_id, max_kbps=self.rule_max_kbps + 100 + ) + self._compare_rules(expected_rule, rule) + self.assert_calls() + + def test_update_qos_bandwidth_limit_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.update_qos_bandwidth_limit_rule, + self.policy_id, 
+            self.rule_id,
+            max_kbps=2000,
+        )
+        self.assert_calls()
+
+    def test_update_qos_bandwidth_limit_rule_no_qos_direction_extension(self):
+        expected_rule = copy.copy(self.mock_rule)
+        expected_rule['max_kbps'] = self.rule_max_kbps + 100
+        self.register_uris(
+            [
+                dict(
+                    method='GET',
+                    uri=self.get_mock_url(
+                        'network', 'public', append=['v2.0', 'extensions']
+                    ),
+                    json={'extensions': [self.qos_extension]},
+                ),
+                dict(
+                    method='GET',
+                    uri=self.get_mock_url(
+                        'network',
+                        'public',
+                        append=['v2.0', 'qos', 'policies', self.policy_id],
+                    ),
+                    json=self.mock_policy,
+                ),
+                dict(
+                    method='GET',
+                    uri=self.get_mock_url(
+                        'network', 'public', append=['v2.0', 'extensions']
+                    ),
+                    json={'extensions': [self.qos_extension]},
+                ),
+                dict(
+                    method='GET',
+                    uri=self.get_mock_url(
+                        'network',
+                        'public',
+                        append=[
+                            'v2.0',
+                            'qos',
+                            'policies',
+                            self.policy_id,
+                            'bandwidth_limit_rules',
+                            self.rule_id,
+                        ],
+                    ),
+                    json={'bandwidth_limit_rule': self.mock_rule},
+                ),
+                dict(
+                    method='PUT',
+                    uri=self.get_mock_url(
+                        'network',
+                        'public',
+                        append=[
+                            'v2.0',
+                            'qos',
+                            'policies',
+                            self.policy_id,
+                            'bandwidth_limit_rules',
+                            self.rule_id,
+                        ],
+                    ),
+                    json={'bandwidth_limit_rule': expected_rule},
+                    validate=dict(
+                        json={
+                            'bandwidth_limit_rule': {
+                                'max_kbps': self.rule_max_kbps + 100
+                            }
+                        }
+                    ),
+                ),
+            ]
+        )
+        rule = self.cloud.update_qos_bandwidth_limit_rule(
+            self.policy_id,
+            self.rule_id,
+            max_kbps=self.rule_max_kbps + 100,
+            direction="ingress",
+        )
+        # Even if there was attempt to change direction to 'ingress' it should
+        # be not changed in returned rule
+        self._compare_rules(expected_rule, rule)
+        self.assert_calls()
+
+    def test_delete_qos_bandwidth_limit_rule(self):
+        self.register_uris(
+            [
+                dict(
+                    method='GET',
+                    uri=self.get_mock_url(
+                        'network', 'public', append=['v2.0', 'extensions']
+                    ),
+                    json={'extensions': self.enabled_neutron_extensions},
+                ),
+                dict(
+                    method='GET',
+                    uri=self.get_mock_url(
+                        'network',
+                        'public',
+                        
append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'bandwidth_limit_rules', + self.rule_id, + ], + ), + json={}, + ), + ] + ) + self.assertTrue( + self.cloud.delete_qos_bandwidth_limit_rule( + self.policy_name, self.rule_id + ) + ) + self.assert_calls() + + def test_delete_qos_bandwidth_limit_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_qos_bandwidth_limit_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_delete_qos_bandwidth_limit_rule_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'bandwidth_limit_rules', + self.rule_id, + ], + ), + status_code=404, + ), + ] + ) + self.assertFalse( + self.cloud.delete_qos_bandwidth_limit_rule( + self.policy_name, self.rule_id + ) + ) + 
self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py b/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py new file mode 100644 index 0000000000..c3e1fe94ee --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_dscp_marking_rule.py @@ -0,0 +1,465 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from openstack import exceptions +from openstack.network.v2 import qos_dscp_marking_rule +from openstack.tests.unit import base + + +class TestQosDscpMarkingRule(base.TestCase): + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' + rule_dscp_mark = 32 + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False, + } + + mock_rule = { + 'id': rule_id, + 'dscp_mark': rule_dscp_mark, + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension.", + } + + enabled_neutron_extensions = [qos_extension] + + def _compare_rules(self, exp, real): + self.assertDictEqual( + qos_dscp_marking_rule.QoSDSCPMarkingRule(**exp).to_dict( + computed=False + ), + real.to_dict(computed=False), + ) + 
+ def test_get_qos_dscp_marking_rule(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'dscp_marking_rules', + self.rule_id, + ], + ), + json={'dscp_marking_rule': self.mock_rule}, + ), + ] + ) + r = self.cloud.get_qos_dscp_marking_rule( + self.policy_name, self.rule_id + ) + self._compare_rules(self.mock_rule, r) + self.assert_calls() + + def test_get_qos_dscp_marking_rule_no_qos_policy_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': []}, + ), + ] + ) + self.assertRaises( + exceptions.NotFoundException, + self.cloud.get_qos_dscp_marking_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_get_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + 
) + self.assertRaises( + exceptions.SDKException, + self.cloud.get_qos_dscp_marking_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_create_qos_dscp_marking_rule(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'dscp_marking_rules', + ], + ), + json={'dscp_marking_rule': self.mock_rule}, + ), + ] + ) + rule = self.cloud.create_qos_dscp_marking_rule( + self.policy_name, dscp_mark=self.rule_dscp_mark + ) + self._compare_rules(self.mock_rule, rule) + self.assert_calls() + + def test_create_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_qos_dscp_marking_rule, + self.policy_name, + dscp_mark=16, + ) + self.assert_calls() + + def test_update_qos_dscp_marking_rule(self): + new_dscp_mark_value = 16 + expected_rule = copy.copy(self.mock_rule) + expected_rule['dscp_mark'] = new_dscp_mark_value + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + 
append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json=self.mock_policy, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'dscp_marking_rules', + self.rule_id, + ], + ), + json={'dscp_marking_rule': self.mock_rule}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'dscp_marking_rules', + self.rule_id, + ], + ), + json={'dscp_marking_rule': expected_rule}, + validate=dict( + json={ + 'dscp_marking_rule': { + 'dscp_mark': new_dscp_mark_value + } + } + ), + ), + ] + ) + rule = self.cloud.update_qos_dscp_marking_rule( + self.policy_id, self.rule_id, dscp_mark=new_dscp_mark_value + ) + self._compare_rules(expected_rule, rule) + self.assert_calls() + + def test_update_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.update_qos_dscp_marking_rule, + self.policy_id, + self.rule_id, + dscp_mark=8, + ) + self.assert_calls() + + def test_delete_qos_dscp_marking_rule(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, 
+ 'dscp_marking_rules', + self.rule_id, + ], + ), + json={}, + ), + ] + ) + self.assertTrue( + self.cloud.delete_qos_dscp_marking_rule( + self.policy_name, self.rule_id + ) + ) + self.assert_calls() + + def test_delete_qos_dscp_marking_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_qos_dscp_marking_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_delete_qos_dscp_marking_rule_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'dscp_marking_rules', + self.rule_id, + ], + ), + status_code=404, + ), + ] + ) + self.assertFalse( + self.cloud.delete_qos_dscp_marking_rule( + self.policy_name, self.rule_id + ) + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py b/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py new file mode 100644 index 0000000000..5ef8813718 --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_minimum_bandwidth_rule.py @@ -0,0 +1,465 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from openstack import exceptions +from openstack.network.v2 import qos_minimum_bandwidth_rule +from openstack.tests.unit import base + + +class TestQosMinimumBandwidthRule(base.TestCase): + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + rule_id = 'ed1a2b05-0ad7-45d7-873f-008b575a02b3' + rule_min_kbps = 1000 + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False, + } + + mock_rule = { + 'id': rule_id, + 'min_kbps': rule_min_kbps, + 'direction': 'egress', + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension.", + } + + enabled_neutron_extensions = [qos_extension] + + def _compare_rules(self, exp, real): + self.assertDictEqual( + qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule(**exp).to_dict( + computed=False + ), + real.to_dict(computed=False), + ) + + def test_get_qos_minimum_bandwidth_rule(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 
'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'minimum_bandwidth_rules', + self.rule_id, + ], + ), + json={'minimum_bandwidth_rule': self.mock_rule}, + ), + ] + ) + r = self.cloud.get_qos_minimum_bandwidth_rule( + self.policy_name, self.rule_id + ) + self._compare_rules(self.mock_rule, r) + self.assert_calls() + + def test_get_qos_minimum_bandwidth_rule_no_qos_policy_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': []}, + ), + ] + ) + self.assertRaises( + exceptions.NotFoundException, + self.cloud.get_qos_minimum_bandwidth_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_get_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.get_qos_minimum_bandwidth_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_create_qos_minimum_bandwidth_rule(self): + self.register_uris( + [ + dict( + 
method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'minimum_bandwidth_rules', + ], + ), + json={'minimum_bandwidth_rule': self.mock_rule}, + ), + ] + ) + rule = self.cloud.create_qos_minimum_bandwidth_rule( + self.policy_name, min_kbps=self.rule_min_kbps + ) + self._compare_rules(self.mock_rule, rule) + self.assert_calls() + + def test_create_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_qos_minimum_bandwidth_rule, + self.policy_name, + min_kbps=100, + ) + self.assert_calls() + + def test_update_qos_minimum_bandwidth_rule(self): + expected_rule = copy.copy(self.mock_rule) + expected_rule['min_kbps'] = self.rule_min_kbps + 100 + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json=self.mock_policy, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 
'minimum_bandwidth_rules', + self.rule_id, + ], + ), + json={'minimum_bandwidth_rule': self.mock_rule}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'minimum_bandwidth_rules', + self.rule_id, + ], + ), + json={'minimum_bandwidth_rule': expected_rule}, + validate=dict( + json={ + 'minimum_bandwidth_rule': { + 'min_kbps': self.rule_min_kbps + 100 + } + } + ), + ), + ] + ) + rule = self.cloud.update_qos_minimum_bandwidth_rule( + self.policy_id, self.rule_id, min_kbps=self.rule_min_kbps + 100 + ) + self._compare_rules(expected_rule, rule) + self.assert_calls() + + def test_update_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.update_qos_minimum_bandwidth_rule, + self.policy_id, + self.rule_id, + min_kbps=2000, + ) + self.assert_calls() + + def test_delete_qos_minimum_bandwidth_rule(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'minimum_bandwidth_rules', + self.rule_id, + ], + ), + json={}, + ), + ] + ) + self.assertTrue( + self.cloud.delete_qos_minimum_bandwidth_rule( + self.policy_name, 
self.rule_id + ) + ) + self.assert_calls() + + def test_delete_qos_minimum_bandwidth_rule_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_qos_minimum_bandwidth_rule, + self.policy_name, + self.rule_id, + ) + self.assert_calls() + + def test_delete_qos_minimum_bandwidth_rule_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'policies', + self.policy_id, + 'minimum_bandwidth_rules', + self.rule_id, + ], + ), + status_code=404, + ), + ] + ) + self.assertFalse( + self.cloud.delete_qos_minimum_bandwidth_rule( + self.policy_name, self.rule_id + ) + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_policy.py b/openstack/tests/unit/cloud/test_qos_policy.py new file mode 100644 index 0000000000..534633e616 --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_policy.py @@ -0,0 +1,474 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +from openstack import exceptions +from openstack.network.v2 import qos_policy as _policy +from openstack.tests.unit import base + + +class TestQosPolicy(base.TestCase): + policy_name = 'qos test policy' + policy_id = '881d1bb7-a663-44c0-8f9f-ee2765b74486' + project_id = 'c88fc89f-5121-4a4c-87fd-496b5af864e9' + + mock_policy = { + 'id': policy_id, + 'name': policy_name, + 'description': '', + 'rules': [], + 'project_id': project_id, + 'tenant_id': project_id, + 'shared': False, + 'is_default': False, + 'tags': [], + } + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension.", + } + + qos_default_extension = { + "updated": "2017-041-06T10:00:00-00:00", + "name": "QoS default policy", + "links": [], + "alias": "qos-default", + "description": "Expose the QoS default policy per project", + } + + enabled_neutron_extensions = [qos_extension, qos_default_extension] + + def _compare_policies(self, exp, real): + self.assertDictEqual( + _policy.QoSPolicy(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_get_qos_policy(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + ] + ) + r = self.cloud.get_qos_policy(self.policy_name) + self.assertIsNotNone(r) + self._compare_policies(self.mock_policy, r) + self.assert_calls() + + def test_get_qos_policy_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.get_qos_policy, + self.policy_name, + ) + self.assert_calls() + + def test_create_qos_policy(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'qos', 'policies'] + ), + json={'policy': self.mock_policy}, + ), + ] + ) + policy = self.cloud.create_qos_policy( + name=self.policy_name, project_id=self.project_id + ) + self._compare_policies(self.mock_policy, policy) + self.assert_calls() + + def test_create_qos_policy_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_qos_policy, + name=self.policy_name, + ) + self.assert_calls() + + def test_create_qos_policy_no_qos_default_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': 
[self.qos_extension]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'qos', 'policies'] + ), + json={'policy': self.mock_policy}, + validate=dict( + json={ + 'policy': { + 'name': self.policy_name, + 'project_id': self.project_id, + } + } + ), + ), + ] + ) + policy = self.cloud.create_qos_policy( + name=self.policy_name, project_id=self.project_id, default=True + ) + self._compare_policies(self.mock_policy, policy) + self.assert_calls() + + def test_delete_qos_policy(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [self.mock_policy]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json={}, + ), + ] + ) + self.assertTrue(self.cloud.delete_qos_policy(self.policy_name)) + self.assert_calls() + + def test_delete_qos_policy_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_qos_policy, + self.policy_name, + ) + self.assert_calls() + + def test_delete_qos_policy_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 
'public', + append=['v2.0', 'qos', 'policies', 'goofy'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=['name=goofy'], + ), + json={'policies': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_qos_policy('goofy')) + self.assert_calls() + + def test_delete_qos_policy_multiple_found(self): + policy1 = dict(id='123', name=self.policy_name) + policy2 = dict(id='456', name=self.policy_name) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies'], + qs_elements=[f'name={self.policy_name}'], + ), + json={'policies': [policy1, policy2]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_qos_policy, + self.policy_name, + ) + self.assert_calls() + + def test_delete_qos_policy_using_id(self): + policy1 = self.mock_policy + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', policy1['id']], + ), + json=policy1, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json={}, + ), + ] + ) + self.assertTrue(self.cloud.delete_qos_policy(policy1['id'])) + self.assert_calls() + + def test_update_qos_policy(self): + expected_policy = copy.copy(self.mock_policy) + expected_policy['name'] = 'goofy' + 
self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json=self.mock_policy, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json={'policy': expected_policy}, + validate=dict(json={'policy': {'name': 'goofy'}}), + ), + ] + ) + policy = self.cloud.update_qos_policy(self.policy_id, name='goofy') + self._compare_policies(expected_policy, policy) + self.assert_calls() + + def test_update_qos_policy_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.update_qos_policy, + self.policy_id, + name="goofy", + ) + self.assert_calls() + + def test_update_qos_policy_no_qos_default_extension(self): + expected_policy = copy.copy(self.mock_policy) + expected_policy['name'] = 'goofy' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json=self.mock_policy, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'policies', self.policy_id], + ), + json={'policy': expected_policy}, + validate=dict(json={'policy': {'name': "goofy"}}), + ), + ] + ) + policy = 
self.cloud.update_qos_policy( + self.policy_id, name='goofy', default=True + ) + self._compare_policies(expected_policy, policy) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_qos_rule_type.py b/openstack/tests/unit/cloud/test_qos_rule_type.py new file mode 100644 index 0000000000..21d85f32c6 --- /dev/null +++ b/openstack/tests/unit/cloud/test_qos_rule_type.py @@ -0,0 +1,217 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack import exceptions +from openstack.network.v2 import qos_rule_type +from openstack.tests.unit import base + + +class TestQosRuleType(base.TestCase): + rule_type_name = "bandwidth_limit" + + qos_extension = { + "updated": "2015-06-08T10:00:00-00:00", + "name": "Quality of Service", + "links": [], + "alias": "qos", + "description": "The Quality of Service extension.", + } + qos_rule_type_details_extension = { + "updated": "2017-06-22T10:00:00-00:00", + "name": "Details of QoS rule types", + "links": [], + "alias": "qos-rule-type-details", + "description": ( + "Expose details about QoS rule types supported by " + "loaded backend drivers" + ), + } + + mock_rule_type_bandwidth_limit = {'type': 'bandwidth_limit'} + mock_rule_type_dscp_marking = {'type': 'dscp_marking'} + mock_rule_types = [ + mock_rule_type_bandwidth_limit, + mock_rule_type_dscp_marking, + ] + + mock_rule_type_details = { + 'drivers': [ + { + 'name': 'linuxbridge', + 'supported_parameters': [ + { + 'parameter_values': {'start': 0, 'end': 2147483647}, + 'parameter_type': 'range', + 'parameter_name': 'max_kbps', + }, + { + 'parameter_values': ['ingress', 'egress'], + 'parameter_type': 'choices', + 'parameter_name': 'direction', + }, + { + 'parameter_values': {'start': 0, 'end': 2147483647}, + 'parameter_type': 'range', + 'parameter_name': 'max_burst_kbps', + }, + ], + } + ], + 'type': rule_type_name, + } + + def _compare_rule_types(self, exp, real): + self.assertDictEqual( + qos_rule_type.QoSRuleType(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_list_qos_rule_types(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'qos', 'rule-types'], + ), + json={'rule_types': self.mock_rule_types}, + ), + ] + ) + rule_types = 
self.cloud.list_qos_rule_types() + for a, b in zip(self.mock_rule_types, rule_types): + self._compare_rule_types(a, b) + self.assert_calls() + + def test_list_qos_rule_types_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, self.cloud.list_qos_rule_types + ) + self.assert_calls() + + def test_get_qos_rule_type_details(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={ + 'extensions': [ + self.qos_extension, + self.qos_rule_type_details_extension, + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={ + 'extensions': [ + self.qos_extension, + self.qos_rule_type_details_extension, + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'qos', + 'rule-types', + self.rule_type_name, + ], + ), + json={'rule_type': self.mock_rule_type_details}, + ), + ] + ) + + self._compare_rule_types( + self.mock_rule_type_details, + self.cloud.get_qos_rule_type_details(self.rule_type_name), + ) + self.assert_calls() + + def test_get_qos_rule_type_details_no_qos_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': []}, + ) + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.get_qos_rule_type_details, + self.rule_type_name, + ) + self.assert_calls() + + def test_get_qos_rule_type_details_no_qos_details_extension(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': [self.qos_extension]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.get_qos_rule_type_details, + self.rule_type_name, + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_quotas.py b/openstack/tests/unit/cloud/test_quotas.py new file mode 100644 index 0000000000..d02b2e7d80 --- /dev/null +++ b/openstack/tests/unit/cloud/test_quotas.py @@ -0,0 +1,378 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.network.v2 import quota as _quota +from openstack.tests.unit import base + +fake_quota_set = { + "cores": 20, + "fixed_ips": -1, + "floating_ips": 10, + "injected_file_content_bytes": 10240, + "injected_file_path_bytes": 255, + "injected_files": 5, + "instances": 10, + "key_pairs": 100, + "metadata_items": 128, + "ram": 51200, + "security_group_rules": 20, + "security_groups": 45, + "server_groups": 10, + "server_group_members": 10, +} + + +class TestQuotas(base.TestCase): + def setUp(self, cloud_config_fixture='clouds.yaml'): + super().setUp(cloud_config_fixture=cloud_config_fixture) + + def test_update_quotas(self): + project = self._get_project_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'identity', + 'public', + append=['v3', 'projects', project.project_id], + ), + json={'project': project.json_response['project']}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='PUT', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-quota-sets', project.project_id], + ), + json={'quota_set': fake_quota_set}, + validate=dict( + json={'quota_set': {'cores': 1, 'force': True}} + ), + ), + ] + ) + + self.cloud.set_compute_quotas(project.project_id, cores=1) + + self.assert_calls() + + def test_update_quotas_bad_request(self): + project = self._get_project_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'identity', + 'public', + append=['v3', 'projects', project.project_id], + ), + json={'project': project.json_response['project']}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='PUT', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-quota-sets', project.project_id], + ), + status_code=400, + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.set_compute_quotas, + project.project_id, + ) + + self.assert_calls() + + def test_get_quotas(self): + project = 
self._get_project_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'identity', + 'public', + append=['v3', 'projects', project.project_id], + ), + json={'project': project.json_response['project']}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-quota-sets', project.project_id], + ), + json={'quota_set': fake_quota_set}, + ), + ] + ) + + self.cloud.get_compute_quotas(project.project_id) + + self.assert_calls() + + def test_delete_quotas(self): + project = self._get_project_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'identity', + 'public', + append=['v3', 'projects', project.project_id], + ), + json={'project': project.json_response['project']}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-quota-sets', project.project_id], + ), + ), + ] + ) + + self.cloud.delete_compute_quotas(project.project_id) + + self.assert_calls() + + def test_cinder_update_quotas(self): + project = self._get_project_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'identity', + 'public', + append=['v3', 'projects', project.project_id], + ), + json={'project': project.json_response['project']}, + ), + self.get_cinder_discovery_mock_dict(), + dict( + method='PUT', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['os-quota-sets', project.project_id], + ), + json=dict(quota_set={'volumes': 1}), + validate=dict(json={'quota_set': {'volumes': 1}}), + ), + ] + ) + self.cloud.set_volume_quotas(project.project_id, volumes=1) + self.assert_calls() + + def test_cinder_get_quotas(self): + project = self._get_project_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'identity', + 'public', + append=['v3', 'projects', project.project_id], + ), + json={'project': 
project.json_response['project']}, + ), + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['os-quota-sets', project.project_id], + qs_elements=['usage=False'], + ), + json=dict(quota_set={'snapshots': 10, 'volumes': 20}), + ), + ] + ) + self.cloud.get_volume_quotas(project.project_id) + self.assert_calls() + + def test_cinder_delete_quotas(self): + project = self._get_project_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'identity', + 'public', + append=['v3', 'projects', project.project_id], + ), + json={'project': project.json_response['project']}, + ), + self.get_cinder_discovery_mock_dict(), + dict( + method='DELETE', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['os-quota-sets', project.project_id], + ), + ), + ] + ) + self.cloud.delete_volume_quotas(project.project_id) + self.assert_calls() + + def test_neutron_update_quotas(self): + project = self.mock_for_keystone_projects( + project_count=1, id_get=True + )[0] + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'quotas', project.project_id], + ), + json={}, + validate=dict(json={'quota': {'network': 1}}), + ) + ] + ) + self.cloud.set_network_quotas(project.project_id, network=1) + self.assert_calls() + + def test_neutron_get_quotas(self): + quota = { + 'subnet': 100, + 'network': 100, + 'floatingip': 50, + 'subnetpool': -1, + 'security_group_rule': 100, + 'security_group': 10, + 'router': 10, + 'rbac_policy': 10, + 'port': 500, + } + project = self.mock_for_keystone_projects( + project_count=1, id_get=True + )[0] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'quotas', project.project_id], + ), + json={'quota': quota}, + ) + ] + ) + received_quota = self.cloud.get_network_quotas( + project.project_id + ).to_dict(computed=False) + expected_quota 
= _quota.Quota(**quota).to_dict(computed=False) + received_quota.pop('id') + received_quota.pop('name') + expected_quota.pop('id') + expected_quota.pop('name') + + self.assertDictEqual(expected_quota, received_quota) + + self.assert_calls() + + def test_neutron_get_quotas_details(self): + quota_details = { + 'subnet': {'limit': 100, 'used': 7, 'reserved': 0}, + 'network': {'limit': 100, 'used': 6, 'reserved': 0}, + 'floatingip': {'limit': 50, 'used': 0, 'reserved': 0}, + 'subnetpool': {'limit': -1, 'used': 2, 'reserved': 0}, + 'security_group_rule': {'limit': 100, 'used': 4, 'reserved': 0}, + 'security_group': {'limit': 10, 'used': 1, 'reserved': 0}, + 'router': {'limit': 10, 'used': 2, 'reserved': 0}, + 'rbac_policy': {'limit': 10, 'used': 2, 'reserved': 0}, + 'port': {'limit': 500, 'used': 7, 'reserved': 0}, + } + project = self.mock_for_keystone_projects( + project_count=1, id_get=True + )[0] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'quotas', + project.project_id, + 'details', + ], + ), + json={'quota': quota_details}, + ) + ] + ) + received_quota_details = self.cloud.get_network_quotas( + project.project_id, details=True + ) + self.assertDictEqual( + _quota.QuotaDetails(**quota_details).to_dict(computed=False), + received_quota_details.to_dict(computed=False), + ) + self.assert_calls() + + def test_neutron_delete_quotas(self): + project = self.mock_for_keystone_projects( + project_count=1, id_get=True + )[0] + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'quotas', project.project_id], + ), + json={}, + ) + ] + ) + self.cloud.delete_network_quotas(project.project_id) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_rebuild_server.py b/openstack/tests/unit/cloud/test_rebuild_server.py new file mode 100644 index 0000000000..b187fd037c --- /dev/null +++ 
b/openstack/tests/unit/cloud/test_rebuild_server.py @@ -0,0 +1,333 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_rebuild_server +---------------------------------- + +Tests for the `rebuild_server` command. +""" + +import uuid + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestRebuildServer(base.TestCase): + def setUp(self): + super().setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name + ) + self.rebuild_server = fakes.make_fake_server( + self.server_id, self.server_name, 'REBUILD' + ) + self.error_server = fakes.make_fake_server( + self.server_id, self.server_name, 'ERROR' + ) + + def test_rebuild_server_rebuild_exception(self): + """ + Test that an exception in the rebuild raises an exception in + rebuild_server. 
+ """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_id, 'action'], + ), + status_code=400, + validate=dict( + json={'rebuild': {'imageRef': 'a', 'adminPass': 'b'}} + ), + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.rebuild_server, + self.fake_server['id'], + "a", + "b", + ) + + self.assert_calls() + + def test_rebuild_server_server_error(self): + """ + Test that a server error while waiting for the server to rebuild + raises an exception in rebuild_server. + """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_id, 'action'], + ), + json={'server': self.rebuild_server}, + validate=dict(json={'rebuild': {'imageRef': 'a'}}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.error_server}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.rebuild_server, + self.fake_server['id'], + "a", + wait=True, + ) + + self.assert_calls() + + def test_rebuild_server_timeout(self): + """ + Test that a timeout while waiting for the server to rebuild raises an + exception in rebuild_server. 
+ """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_id, 'action'], + ), + json={'server': self.rebuild_server}, + validate=dict(json={'rebuild': {'imageRef': 'a'}}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.rebuild_server}, + ), + ] + ) + self.assertRaises( + exceptions.ResourceTimeout, + self.cloud.rebuild_server, + self.fake_server['id'], + "a", + wait=True, + timeout=0.001, + ) + + self.assert_calls(do_count=False) + + def test_rebuild_server_no_wait(self): + """ + Test that rebuild_server with no wait and no exception in the + rebuild call returns the server instance. + """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_id, 'action'], + ), + json={'server': self.rebuild_server}, + validate=dict(json={'rebuild': {'imageRef': 'a'}}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + ] + ) + self.assertEqual( + self.rebuild_server['status'], + self.cloud.rebuild_server(self.fake_server['id'], "a")['status'], + ) + + self.assert_calls() + + def test_rebuild_server_with_admin_pass_no_wait(self): + """ + Test that a server with an admin_pass passed returns the password + """ + password = self.getUniqueString('password') + rebuild_server = self.rebuild_server.copy() + rebuild_server['adminPass'] = password + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_id, 'action'], + ), + json={'server': rebuild_server}, + validate=dict( + json={ + 'rebuild': {'imageRef': 'a', 'adminPass': password} + } + ), + ), + dict( 
+ method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + ] + ) + self.assertEqual( + password, + self.cloud.rebuild_server( + self.fake_server['id'], 'a', admin_pass=password + )['adminPass'], + ) + + self.assert_calls() + + def test_rebuild_server_with_admin_pass_wait(self): + """ + Test that a server with an admin_pass passed returns the password + """ + password = self.getUniqueString('password') + rebuild_server = self.rebuild_server.copy() + rebuild_server['adminPass'] = password + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_id, 'action'], + ), + json={'server': rebuild_server}, + validate=dict( + json={ + 'rebuild': {'imageRef': 'a', 'adminPass': password} + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.rebuild_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.fake_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + ] + ) + + self.assertEqual( + password, + self.cloud.rebuild_server( + self.fake_server['id'], 'a', admin_pass=password, wait=True + )['adminPass'], + ) + + self.assert_calls() + + def test_rebuild_server_wait(self): + """ + Test that rebuild_server with a wait returns the server instance when + its status changes to "ACTIVE". 
+ """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_id, 'action'], + ), + json={'server': self.rebuild_server}, + validate=dict(json={'rebuild': {'imageRef': 'a'}}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.rebuild_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.fake_server}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + ] + ) + self.assertEqual( + 'ACTIVE', + self.cloud.rebuild_server(self.fake_server['id'], 'a', wait=True)[ + 'status' + ], + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_recordset.py b/openstack/tests/unit/cloud/test_recordset.py new file mode 100644 index 0000000000..181d5b41a9 --- /dev/null +++ b/openstack/tests/unit/cloud/test_recordset.py @@ -0,0 +1,581 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import exceptions +from openstack.tests.unit import base +from openstack.tests.unit.cloud import test_zone + + +zone = test_zone.zone_dict + +recordset = { + 'name': 'www.example.net.', + 'type': 'A', + 'description': 'Example zone rec', + 'ttl': 3600, + 'records': ['192.168.1.1'], + 'id': '1', + 'zone_id': zone['id'], + 'zone_name': zone['name'], +} + + +class RecordsetTestWrapper(test_zone.ZoneTestWrapper): + pass + + +class TestRecordset(base.TestCase): + def setUp(self): + super().setUp() + self.use_designate() + + def test_create_recordset_zoneid(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + fake_rs = RecordsetTestWrapper(self, recordset) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=fake_zone.get_get_response_json(), + ), + dict( + method='POST', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', zone['id'], 'recordsets'], + ), + json=fake_rs.get_create_response_json(), + validate=dict( + json={ + "records": fake_rs['records'], + "type": fake_rs['type'], + "name": fake_rs['name'], + "description": fake_rs['description'], + "ttl": fake_rs['ttl'], + } + ), + ), + ] + ) + rs = self.cloud.create_recordset( + zone=fake_zone['id'], + name=fake_rs['name'], + recordset_type=fake_rs['type'], + records=fake_rs['records'], + description=fake_rs['description'], + ttl=fake_rs['ttl'], + ) + + fake_rs.cmp(rs) + self.assert_calls() + + def test_create_recordset_zonename(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + fake_rs = RecordsetTestWrapper(self, recordset) + self.register_uris( + [ + # try by directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['name']], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones'], + qs_elements=[ + 
'name={name}'.format(name=fake_zone['name']) + ], + ), + json={'zones': [fake_zone.get_get_response_json()]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', zone['id'], 'recordsets'], + ), + json=fake_rs.get_create_response_json(), + validate=dict( + json={ + "records": fake_rs['records'], + "type": fake_rs['type'], + "name": fake_rs['name'], + "description": fake_rs['description'], + "ttl": fake_rs['ttl'], + } + ), + ), + ] + ) + rs = self.cloud.create_recordset( + zone=fake_zone['name'], + name=fake_rs['name'], + recordset_type=fake_rs['type'], + records=fake_rs['records'], + description=fake_rs['description'], + ttl=fake_rs['ttl'], + ) + + fake_rs.cmp(rs) + self.assert_calls() + + def test_create_recordset_exception(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=fake_zone.get_get_response_json(), + ), + dict( + method='POST', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', zone['id'], 'recordsets'], + ), + status_code=500, + validate=dict( + json={ + 'name': 'www2.example.net.', + 'records': ['192.168.1.2'], + 'type': 'A', + } + ), + ), + ] + ) + + self.assertRaises( + exceptions.SDKException, + self.cloud.create_recordset, + fake_zone['id'], + 'www2.example.net.', + 'a', + ['192.168.1.2'], + ) + + self.assert_calls() + + def test_update_recordset(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + fake_rs = RecordsetTestWrapper(self, recordset) + new_ttl = 7200 + expected_recordset = recordset.copy() + expected_recordset['ttl'] = new_ttl + updated_rs = RecordsetTestWrapper(self, expected_recordset) + self.register_uris( + [ + # try by directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['name']], + ), + status_code=404, + ), + # list with name + dict( + 
method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones'], + qs_elements=[ + 'name={name}'.format(name=fake_zone['name']) + ], + ), + json={'zones': [fake_zone.get_get_response_json()]}, + ), + # try directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets', + fake_rs['name'], + ], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id'], 'recordsets'], + qs_elements=[ + 'name={name}'.format(name=fake_rs['name']) + ], + ), + json={'recordsets': [fake_rs.get_get_response_json()]}, + ), + # update + dict( + method='PUT', + uri=self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets', + fake_rs['id'], + ], + ), + json=updated_rs.get_get_response_json(), + validate=dict(json={'ttl': new_ttl}), + ), + ] + ) + res = self.cloud.update_recordset( + fake_zone['name'], fake_rs['name'], ttl=new_ttl + ) + + updated_rs.cmp(res) + self.assert_calls() + + def test_list_recordsets(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + fake_rs = RecordsetTestWrapper(self, recordset) + self.register_uris( + [ + # try by directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=fake_zone.get_get_response_json(), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id'], 'recordsets'], + ), + json={ + 'recordsets': [fake_rs.get_get_response_json()], + 'links': { + 'next': self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets', + ], + qs_elements=['limit=1', 'marker=asd'], + ), + 'self': self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets?limit=1', + ], + ), + }, + 'metadata': {'total_count': 
2}, + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id'], 'recordsets'], + qs_elements=['limit=1', 'marker=asd'], + ), + json={'recordsets': [fake_rs.get_get_response_json()]}, + ), + ] + ) + res = self.cloud.list_recordsets(fake_zone['id']) + + self.assertEqual(2, len(res)) + self.assert_calls() + + def test_delete_recordset(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + fake_rs = RecordsetTestWrapper(self, recordset) + self.register_uris( + [ + # try by directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['name']], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones'], + qs_elements=[ + 'name={name}'.format(name=fake_zone['name']) + ], + ), + json={'zones': [fake_zone.get_get_response_json()]}, + ), + # try directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets', + fake_rs['name'], + ], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id'], 'recordsets'], + qs_elements=[ + 'name={name}'.format(name=fake_rs['name']) + ], + ), + json={'recordsets': [fake_rs.get_get_response_json()]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + zone['id'], + 'recordsets', + fake_rs['id'], + ], + ), + status_code=202, + ), + ] + ) + self.assertTrue( + self.cloud.delete_recordset(fake_zone['name'], fake_rs['name']) + ) + self.assert_calls() + + def test_get_recordset_by_id(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + fake_rs = RecordsetTestWrapper(self, recordset) + self.register_uris( + [ + # try by directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + 
append=['v2', 'zones', fake_zone['name']], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones'], + qs_elements=[ + 'name={name}'.format(name=fake_zone['name']) + ], + ), + json={'zones': [fake_zone.get_get_response_json()]}, + ), + # try directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets', + fake_rs['id'], + ], + ), + json=fake_rs.get_get_response_json(), + ), + ] + ) + res = self.cloud.get_recordset(fake_zone['name'], fake_rs['id']) + fake_rs.cmp(res) + self.assert_calls() + + def test_get_recordset_by_name(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + fake_rs = RecordsetTestWrapper(self, recordset) + self.register_uris( + [ + # try by directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['name']], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones'], + qs_elements=[ + 'name={name}'.format(name=fake_zone['name']) + ], + ), + json={'zones': [fake_zone.get_get_response_json()]}, + ), + # try directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets', + fake_rs['name'], + ], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id'], 'recordsets'], + qs_elements=[ + 'name={name}'.format(name=fake_rs['name']) + ], + ), + json={'recordsets': [fake_rs.get_get_response_json()]}, + ), + ] + ) + res = self.cloud.get_recordset(fake_zone['name'], fake_rs['name']) + fake_rs.cmp(res) + self.assert_calls() + + def test_get_recordset_not_found_returns_false(self): + fake_zone = test_zone.ZoneTestWrapper(self, zone) + self.register_uris( + [ + # try by 
directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=fake_zone.get_get_response_json(), + ), + # try directly + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=[ + 'v2', + 'zones', + fake_zone['id'], + 'recordsets', + 'fake', + ], + ), + status_code=404, + ), + # list with name + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id'], 'recordsets'], + qs_elements=['name=fake'], + ), + json={'recordsets': []}, + ), + ] + ) + res = self.cloud.get_recordset(fake_zone['id'], 'fake') + self.assertFalse(res) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_role_assignment.py b/openstack/tests/unit/cloud/test_role_assignment.py new file mode 100644 index 0000000000..e1e34bc914 --- /dev/null +++ b/openstack/tests/unit/cloud/test_role_assignment.py @@ -0,0 +1,2187 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import testtools +from testtools import matchers + +from openstack import exceptions +from openstack.tests.unit import base + + +class TestRoleAssignment(base.TestCase): + IS_INHERITED = False + + def _build_role_assignment_response( + self, role_id, scope_type, scope_id, entity_type, entity_id + ): + self.assertThat(['group', 'user'], matchers.Contains(entity_type)) + self.assertThat(['project', 'domain'], matchers.Contains(scope_type)) + # NOTE(notmorgan): Links are thrown out by shade, but we construct them + # for corectness. + link_str = ( + 'https://identity.example.com/identity/v3/{scope_t}s' + '/{scopeid}/{entity_t}s/{entityid}/roles/{roleid}' + ) + return [ + { + 'links': { + 'assignment': link_str.format( + scope_t=scope_type, + scopeid=scope_id, + entity_t=entity_type, + entityid=entity_id, + roleid=role_id, + ) + }, + 'role': {'id': role_id}, + 'scope': {scope_type: {'id': scope_id}}, + entity_type: {'id': entity_id}, + } + ] + + def setUp(self, cloud_config_fixture='clouds.yaml'): + super().setUp(cloud_config_fixture) + self.role_data = self._get_role_data() + self.domain_data = self._get_domain_data() + self.user_data = self._get_user_data( + domain_id=self.domain_data.domain_id + ) + self.project_data = self._get_project_data( + domain_id=self.domain_data.domain_id + ) + self.project_data_v2 = self._get_project_data( + project_name=self.project_data.project_name, + project_id=self.project_data.project_id, + v3=False, + ) + self.group_data = self._get_group_data( + domain_id=self.domain_data.domain_id + ) + + self.user_project_assignment = self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='user', + entity_id=self.user_data.user_id, + ) + + self.group_project_assignment = self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='project', + scope_id=self.project_data.project_id, + entity_type='group', + 
entity_id=self.group_data.group_id, + ) + + self.user_domain_assignment = self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='user', + entity_id=self.user_data.user_id, + ) + + self.group_domain_assignment = self._build_role_assignment_response( + role_id=self.role_data.role_id, + scope_type='domain', + scope_id=self.domain_data.domain_id, + entity_type='group', + entity_id=self.group_data.group_id, + ) + + # Cleanup of instances to ensure garbage collection/no leaking memory + # in tests. + self.addCleanup(delattr, self, 'role_data') + self.addCleanup(delattr, self, 'user_data') + self.addCleanup(delattr, self, 'domain_data') + self.addCleanup(delattr, self, 'group_data') + self.addCleanup(delattr, self, 'project_data') + self.addCleanup(delattr, self, 'project_data_v2') + self.addCleanup(delattr, self, 'user_project_assignment') + self.addCleanup(delattr, self, 'group_project_assignment') + self.addCleanup(delattr, self, 'user_domain_assignment') + self.addCleanup(delattr, self, 'group_domain_assignment') + + def get_mock_url( + self, + service_type='identity', + interface='public', + resource='role_assignments', + append=None, + base_url_append='v3', + qs_elements=None, + inherited=False, + ): + if inherited: + base_url_append = base_url_append + '/OS-INHERIT' + if append and inherited: + append.append('inherited_to_projects') + + return super().get_mock_url( + service_type, + interface, + resource, + append, + base_url_append, + qs_elements, + ) + + def __get( + self, resource, data, attr, qs_elements, use_name=False, is_found=True + ): + if not use_name: + if is_found: + return [ + dict( + method='GET', + uri=self.get_mock_url( + resource=resource + 's', # do roles from role + append=[getattr(data, attr)], + qs_elements=qs_elements, + ), + status_code=200, + json=data.json_response, + ) + ] + else: + return [ + dict( + method='GET', + uri=self.get_mock_url( + 
resource=resource + 's', # do roles from role + append=[getattr(data, attr)], + qs_elements=qs_elements, + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource=resource + 's', # do roles from role + qs_elements=qs_elements, + ), + status_code=200, + json={(resource + 's'): []}, + ), + ] + else: + return [ + dict( + method='GET', + uri=self.get_mock_url( + resource=resource + 's', + append=[getattr(data, attr)], + qs_elements=qs_elements, + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource=resource + 's', + qs_elements=[ + 'name=' + getattr(data, attr), + *qs_elements, + ], + ), + status_code=200, + json={(resource + 's'): [data.json_response[resource]]}, + ), + ] + + def __user_mocks( + self, user_data, use_name, is_found=True, domain_data=None + ): + qs_elements = [] + if domain_data: + qs_elements = ['domain_id=' + domain_data.domain_id] + uri_mocks = [] + if not use_name: + uri_mocks.append( + dict( + method='GET', + uri=self.get_mock_url( + resource='users', + append=[user_data.user_id], + # TODO(stephenfin): We shouldn't be passing domain ID + # here since it's unnecessary, but that requires a much + # larger rework of the Resource.find method. 
+ qs_elements=qs_elements, + ), + json=user_data.json_response if is_found else None, + status_code=200 if is_found else 404, + ), + ) + else: + uri_mocks += [ + dict( + method='GET', + uri=self.get_mock_url( + resource='users', + append=[user_data.name], + qs_elements=qs_elements, + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='users', + qs_elements=[*qs_elements, 'name=' + user_data.name], + ), + status_code=200, + json={ + 'users': ( + [user_data.json_response['user']] + if is_found + else [] + ) + }, + ), + ] + return uri_mocks + + def _get_mock_role_query_urls( + self, + role_data, + domain_data=None, + project_data=None, + group_data=None, + user_data=None, + use_role_name=False, + use_domain_name=False, + use_project_name=False, + use_group_name=False, + use_user_name=False, + use_domain_in_query=True, + ): + """Build uri mocks for querying role assignments""" + uri_mocks = [] + + if domain_data: + uri_mocks.extend( + self.__get( + 'domain', + domain_data, + 'domain_id' if not use_domain_name else 'domain_name', + [], + use_name=use_domain_name, + ) + ) + + qs_elements = [] + if domain_data and use_domain_in_query: + qs_elements = ['domain_id=' + domain_data.domain_id] + + uri_mocks.extend( + self.__get( + 'role', + role_data, + 'role_id' if not use_role_name else 'role_name', + [], + use_name=use_role_name, + ) + ) + + if user_data: + uri_mocks.extend( + self.__user_mocks( + user_data, + use_user_name, + is_found=True, + domain_data=domain_data, + ) + ) + + if group_data: + uri_mocks.extend( + self.__get( + 'group', + group_data, + 'group_id' if not use_group_name else 'group_name', + qs_elements, + use_name=use_group_name, + ) + ) + + if project_data: + uri_mocks.extend( + self.__get( + 'project', + project_data, + 'project_id' if not use_project_name else 'project_name', + qs_elements, + use_name=use_project_name, + ) + ) + + return uri_mocks + + def test_grant_role_user_id_project(self): + uris = 
self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_user_name_project(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + + def test_grant_role_user_id_project_exists(self): + uris = 
self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( + self.role_data.role_id, + user=self.user_data.user_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_user_name_project_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_id_project(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[ + 
self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_name_project(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_id_project_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( 
+ self.role_data.role_id, + group=self.group_data.group_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_name_project_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + # ===== Domain + def test_grant_role_user_id_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_user_name_domain(self): + uris = 
self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + + def test_grant_role_user_id_domain_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( + self.role_data.role_id, + user=self.user_data.user_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_user_name_domain_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + 
self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_id_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_name_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 
'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_id_domain_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( + self.role_data.role_id, + group=self.group_data.group_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_role_group_name_domain_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + # ==== Revoke + def test_revoke_role_user_id_project(self): + uris = self._get_mock_role_query_urls( + self.role_data, + 
project_data=self.project_data, + user_data=self.user_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_user_name_project(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + + def test_revoke_role_user_id_project_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + 
project_data=self.project_data, + user_data=self.user_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_id, + user=self.user_data.user_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_user_name_project_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_id_project(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + 
self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_name_project(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_id_project_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_id, + 
group=self.group_data.group_id, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_name_project_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + project=self.project_data.project_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + # ==== Domain + def test_revoke_role_user_id_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.user_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_user_name_domain(self): + uris = self._get_mock_role_query_urls( + 
self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + + def test_revoke_role_user_id_domain_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_id, + user=self.user_data.user_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_user_name_domain_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + user_data=self.user_data, + use_role_name=True, + use_user_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'users', + self.user_data.user_id, + 'roles', 
+ self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_id_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + use_role_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_name_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + 
self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_id_domain_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_id, + group=self.group_data.group_id, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_revoke_role_group_name_domain_not_exists(self): + uris = self._get_mock_role_query_urls( + self.role_data, + domain_data=self.domain_data, + group_data=self.group_data, + use_role_name=True, + use_group_name=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='domains', + append=[ + self.domain_data.domain_id, + 'groups', + self.group_data.group_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + ] + ) + self.register_uris(uris) + + self.assertFalse( + self.cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_id, + inherited=self.IS_INHERITED, + ) + ) + self.assert_calls() + + def test_grant_no_role(self): + uris = self.__get( + 'domain', self.domain_data, 'domain_name', [], use_name=True + ) + uris.extend( + 
[ + dict( + method='GET', + uri=self.get_mock_url( + resource='roles', + append=[self.role_data.role_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='roles', + qs_elements=[ + 'name=' + self.role_data.role_name, + ], + ), + status_code=200, + json={'roles': []}, + ), + ] + ) + self.register_uris(uris) + + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.grant_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name, + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_revoke_no_role(self): + uris = self.__get( + 'domain', self.domain_data, 'domain_name', [], use_name=True + ) + uris.extend( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='roles', + append=[self.role_data.role_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='roles', + qs_elements=[ + 'name=' + self.role_data.role_name, + ], + ), + status_code=200, + json={'roles': []}, + ), + ] + ) + self.register_uris(uris) + + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.revoke_role( + self.role_data.role_name, + group=self.group_data.group_name, + domain=self.domain_data.domain_name, + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_grant_no_user_or_group_specified(self): + uris = self.__get( + 'role', self.role_data, 'role_name', [], use_name=True + ) + self.register_uris(uris) + with testtools.ExpectedException( + exceptions.SDKException, + 'Must specify either a user or a group', + ): + self.cloud.grant_role( + self.role_data.role_name, + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_revoke_no_user_or_group_specified(self): + uris = self.__get( + 'role', self.role_data, 'role_name', [], use_name=True + ) + self.register_uris(uris) + with testtools.ExpectedException( + exceptions.SDKException, + 'Must specify either a user or a 
group', + ): + self.cloud.revoke_role( + self.role_data.role_name, + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_grant_both_user_and_group(self): + uris = self.__get( + 'role', self.role_data, 'role_name', [], use_name=True + ) + self.register_uris(uris) + + with testtools.ExpectedException( + exceptions.SDKException, + 'Specify either a group or a user, not both', + ): + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + group=self.group_data.group_name, + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_revoke_both_user_and_group(self): + uris = self.__get( + 'role', self.role_data, 'role_name', [], use_name=True + ) + uris.extend(self.__user_mocks(self.user_data, use_name=True)) + uris.extend( + self.__get( + 'group', self.group_data, 'group_name', [], use_name=True + ) + ) + self.register_uris(uris) + + with testtools.ExpectedException( + exceptions.SDKException, + 'Specify either a group or a user, not both', + ): + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + group=self.group_data.group_name, + inherited=self.IS_INHERITED, + ) + + def test_grant_both_project_and_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + domain_data=self.domain_data, + use_role_name=True, + use_user_name=True, + use_project_name=True, + use_domain_name=True, + use_domain_in_query=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=404, + ), + dict( + method='PUT', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + 
inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_name, + domain=self.domain_data.domain_name, + inherited=self.IS_INHERITED, + ) + ) + + def test_revoke_both_project_and_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + project_data=self.project_data, + user_data=self.user_data, + domain_data=self.domain_data, + use_role_name=True, + use_user_name=True, + use_project_name=True, + use_domain_name=True, + use_domain_in_query=True, + ) + uris.extend( + [ + dict( + method='HEAD', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + complete_qs=True, + status_code=204, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + resource='projects', + append=[ + self.project_data.project_id, + 'users', + self.user_data.user_id, + 'roles', + self.role_data.role_id, + ], + inherited=self.IS_INHERITED, + ), + status_code=200, + ), + ] + ) + self.register_uris(uris) + + self.assertTrue( + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + project=self.project_data.project_name, + domain=self.domain_data.domain_name, + inherited=self.IS_INHERITED, + ) + ) + + def test_grant_no_project_or_domain(self): + uris = self._get_mock_role_query_urls( + self.role_data, + use_role_name=True, + ) + + self.register_uris(uris) + + with testtools.ExpectedException( + exceptions.SDKException, + 'Must specify either a domain, project or system', + ): + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_revoke_no_project_or_domain_or_system(self): + uris = self._get_mock_role_query_urls( + self.role_data, + 
use_role_name=True, + ) + + self.register_uris(uris) + + with testtools.ExpectedException( + exceptions.SDKException, + 'Must specify either a domain, project or system', + ): + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_grant_bad_domain_exception(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='domains', append=['baddomain'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='domains', qs_elements=['name=baddomain'] + ), + status_code=404, + ), + ] + ) + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.grant_role( + self.role_data.role_name, + user=self.user_data.name, + domain='baddomain', + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + def test_revoke_bad_domain_exception(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + resource='domains', append=['baddomain'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + resource='domains', qs_elements=['name=baddomain'] + ), + status_code=404, + ), + ] + ) + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.revoke_role( + self.role_data.role_name, + user=self.user_data.name, + domain='baddomain', + inherited=self.IS_INHERITED, + ) + self.assert_calls() + + +class TestInheritedRoleAssignment(TestRoleAssignment): + IS_INHERITED = True diff --git a/openstack/tests/unit/cloud/test_router.py b/openstack/tests/unit/cloud/test_router.py new file mode 100644 index 0000000000..cbe134dac1 --- /dev/null +++ b/openstack/tests/unit/cloud/test_router.py @@ -0,0 +1,610 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import copy + +import testtools + +from openstack import exceptions +from openstack.network.v2 import port as _port +from openstack.network.v2 import router as _router +from openstack.tests.unit import base + + +class TestRouter(base.TestCase): + router_name = 'goofy' + router_id = '57076620-dcfb-42ed-8ad6-79ccb4a79ed2' + subnet_id = '1f1696eb-7f47-47f6-835c-4889bff88604' + + mock_router_rep = { + 'admin_state_up': True, + 'availability_zone_hints': [], + 'availability_zones': [], + 'description': '', + 'distributed': False, + 'external_gateway_info': None, + 'flavor_id': None, + 'ha': False, + 'id': router_id, + 'name': router_name, + 'project_id': '861808a93da0484ea1767967c4df8a23', + 'routes': [{"destination": "179.24.1.0/24", "nexthop": "172.24.3.99"}], + 'status': 'ACTIVE', + } + + mock_router_interface_rep = { + 'network_id': '53aee281-b06d-47fc-9e1a-37f045182b8e', + 'subnet_id': '1f1696eb-7f47-47f6-835c-4889bff88604', + 'project_id': '861808a93da0484ea1767967c4df8a23', + 'subnet_ids': [subnet_id], + 'port_id': '23999891-78b3-4a6b-818d-d1b713f67848', + 'id': '57076620-dcfb-42ed-8ad6-79ccb4a79ed2', + 'request_ids': ['req-f1b0b1b4-ae51-4ef9-b371-0cc3c3402cf7'], + } + + router_availability_zone_extension = { + "alias": "router_availability_zone", + "updated": "2015-01-01T10:00:00-00:00", + "description": "Availability zone support for router.", + "links": [], + "name": "Router Availability Zone", + } + + router_extraroute_extension = { + "alias": "extraroute", + "updated": "2015-01-01T10:00:00-00:00", + "description": "extra routes extension for 
router.", + "links": [], + "name": "Extra Routes", + } + + enabled_neutron_extensions = [ + router_availability_zone_extension, + router_extraroute_extension, + ] + + def _compare_routers(self, exp, real): + self.assertDictEqual( + _router.Router(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_get_router(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', self.router_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers'], + qs_elements=[f'name={self.router_name}'], + ), + json={'routers': [self.mock_router_rep]}, + ), + ] + ) + r = self.cloud.get_router(self.router_name) + self.assertIsNotNone(r) + self._compare_routers(self.mock_router_rep, r) + self.assert_calls() + + def test_get_router_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', 'mickey'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers'], + qs_elements=['name=mickey'], + ), + json={'routers': []}, + ), + ] + ) + r = self.cloud.get_router('mickey') + self.assertIsNone(r) + self.assert_calls() + + def test_create_router(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers'] + ), + json={'router': self.mock_router_rep}, + validate=dict( + json={ + 'router': { + 'name': self.router_name, + 'admin_state_up': True, + } + } + ), + ) + ] + ) + new_router = self.cloud.create_router( + name=self.router_name, admin_state_up=True + ) + + self._compare_routers(self.mock_router_rep, new_router) + self.assert_calls() + + def test_create_router_specific_project(self): + new_router_project_id = "project_id_value" + mock_router_rep = copy.copy(self.mock_router_rep) + 
mock_router_rep['project_id'] = new_router_project_id + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers'] + ), + json={'router': mock_router_rep}, + validate=dict( + json={ + 'router': { + 'name': self.router_name, + 'admin_state_up': True, + 'project_id': new_router_project_id, + } + } + ), + ) + ] + ) + + self.cloud.create_router( + self.router_name, project_id=new_router_project_id + ) + self.assert_calls() + + def test_create_router_with_availability_zone_hints(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers'] + ), + json={'router': self.mock_router_rep}, + validate=dict( + json={ + 'router': { + 'name': self.router_name, + 'admin_state_up': True, + 'availability_zone_hints': ['nova'], + } + } + ), + ), + ] + ) + self.cloud.create_router( + name=self.router_name, + admin_state_up=True, + availability_zone_hints=['nova'], + ) + self.assert_calls() + + def test_create_router_without_enable_snat(self): + """Do not send enable_snat when not given.""" + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers'] + ), + json={'router': self.mock_router_rep}, + validate=dict( + json={ + 'router': { + 'name': self.router_name, + 'admin_state_up': True, + } + } + ), + ) + ] + ) + self.cloud.create_router(name=self.router_name, admin_state_up=True) + self.assert_calls() + + def test_create_router_with_enable_snat_True(self): + """Send enable_snat when it is True.""" + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers'] + ), + json={'router': self.mock_router_rep}, + validate=dict( + json={ + 'router': { + 'name': 
self.router_name, + 'admin_state_up': True, + 'external_gateway_info': {'enable_snat': True}, + } + } + ), + ) + ] + ) + self.cloud.create_router( + name=self.router_name, admin_state_up=True, enable_snat=True + ) + self.assert_calls() + + def test_create_router_with_enable_snat_False(self): + """Send enable_snat when it is False.""" + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'routers'] + ), + json={'router': self.mock_router_rep}, + validate=dict( + json={ + 'router': { + 'name': self.router_name, + 'external_gateway_info': { + 'enable_snat': False + }, + 'admin_state_up': True, + } + } + ), + ) + ] + ) + self.cloud.create_router( + name=self.router_name, admin_state_up=True, enable_snat=False + ) + self.assert_calls() + + def test_create_router_wrong_availability_zone_hints_type(self): + azh_opts = "invalid" + with testtools.ExpectedException( + exceptions.SDKException, + "Parameter 'availability_zone_hints' must be a list", + ): + self.cloud.create_router( + name=self.router_name, + admin_state_up=True, + availability_zone_hints=azh_opts, + ) + + def test_add_router_interface(self): + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'routers', + self.router_id, + 'add_router_interface', + ], + ), + json={'port': self.mock_router_interface_rep}, + validate=dict(json={'subnet_id': self.subnet_id}), + ) + ] + ) + self.cloud.add_router_interface( + {'id': self.router_id}, subnet_id=self.subnet_id + ) + self.assert_calls() + + def test_remove_router_interface(self): + self.register_uris( + [ + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'routers', + self.router_id, + 'remove_router_interface', + ], + ), + json={'port': self.mock_router_interface_rep}, + validate=dict(json={'subnet_id': self.subnet_id}), + ) + ] + ) + self.cloud.remove_router_interface( + {'id': 
self.router_id}, subnet_id=self.subnet_id + ) + self.assert_calls() + + def test_remove_router_interface_missing_argument(self): + self.assertRaises( + ValueError, self.cloud.remove_router_interface, {'id': '123'} + ) + + def test_update_router(self): + new_router_name = "mickey" + new_routes = [] + expected_router_rep = copy.copy(self.mock_router_rep) + expected_router_rep['name'] = new_router_name + expected_router_rep['routes'] = new_routes + # validate_calls() asserts that these requests are done in order, + # but the extensions call is only called if a non-None value is + # passed in 'routes' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'extensions'] + ), + json={'extensions': self.enabled_neutron_extensions}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', self.router_id], + ), + json=self.mock_router_rep, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', self.router_id], + ), + json={'router': expected_router_rep}, + validate=dict( + json={ + 'router': { + 'name': new_router_name, + 'routes': new_routes, + } + } + ), + ), + ] + ) + new_router = self.cloud.update_router( + self.router_id, name=new_router_name, routes=new_routes + ) + + self._compare_routers(expected_router_rep, new_router) + self.assert_calls() + + def test_delete_router(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', self.router_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers'], + qs_elements=[f'name={self.router_name}'], + ), + json={'routers': [self.mock_router_rep]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', self.router_id], + ), + json={}, + ), + ] + ) + 
self.assertTrue(self.cloud.delete_router(self.router_name)) + self.assert_calls() + + def test_delete_router_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', self.router_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers'], + qs_elements=[f'name={self.router_name}'], + ), + json={'routers': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_router(self.router_name)) + self.assert_calls() + + def test_delete_router_multiple_found(self): + router1 = dict(id='123', name='mickey') + router2 = dict(id='456', name='mickey') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers', 'mickey'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'routers'], + qs_elements=['name=mickey'], + ), + json={'routers': [router1, router2]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, self.cloud.delete_router, 'mickey' + ) + self.assert_calls() + + def _test_list_router_interfaces( + self, router, interface_type, expected_result=None + ): + internal_ports = [ + { + 'id': 'internal_port_id', + 'fixed_ips': [ + { + 'subnet_id': 'internal_subnet_id', + 'ip_address': "10.0.0.1", + } + ], + 'device_id': self.router_id, + 'device_owner': device_owner, + } + for device_owner in [ + 'network:router_interface', + 'network:ha_router_replicated_interface', + 'network:router_interface_distributed', + ] + ] + + external_ports = [ + { + 'id': 'external_port_id', + 'fixed_ips': [ + { + 'subnet_id': 'external_subnet_id', + 'ip_address': "1.2.3.4", + } + ], + 'device_id': self.router_id, + 'device_owner': 'network:router_gateway', + } + ] + + if expected_result is None: + if interface_type == "internal": + expected_result = internal_ports + elif interface_type == 
"external": + expected_result = external_ports + else: + expected_result = internal_ports + external_ports + + mock_uri = dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'ports'], + qs_elements=[f"device_id={self.router_id}"], + ), + json={'ports': (internal_ports + external_ports)}, + ) + + self.register_uris([mock_uri]) + ret = self.cloud.list_router_interfaces(router, interface_type) + self.assertEqual( + [_port.Port(**i).to_dict(computed=False) for i in expected_result], + [i.to_dict(computed=False) for i in ret], + ) + self.assert_calls() + + router = { + 'id': router_id, + 'external_gateway_info': { + 'external_fixed_ips': [ + {'subnet_id': 'external_subnet_id', 'ip_address': '1.2.3.4'} + ] + }, + } + + def test_list_router_interfaces_all(self): + self._test_list_router_interfaces(self.router, interface_type=None) + + def test_list_router_interfaces_internal(self): + self._test_list_router_interfaces( + self.router, interface_type="internal" + ) + + def test_list_router_interfaces_external(self): + self._test_list_router_interfaces( + self.router, interface_type="external" + ) diff --git a/openstack/tests/unit/cloud/test_security_groups.py b/openstack/tests/unit/cloud/test_security_groups.py new file mode 100644 index 0000000000..f9fdbea178 --- /dev/null +++ b/openstack/tests/unit/cloud/test_security_groups.py @@ -0,0 +1,1298 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import copy + +import openstack.cloud +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +# TODO(mordred): Move id and name to using a getUniqueString() value + +neutron_grp_dict = fakes.make_fake_neutron_security_group( + id='1', + name='neutron-sec-group', + description='Test Neutron security group', + rules=[ + dict( + id='1', + port_range_min=80, + port_range_max=81, + protocol='tcp', + remote_ip_prefix='0.0.0.0/0', + ) + ], +) + + +nova_grp_dict = fakes.make_fake_nova_security_group( + id='2', + name='nova-sec-group', + description='Test Nova security group #1', + rules=[ + fakes.make_fake_nova_security_group_rule( + id='2', + from_port=8000, + to_port=8001, + ip_protocol='tcp', + cidr='0.0.0.0/0', + ), + ], +) + + +class TestSecurityGroups(base.TestCase): + def setUp(self): + super().setUp() + self.has_neutron = True + + def fake_has_service(*args, **kwargs): + return self.has_neutron + + self.cloud.has_service = fake_has_service + + def test_list_security_groups_neutron(self): + project_id = 42 + self.cloud.secgroup_source = 'neutron' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'security-groups'], + qs_elements=[f"project_id={project_id}"], + ), + json={'security_groups': [neutron_grp_dict]}, + ) + ] + ) + self.cloud.list_security_groups(filters={'project_id': project_id}) + self.assert_calls() + + def test_list_security_groups_nova(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups?project_id=42', + json={'security_groups': []}, + ), + ] + ) + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + self.cloud.list_security_groups(filters={'project_id': 42}) + + self.assert_calls() + + def test_list_security_groups_none(self): + self.cloud.secgroup_source = None + self.has_neutron = False + self.assertRaises( + 
openstack.cloud.OpenStackCloudUnavailableFeature, + self.cloud.list_security_groups, + ) + + def test_delete_security_group_neutron(self): + sg_id = neutron_grp_dict['id'] + self.cloud.secgroup_source = 'neutron' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'security-groups', f'{sg_id}'], + ), + status_code=200, + json={}, + ), + ] + ) + self.assertTrue(self.cloud.delete_security_group('1')) + self.assert_calls() + + def test_delete_security_group_nova(self): + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + nova_return = [nova_grp_dict] + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': nova_return}, + ), + dict( + method='DELETE', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups/2', + ), + ] + ) + self.cloud.delete_security_group('2') + self.assert_calls() + + def test_delete_security_group_neutron_not_found(self): + self.cloud.secgroup_source = 'neutron' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ) + ] + ) + self.assertFalse(self.cloud.delete_security_group('10')) + self.assert_calls() + + def test_delete_security_group_nova_not_found(self): + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + nova_return = [nova_grp_dict] + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': nova_return}, + ), + ] + ) + self.assertFalse(self.cloud.delete_security_group('doesNotExist')) + + def test_delete_security_group_none(self): + self.cloud.secgroup_source = None + self.assertRaises( + 
openstack.cloud.OpenStackCloudUnavailableFeature, + self.cloud.delete_security_group, + 'doesNotExist', + ) + + def test_create_security_group_neutron(self): + self.cloud.secgroup_source = 'neutron' + group_name = self.getUniqueString() + group_desc = self.getUniqueString('description') + new_group = fakes.make_fake_neutron_security_group( + id='2', name=group_name, description=group_desc, rules=[] + ) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_group': new_group}, + validate=dict( + json={ + 'security_group': { + 'name': group_name, + 'description': group_desc, + } + } + ), + ) + ] + ) + + r = self.cloud.create_security_group(group_name, group_desc) + self.assertEqual(group_name, r['name']) + self.assertEqual(group_desc, r['description']) + self.assertEqual(True, r['stateful']) + + self.assert_calls() + + def test_create_security_group_neutron_specific_tenant(self): + self.cloud.secgroup_source = 'neutron' + project_id = "861808a93da0484ea1767967c4df8a23" + group_name = self.getUniqueString() + group_desc = ( + 'security group from ' + 'test_create_security_group_neutron_specific_tenant' + ) + new_group = fakes.make_fake_neutron_security_group( + id='2', + name=group_name, + description=group_desc, + project_id=project_id, + rules=[], + ) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_group': new_group}, + validate=dict( + json={ + 'security_group': { + 'name': group_name, + 'description': group_desc, + 'tenant_id': project_id, + } + } + ), + ) + ] + ) + + r = self.cloud.create_security_group( + group_name, group_desc, project_id + ) + self.assertEqual(group_name, r['name']) + self.assertEqual(group_desc, r['description']) + self.assertEqual(project_id, r['tenant_id']) + + self.assert_calls() + + def 
test_create_security_group_stateless_neutron(self): + self.cloud.secgroup_source = 'neutron' + group_name = self.getUniqueString() + group_desc = self.getUniqueString('description') + new_group = fakes.make_fake_neutron_security_group( + id='2', + name=group_name, + description=group_desc, + stateful=False, + rules=[], + ) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_group': new_group}, + validate=dict( + json={ + 'security_group': { + 'name': group_name, + 'description': group_desc, + 'stateful': False, + } + } + ), + ) + ] + ) + + r = self.cloud.create_security_group( + group_name, group_desc, stateful=False + ) + self.assertEqual(group_name, r['name']) + self.assertEqual(group_desc, r['description']) + self.assertEqual(False, r['stateful']) + self.assert_calls() + + def test_create_security_group_nova(self): + group_name = self.getUniqueString() + self.has_neutron = False + group_desc = self.getUniqueString('description') + new_group = fakes.make_fake_nova_security_group( + id='2', name=group_name, description=group_desc, rules=[] + ) + self.register_uris( + [ + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_group': new_group}, + validate=dict( + json={ + 'security_group': { + 'name': group_name, + 'description': group_desc, + } + } + ), + ), + ] + ) + + self.cloud.secgroup_source = 'nova' + r = self.cloud.create_security_group(group_name, group_desc) + self.assertEqual(group_name, r['name']) + self.assertEqual(group_desc, r['description']) + + self.assert_calls() + + def test_create_security_group_none(self): + self.cloud.secgroup_source = None + self.has_neutron = False + self.assertRaises( + openstack.cloud.OpenStackCloudUnavailableFeature, + self.cloud.create_security_group, + '', + '', + ) + + def test_update_security_group_neutron(self): + self.cloud.secgroup_source = 'neutron' + new_name = 
self.getUniqueString() + sg_id = neutron_grp_dict['id'] + update_return = neutron_grp_dict.copy() + update_return['name'] = new_name + update_return['stateful'] = False + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'security-groups', f'{sg_id}'], + ), + json={'security_group': update_return}, + validate=dict( + json={ + 'security_group': { + 'name': new_name, + 'stateful': False, + } + } + ), + ), + ] + ) + r = self.cloud.update_security_group( + sg_id, name=new_name, stateful=False + ) + self.assertEqual(r['name'], new_name) + self.assertEqual(r['stateful'], False) + self.assert_calls() + + def test_update_security_group_nova(self): + self.has_neutron = False + new_name = self.getUniqueString() + self.cloud.secgroup_source = 'nova' + nova_return = [nova_grp_dict] + update_return = nova_grp_dict.copy() + update_return['name'] = new_name + + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': nova_return}, + ), + dict( + method='PUT', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups/2', + json={'security_group': update_return}, + ), + ] + ) + + r = self.cloud.update_security_group( + nova_grp_dict['id'], name=new_name + ) + self.assertEqual(r['name'], new_name) + self.assert_calls() + + def test_update_security_group_bad_kwarg(self): + self.assertRaises( + TypeError, + self.cloud.update_security_group, + 'doesNotExist', + bad_arg='', + ) + + def test_create_security_group_rule_neutron(self): + self.cloud.secgroup_source = 'neutron' + args = dict( + port_range_min=-1, + port_range_max=40000, + protocol='tcp', + remote_ip_prefix='0.0.0.0/0', + remote_group_id='456', + remote_address_group_id='1234-5678', + direction='egress', + ethertype='IPv6', + ) + 
expected_args = copy.copy(args) + # For neutron, -1 port should be converted to None + expected_args['port_range_min'] = None + expected_args['security_group_id'] = neutron_grp_dict['id'] + + expected_new_rule = copy.copy(expected_args) + expected_new_rule['id'] = '1234' + expected_new_rule['tenant_id'] = None + expected_new_rule['project_id'] = expected_new_rule['tenant_id'] + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'security-group-rules'], + ), + json={'security_group_rule': expected_new_rule}, + validate=dict(json={'security_group_rule': expected_args}), + ), + ] + ) + new_rule = self.cloud.create_security_group_rule( + secgroup_name_or_id=neutron_grp_dict['id'], **args + ).to_dict(original_names=True) + # NOTE(gtema): don't check location and not relevant properties + # in new rule + new_rule.pop('created_at') + new_rule.pop('description') + new_rule.pop('location') + new_rule.pop('name') + new_rule.pop('revision_number') + new_rule.pop('tags') + new_rule.pop('updated_at') + new_rule.pop('if-match') + self.assertEqual(expected_new_rule, new_rule) + self.assert_calls() + + def test_create_security_group_rule_neutron_specific_tenant(self): + self.cloud.secgroup_source = 'neutron' + args = dict( + port_range_min=-1, + port_range_max=40000, + protocol='tcp', + remote_ip_prefix='0.0.0.0/0', + remote_group_id='456', + remote_address_group_id=None, + direction='egress', + ethertype='IPv6', + project_id='861808a93da0484ea1767967c4df8a23', + ) + expected_args = copy.copy(args) + # For neutron, -1 port should be converted to None + expected_args['port_range_min'] = None + expected_args['security_group_id'] = neutron_grp_dict['id'] + expected_args['tenant_id'] = expected_args['project_id'] + expected_args.pop('project_id') + + 
expected_new_rule = copy.copy(expected_args) + expected_new_rule['id'] = '1234' + expected_new_rule['project_id'] = expected_new_rule['tenant_id'] + + # This is not sent in body if == None so should not be in the + # JSON; see SecurityGroupRule where it is removed. + expected_args.pop('remote_address_group_id') + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'security-group-rules'], + ), + json={'security_group_rule': expected_new_rule}, + validate=dict(json={'security_group_rule': expected_args}), + ), + ] + ) + new_rule = self.cloud.create_security_group_rule( + secgroup_name_or_id=neutron_grp_dict['id'], **args + ).to_dict(original_names=True) + # NOTE(slaweq): don't check location and properties in new rule + new_rule.pop('created_at') + new_rule.pop('description') + new_rule.pop('location') + new_rule.pop('name') + new_rule.pop('revision_number') + new_rule.pop('tags') + new_rule.pop('updated_at') + new_rule.pop('if-match') + self.assertEqual(expected_new_rule, new_rule) + self.assert_calls() + + def test_create_security_group_rule_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + nova_return = [nova_grp_dict] + + new_rule = fakes.make_fake_nova_security_group_rule( + id='xyz', + from_port=1, + to_port=2000, + ip_protocol='tcp', + cidr='1.2.3.4/32', + ) + + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': nova_return}, + ), + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules', + json={'security_group_rule': new_rule}, + validate=dict( + json={ + "security_group_rule": { + "from_port": 1, + "ip_protocol": "tcp", + "to_port": 2000, + "parent_group_id": "2", + "cidr": "1.2.3.4/32", + 
"group_id": "123", + } + } + ), + ), + ] + ) + + self.cloud.create_security_group_rule( + '2', + port_range_min=1, + port_range_max=2000, + protocol='tcp', + remote_ip_prefix='1.2.3.4/32', + remote_group_id='123', + ) + + self.assert_calls() + + def test_create_security_group_rule_nova_no_ports(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + new_rule = fakes.make_fake_nova_security_group_rule( + id='xyz', + from_port=1, + to_port=65535, + ip_protocol='tcp', + cidr='1.2.3.4/32', + ) + + nova_return = [nova_grp_dict] + + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': nova_return}, + ), + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules', + json={'security_group_rule': new_rule}, + validate=dict( + json={ + "security_group_rule": { + "from_port": 1, + "ip_protocol": "tcp", + "to_port": 65535, + "parent_group_id": "2", + "cidr": "1.2.3.4/32", + "group_id": "123", + } + } + ), + ), + ] + ) + + self.cloud.create_security_group_rule( + '2', + protocol='tcp', + remote_ip_prefix='1.2.3.4/32', + remote_group_id='123', + ) + + self.assert_calls() + + def test_create_security_group_rule_none(self): + self.has_neutron = False + self.cloud.secgroup_source = None + self.assertRaises( + openstack.cloud.OpenStackCloudUnavailableFeature, + self.cloud.create_security_group_rule, + '', + ) + + def test_delete_security_group_rule_neutron(self): + rule_id = "xyz" + self.cloud.secgroup_source = 'neutron' + self.register_uris( + [ + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'security-group-rules', + f'{rule_id}', + ], + ), + json={}, + ) + ] + ) + self.assertTrue(self.cloud.delete_security_group_rule(rule_id)) + self.assert_calls() + + def test_delete_security_group_rule_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris( + [ + dict( + 
method='DELETE', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-group-rules/xyz', + ), + ] + ) + r = self.cloud.delete_security_group_rule('xyz') + self.assertTrue(r) + self.assert_calls() + + def test_delete_security_group_rule_none(self): + self.has_neutron = False + self.cloud.secgroup_source = None + self.assertRaises( + openstack.cloud.OpenStackCloudUnavailableFeature, + self.cloud.delete_security_group_rule, + '', + ) + + def test_delete_security_group_rule_not_found(self): + rule_id = "doesNotExist" + self.cloud.secgroup_source = 'neutron' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ) + ] + ) + self.assertFalse(self.cloud.delete_security_group(rule_id)) + self.assert_calls() + + def test_delete_security_group_rule_not_found_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': [nova_grp_dict]}, + ), + ] + ) + r = self.cloud.delete_security_group('doesNotExist') + self.assertFalse(r) + + self.assert_calls() + + def test_nova_egress_security_group_rule(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': [nova_grp_dict]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_security_group_rule, + secgroup_name_or_id='nova-sec-group', + direction='egress', + ) + + self.assert_calls() + + def test_list_server_security_groups_nova(self): + self.has_neutron = False + + server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 
server['id']] + ), + json=server, + ), + dict( + method='GET', + uri='{endpoint}/servers/{id}/os-security-groups'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=server['id'] + ), + json={'security_groups': [nova_grp_dict]}, + ), + ] + ) + groups = self.cloud.list_server_security_groups(server) + self.assertEqual( + groups[0]['rules'][0]['ip_range']['cidr'], + nova_grp_dict['rules'][0]['ip_range']['cidr'], + ) + + self.assert_calls() + + def test_list_server_security_groups_bad_source(self): + self.has_neutron = False + self.cloud.secgroup_source = 'invalid' + server = dict(id='server_id') + ret = self.cloud.list_server_security_groups(server) + self.assertEqual([], ret) + + def test_add_security_group_to_server_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': [nova_grp_dict]}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), + validate=dict( + json={'addSecurityGroup': {'name': 'nova-sec-group'}} + ), + status_code=202, + ), + ] + ) + + ret = self.cloud.add_server_security_groups( + dict(id='1234'), 'nova-sec-group' + ) + + self.assertTrue(ret) + + self.assert_calls() + + def test_add_security_group_to_server_neutron(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use neutron for secgroup list and return an existing fake + self.cloud.secgroup_source = 'neutron' + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'server-name'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=server-name'], + ), + 
json={'servers': [fake_server]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ), + dict( + method='POST', + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), + validate=dict( + json={ + 'addSecurityGroup': {'name': 'neutron-sec-group'} + } + ), + status_code=202, + ), + ] + ) + + self.assertTrue( + self.cloud.add_server_security_groups( + 'server-name', 'neutron-sec-group' + ) + ) + self.assert_calls() + + def test_remove_security_group_from_server_nova(self): + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': [nova_grp_dict]}, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), + validate=dict( + json={ + 'removeSecurityGroup': {'name': 'nova-sec-group'} + } + ), + ), + ] + ) + + ret = self.cloud.remove_server_security_groups( + dict(id='1234'), 'nova-sec-group' + ) + self.assertTrue(ret) + + self.assert_calls() + + def test_remove_security_group_from_server_neutron(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use neutron for secgroup list and return an existing fake + self.cloud.secgroup_source = 'neutron' + + validate = {'removeSecurityGroup': {'name': 'neutron-sec-group'}} + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'server-name'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=server-name'], + ), + json={'servers': [fake_server]}, + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ), + dict( + method='POST', + uri='{}/servers/{}/action'.format( + fakes.COMPUTE_ENDPOINT, '1234' + ), + validate=dict(json=validate), + ), + ] + ) + + self.assertTrue( + self.cloud.remove_server_security_groups( + 'server-name', 'neutron-sec-group' + ) + ) + self.assert_calls() + + def test_add_bad_security_group_to_server_nova(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use nova for secgroup list and return an existing fake + self.has_neutron = False + self.cloud.secgroup_source = 'nova' + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'server-name'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=server-name'], + ), + json={'servers': [fake_server]}, + ), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/os-security-groups', + json={'security_groups': [nova_grp_dict]}, + ), + ] + ) + + ret = self.cloud.add_server_security_groups( + 'server-name', 'unknown-sec-group' + ) + self.assertFalse(ret) + + self.assert_calls() + + def test_add_bad_security_group_to_server_neutron(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + # use neutron for secgroup list and return an existing fake + self.cloud.secgroup_source = 'neutron' + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'server-name'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], 
+ qs_elements=['name=server-name'], + ), + json={'servers': [fake_server]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'security-groups'] + ), + json={'security_groups': [neutron_grp_dict]}, + ), + ] + ) + self.assertFalse( + self.cloud.add_server_security_groups( + 'server-name', 'unknown-sec-group' + ) + ) + self.assert_calls() + + def test_add_security_group_to_bad_server(self): + # fake to get server by name, server-name must match + fake_server = fakes.make_fake_server('1234', 'server-name', 'ACTIVE') + + print( + self.get_mock_url( + 'compute', + 'public', + append=['servers', 'unknown-server-name'], + base_url_append='v2.1', + ) + ) + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'unknown-server-name'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=unknown-server-name'], + ), + json={'servers': [fake_server]}, + ), + ] + ) + + ret = self.cloud.add_server_security_groups( + 'unknown-server-name', 'nova-sec-group' + ) + self.assertFalse(ret) + + self.assert_calls() + + def test_get_security_group_by_id_neutron(self): + self.cloud.secgroup_source = 'neutron' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'security-groups', + neutron_grp_dict['id'], + ], + ), + json={'security_group': neutron_grp_dict}, + ) + ] + ) + ret_sg = self.cloud.get_security_group_by_id(neutron_grp_dict['id']) + self.assertEqual(neutron_grp_dict['id'], ret_sg['id']) + self.assertEqual(neutron_grp_dict['name'], ret_sg['name']) + self.assertEqual( + neutron_grp_dict['description'], ret_sg['description'] + ) + self.assertEqual(neutron_grp_dict['stateful'], ret_sg['stateful']) + self.assert_calls() + + def test_get_security_group_by_id_nova(self): 
+ self.register_uris( + [ + dict( + method='GET', + uri='{endpoint}/os-security-groups/{id}'.format( + endpoint=fakes.COMPUTE_ENDPOINT, id=nova_grp_dict['id'] + ), + json={'security_group': nova_grp_dict}, + ), + ] + ) + self.cloud.secgroup_source = 'nova' + self.has_neutron = False + ret_sg = self.cloud.get_security_group_by_id(nova_grp_dict['id']) + self.assertEqual(nova_grp_dict['id'], ret_sg['id']) + self.assertEqual(nova_grp_dict['name'], ret_sg['name']) + self.assert_calls() + + def test_normalize_secgroups(self): + nova_secgroup = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group', + rules=[ + dict( + id='123', + from_port=80, + to_port=81, + ip_protocol='tcp', + ip_range={'cidr': '0.0.0.0/0'}, + parent_group_id='xyz123', + ) + ], + ) + expected = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group', + project_id='', + tenant_id='', + properties={}, + location=dict( + region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id='1c36b64c840a42cd9e9b931a369337f0', + domain_id=None, + name='admin', + ), + cloud='_test_cloud_', + ), + security_group_rules=[ + dict( + id='123', + direction='ingress', + ethertype='IPv4', + port_range_min=80, + port_range_max=81, + protocol='tcp', + remote_ip_prefix='0.0.0.0/0', + security_group_id='xyz123', + project_id='', + tenant_id='', + properties={}, + remote_group_id=None, + location=dict( + region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id='1c36b64c840a42cd9e9b931a369337f0', + domain_id=None, + name='admin', + ), + cloud='_test_cloud_', + ), + ) + ], + ) + # Set secgroup source to nova for this test as stateful parameter + # is only valid for neutron security groups. 
+ self.cloud.secgroup_source = 'nova' + retval = self.cloud._normalize_secgroup(nova_secgroup) + self.cloud.secgroup_source = 'neutron' + self.assertEqual(expected, retval) + + def test_normalize_secgroups_negone_port(self): + nova_secgroup = dict( + id='abc123', + name='nova_secgroup', + description='A Nova security group with -1 ports', + rules=[ + dict( + id='123', + from_port=-1, + to_port=-1, + ip_protocol='icmp', + ip_range={'cidr': '0.0.0.0/0'}, + parent_group_id='xyz123', + ) + ], + ) + retval = self.cloud._normalize_secgroup(nova_secgroup) + self.assertIsNone(retval['security_group_rules'][0]['port_range_min']) + self.assertIsNone(retval['security_group_rules'][0]['port_range_max']) + + def test_normalize_secgroup_rules(self): + nova_rules = [ + dict( + id='123', + from_port=80, + to_port=81, + ip_protocol='tcp', + ip_range={'cidr': '0.0.0.0/0'}, + parent_group_id='xyz123', + ) + ] + expected = [ + dict( + id='123', + direction='ingress', + ethertype='IPv4', + port_range_min=80, + port_range_max=81, + protocol='tcp', + remote_ip_prefix='0.0.0.0/0', + security_group_id='xyz123', + tenant_id='', + project_id='', + remote_group_id=None, + properties={}, + location=dict( + region_name='RegionOne', + zone=None, + project=dict( + domain_name='default', + id='1c36b64c840a42cd9e9b931a369337f0', + domain_id=None, + name='admin', + ), + cloud='_test_cloud_', + ), + ) + ] + retval = self.cloud._normalize_secgroup_rules(nova_rules) + self.assertEqual(expected, retval) diff --git a/openstack/tests/unit/cloud/test_server_console.py b/openstack/tests/unit/cloud/test_server_console.py new file mode 100644 index 0000000000..679b0ed404 --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_console.py @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestServerConsole(base.TestCase): + def setUp(self): + super().setUp() + + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.server = fakes.make_fake_server( + server_id=self.server_id, name=self.server_name + ) + self.output = self.getUniqueString('output') + + def test_get_server_console_dict(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', + json={"output": self.output}, + validate=dict(json={'os-getConsoleOutput': {'length': 5}}), + ), + ] + ) + + self.assertEqual( + self.output, self.cloud.get_server_console(self.server, 5) + ) + self.assert_calls() + + def test_get_server_console_name_or_id(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}', + json={'server': self.server}, + ), + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', + json={"output": self.output}, + validate=dict(json={'os-getConsoleOutput': {}}), + ), + ] + ) + + self.assertEqual( + self.output, self.cloud.get_server_console(self.server['id']) + ) + + self.assert_calls() + + def test_get_server_console_no_console(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=f'{fakes.COMPUTE_ENDPOINT}/servers/{self.server_id}/action', + status_code=400, 
+ validate=dict(json={'os-getConsoleOutput': {}}), + ), + ] + ) + + self.assertEqual('', self.cloud.get_server_console(self.server)) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_server_delete_metadata.py b/openstack/tests/unit/cloud/test_server_delete_metadata.py new file mode 100644 index 0000000000..b2d581f32c --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_delete_metadata.py @@ -0,0 +1,106 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_server_delete_metadata +---------------------------------- + +Tests for the `delete_server_metadata` command. +""" + +import uuid + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestServerDeleteMetadata(base.TestCase): + def setUp(self): + super().setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name + ) + + def test_server_delete_metadata_with_exception(self): + """ + Test that a missing metadata throws an exception. 
+ """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.fake_server}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + self.fake_server['id'], + 'metadata', + 'key', + ], + ), + status_code=404, + ), + ] + ) + + self.assertRaises( + exceptions.NotFoundException, + self.cloud.delete_server_metadata, + self.server_id, + ['key'], + ) + + self.assert_calls() + + def test_server_delete_metadata(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': self.fake_server}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + self.fake_server['id'], + 'metadata', + 'key', + ], + ), + status_code=200, + ), + ] + ) + + self.cloud.delete_server_metadata(self.server_id, ['key']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_server_group.py b/openstack/tests/unit/cloud/test_server_group.py new file mode 100644 index 0000000000..bf37d30d6c --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_group.py @@ -0,0 +1,93 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +import uuid + +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestServerGroup(base.TestCase): + def setUp(self): + super().setUp() + self.group_id = uuid.uuid4().hex + self.group_name = self.getUniqueString('server-group') + self.policies = ['affinity'] + self.fake_group = fakes.make_fake_server_group( + self.group_id, self.group_name, self.policies + ) + + def test_create_server_group(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', 'public', append=['os-server-groups'] + ), + json={'server_group': self.fake_group}, + validate=dict( + json={ + 'server_group': { + 'name': self.group_name, + 'policies': self.policies, + } + } + ), + ), + ] + ) + + self.cloud.create_server_group( + name=self.group_name, policies=self.policies + ) + + self.assert_calls() + + def test_delete_server_group(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-server-groups', self.group_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-server-groups'], + ), + json={'server_groups': [self.fake_group]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-server-groups', self.group_id], + ), + json={'server_groups': [self.fake_group]}, + ), + ] + ) + self.assertTrue(self.cloud.delete_server_group(self.group_name)) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_server_set_metadata.py b/openstack/tests/unit/cloud/test_server_set_metadata.py new file mode 100644 index 0000000000..522c1270fb --- /dev/null +++ b/openstack/tests/unit/cloud/test_server_set_metadata.py @@ -0,0 +1,110 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_server_set_metadata +---------------------------------- + +Tests for the `set_server_metadata` command. +""" + +import uuid + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestServerSetMetadata(base.TestCase): + def setUp(self): + super().setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name + ) + + def test_server_set_metadata_with_exception(self): + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=[f'name={self.server_name}'], + ), + json={'servers': [self.fake_server]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.fake_server['id'], 'metadata'], + ), + validate=dict(json={'metadata': {'meta': 'data'}}), + json={}, + status_code=400, + ), + ] + ) + + self.assertRaises( + exceptions.BadRequestException, + self.cloud.set_server_metadata, + self.server_name, + {'meta': 'data'}, + ) + + self.assert_calls() + + def test_server_set_metadata(self): + metadata = {'meta': 'data'} + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', 
append=['servers', self.server_id] + ), + json={'server': self.fake_server}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.fake_server['id'], 'metadata'], + ), + validate=dict(json={'metadata': metadata}), + status_code=200, + json={'metadata': metadata}, + ), + ] + ) + + self.cloud.set_server_metadata(self.server_id, metadata) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_services.py b/openstack/tests/unit/cloud/test_services.py new file mode 100644 index 0000000000..58a3ea4e74 --- /dev/null +++ b/openstack/tests/unit/cloud/test_services.py @@ -0,0 +1,380 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +""" +test_cloud_services +---------------------------------- + +Tests Keystone services commands. 
+""" + +import warnings + +from testtools import matchers + +from openstack import exceptions +from openstack.tests.unit import base + + +class CloudServices(base.TestCase): + def setUp(self, cloud_config_fixture='clouds.yaml'): + super().setUp(cloud_config_fixture) + + def get_mock_url( + self, + service_type='identity', + interface='public', + resource='services', + append=None, + base_url_append='v3', + qs_elements=None, + ): + return super().get_mock_url( + service_type, + interface, + resource, + append, + base_url_append, + qs_elements, + ) + + def test_create_service_v3(self): + service_data = self._get_service_data( + name='a service', type='network', description='A test service' + ) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url(), + status_code=200, + json=service_data.json_response_v3, + validate=dict(json={'service': service_data.json_request}), + ) + ] + ) + + service = self.cloud.create_service( + name=service_data.service_name, + service_type=service_data.service_type, + description=service_data.description, + ) + self.assertThat( + service.name, matchers.Equals(service_data.service_name) + ) + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + self.assertThat( + service.description, matchers.Equals(service_data.description) + ) + self.assertThat( + service.type, matchers.Equals(service_data.service_type) + ) + self.assert_calls() + + def test_update_service_v3(self): + service_data = self._get_service_data( + name='a service', type='network', description='A test service' + ) + request = service_data.json_request.copy() + request['enabled'] = False + resp = service_data.json_response_v3.copy() + resp['enabled'] = False + request.pop('description') + request.pop('name') + request.pop('type') + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[service_data.service_id]), + status_code=200, + json=service_data.json_request, + ), + dict( + method='PATCH', + 
uri=self.get_mock_url(append=[service_data.service_id]), + status_code=200, + json=resp, + validate=dict(json={'service': request}), + ), + ] + ) + + service = self.cloud.update_service( + service_data.service_id, enabled=False + ) + self.assertThat( + service.name, matchers.Equals(service_data.service_name) + ) + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + self.assertThat( + service.description, matchers.Equals(service_data.description) + ) + self.assertThat( + service.type, matchers.Equals(service_data.service_type) + ) + self.assert_calls() + + def test_list_services(self): + service_data = self._get_service_data() + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'services': [service_data.json_response_v3['service']] + }, + ) + ] + ) + services = self.cloud.list_services() + self.assertThat(len(services), matchers.Equals(1)) + self.assertThat( + services[0].id, matchers.Equals(service_data.service_id) + ) + self.assertThat( + services[0].name, matchers.Equals(service_data.service_name) + ) + self.assertThat( + services[0].type, matchers.Equals(service_data.service_type) + ) + self.assert_calls() + + def test_get_service(self): + service_data = self._get_service_data() + service2_data = self._get_service_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(append=[service_data.service_id]), + status_code=200, + json=service_data.json_response_v3, + ), + # you can't retrieve by name + dict( + method='GET', + uri=self.get_mock_url(append=[service_data.service_name]), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service'], + ] + }, + ), + # you can't retrieve by name, especially if it doesn't exist + dict( + method='GET', + uri=self.get_mock_url(append=['INVALID SERVICE']), + status_code=404, + ), + 
dict( + method='GET', + uri=self.get_mock_url( + qs_elements=['name=INVALID SERVICE'] + ), + json={'services': []}, + ), + ] + ) + + # Search by id + service = self.cloud.get_service(name_or_id=service_data.service_id) + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + + # Search by name + service = self.cloud.get_service(name_or_id=service_data.service_name) + # test we are getting exactly 1 element + self.assertThat(service.id, matchers.Equals(service_data.service_id)) + + # Not found + service = self.cloud.get_service(name_or_id='INVALID SERVICE') + self.assertIs(None, service) + + def test_get_service__multiple_matches(self): + service_a_data = self._get_service_data(type='type2') + service_b_data = self._get_service_data(type='type2') + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'services': [ + service_a_data.json_response_v3['service'], + service_b_data.json_response_v3['service'], + ] + }, + ), + ] + ) + + # Multiple matches + # test we are getting an Exception + with warnings.catch_warnings(record=True): + self.assertRaises( + exceptions.SDKException, + self.cloud.get_service, + name_or_id=None, + filters={'type': 'type2'}, + ) + self.assert_calls() + + def test_search_services(self): + service_data = self._get_service_data() + service2_data = self._get_service_data(type=service_data.service_type) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service'], + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service'], + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'services': [ + service_data.json_response_v3['service'], + 
service2_data.json_response_v3['service'], + ] + }, + ), + dict( + method='GET', + uri=self.get_mock_url(), + status_code=200, + json={ + 'services': [ + service_data.json_response_v3['service'], + service2_data.json_response_v3['service'], + ] + }, + ), + ] + ) + + # Search by id + services = self.cloud.search_services( + name_or_id=service_data.service_id + ) + # test we are getting exactly 1 element + self.assertThat(len(services), matchers.Equals(1)) + self.assertThat( + services[0].id, matchers.Equals(service_data.service_id) + ) + + # Search by name + services = self.cloud.search_services( + name_or_id=service_data.service_name + ) + # test we are getting exactly 1 element + self.assertThat(len(services), matchers.Equals(1)) + self.assertThat( + services[0].name, matchers.Equals(service_data.service_name) + ) + + # Not found + services = self.cloud.search_services(name_or_id='!INVALID!') + self.assertThat(len(services), matchers.Equals(0)) + + # Multiple matches + services = self.cloud.search_services( + filters={'type': service_data.service_type} + ) + # test we are getting exactly 2 elements + self.assertThat(len(services), matchers.Equals(2)) + self.assertThat( + services[0].id, matchers.Equals(service_data.service_id) + ) + self.assertThat( + services[1].id, matchers.Equals(service2_data.service_id) + ) + self.assert_calls() + + def test_delete_service(self): + service_data = self._get_service_data() + self.register_uris( + [ + # you can't retrieve by name + dict( + method='GET', + uri=self.get_mock_url(append=[service_data.service_name]), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + qs_elements=[f'name={service_data.service_name}'] + ), + status_code=200, + json={ + 'services': [service_data.json_response_v3['service']] + }, + ), + dict( + method='DELETE', + uri=self.get_mock_url(append=[service_data.service_id]), + status_code=204, + ), + dict( + method='GET', + uri=self.get_mock_url(append=[service_data.service_id]), + 
status_code=200, + json=service_data.json_response_v3, + ), + dict( + method='DELETE', + uri=self.get_mock_url(append=[service_data.service_id]), + status_code=204, + ), + ] + ) + + # Delete by name + self.cloud.delete_service(name_or_id=service_data.service_name) + + # Delete by id + self.cloud.delete_service(service_data.service_id) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_shared_file_system.py b/openstack/tests/unit/cloud/test_shared_file_system.py new file mode 100644 index 0000000000..f87fc31329 --- /dev/null +++ b/openstack/tests/unit/cloud/test_shared_file_system.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.tests.unit import base + + +IDENTIFIER = str(uuid.uuid4()) +MANILA_AZ_DICT = { + "id": IDENTIFIER, + "name": "manila-zone-0", + "created_at": "2021-01-21T20:13:55.000000", + "updated_at": None, +} + + +class TestSharedFileSystem(base.TestCase): + def setUp(self): + super().setUp() + self.use_manila() + + def test_list_availability_zones(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'shared-file-system', + 'public', + append=['v2', 'availability-zones'], + ), + json={'availability_zones': [MANILA_AZ_DICT]}, + ), + ] + ) + az_list = self.cloud.list_share_availability_zones() + self.assertEqual(len(az_list), 1) + self.assertEqual(MANILA_AZ_DICT['id'], az_list[0].id) + self.assertEqual(MANILA_AZ_DICT['name'], az_list[0].name) + self.assertEqual(MANILA_AZ_DICT['created_at'], az_list[0].created_at) + self.assertEqual(MANILA_AZ_DICT['updated_at'], az_list[0].updated_at) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_stack.py b/openstack/tests/unit/cloud/test_stack.py new file mode 100644 index 0000000000..28bb63961b --- /dev/null +++ b/openstack/tests/unit/cloud/test_stack.py @@ -0,0 +1,708 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import tempfile + +import testtools + +from openstack import exceptions +from openstack.orchestration.v1 import stack +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestStack(base.TestCase): + def setUp(self): + super().setUp() + self.stack_id = self.getUniqueString('id') + self.stack_name = self.getUniqueString('name') + self.stack_tag = self.getUniqueString('tag') + self.stack = fakes.make_fake_stack(self.stack_id, self.stack_name) + + def _compare_stacks(self, exp, real): + self.assertDictEqual( + stack.Stack(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_list_stacks(self): + fake_stacks = [ + self.stack, + fakes.make_fake_stack( + self.getUniqueString('id'), self.getUniqueString('name') + ), + ] + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', + json={"stacks": fake_stacks}, + ), + ] + ) + stacks = self.cloud.list_stacks() + [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] + + self.assert_calls() + + def test_list_stacks_filters(self): + fake_stacks = [ + self.stack, + fakes.make_fake_stack( + self.getUniqueString('id'), self.getUniqueString('name') + ), + ] + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'orchestration', + 'public', + append=['stacks'], + qs_elements=['name=a', 'status=b'], + ), + json={"stacks": fake_stacks}, + ), + ] + ) + stacks = self.cloud.list_stacks(name='a', status='b') + [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] + + self.assert_calls() + + def test_list_stacks_exception(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', + status_code=404, + ) + ] + ) + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.list_stacks() + self.assert_calls() + + def test_search_stacks(self): + fake_stacks = [ + self.stack, + fakes.make_fake_stack( + self.getUniqueString('id'), 
self.getUniqueString('name') + ), + ] + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', + json={"stacks": fake_stacks}, + ), + ] + ) + stacks = self.cloud.search_stacks() + [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] + self.assert_calls() + + def test_search_stacks_filters(self): + fake_stacks = [ + self.stack, + fakes.make_fake_stack( + self.getUniqueString('id'), + self.getUniqueString('name'), + status='CREATE_FAILED', + ), + ] + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', + json={"stacks": fake_stacks}, + ), + ] + ) + filters = {'status': 'FAILED'} + stacks = self.cloud.search_stacks(filters=filters) + [self._compare_stacks(b, a) for a, b in zip(stacks, fake_stacks)] + self.assert_calls() + + def test_search_stacks_exception(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', + status_code=404, + ) + ] + ) + with testtools.ExpectedException(exceptions.NotFoundException): + self.cloud.search_stacks() + + def test_delete_stack(self): + resolve = 'resolve_outputs=False' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + json={"stack": self.stack}, + ), + dict( + method='DELETE', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', + ), + ] + ) + self.assertTrue(self.cloud.delete_stack(self.stack_name)) + self.assert_calls() + + def test_delete_stack_not_found(self): + resolve = 'resolve_outputs=False' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/stack_name?{resolve}', + status_code=404, + ), + ] + ) + 
self.assertFalse(self.cloud.delete_stack('stack_name')) + self.assert_calls() + + def test_delete_stack_exception(self): + resolve = 'resolve_outputs=False' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + json={"stack": self.stack}, + ), + dict( + method='DELETE', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', + status_code=400, + reason="ouch", + ), + ] + ) + with testtools.ExpectedException(exceptions.BadRequestException): + self.cloud.delete_stack(self.stack_id) + self.assert_calls() + + def test_delete_stack_by_name_wait(self): + marker_event = fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='CREATE_COMPLETE', + resource_name='name', + ) + marker_qs = 'marker={e_id}&sort_dir=asc'.format( + e_id=marker_event['id'] + ) + resolve = 'resolve_outputs=False' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + json={"stack": self.stack}, + ), + dict( + method='GET', + uri='{endpoint}/stacks/{name}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name, + qs='limit=1&sort_dir=desc', + ), + complete_qs=True, + json={"events": [marker_event]}, + ), + dict( + method='DELETE', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', + ), + dict( + method='GET', + 
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?{marker_qs}', + complete_qs=True, + json={ + "events": [ + fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='DELETE_COMPLETE', + resource_name='name', + ), + ] + }, + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}?{resolve}', + status_code=404, + ), + ] + ) + + self.assertTrue(self.cloud.delete_stack(self.stack_name, wait=True)) + self.assert_calls() + + def test_delete_stack_by_id_wait(self): + marker_event = fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='CREATE_COMPLETE', + resource_name='name', + ) + marker_qs = 'marker={e_id}&sort_dir=asc'.format( + e_id=marker_event['id'] + ) + resolve = 'resolve_outputs=False' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + json={"stack": self.stack}, + ), + dict( + method='GET', + uri='{endpoint}/stacks/{id}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, + qs='limit=1&sort_dir=desc', + ), + complete_qs=True, + json={"events": [marker_event]}, + ), + dict( + method='DELETE', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}/events?{marker_qs}', + complete_qs=True, + json={ + "events": [ + fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='DELETE_COMPLETE', + ), + ] + }, + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', + status_code=404, + ), + ] + ) + + self.assertTrue(self.cloud.delete_stack(self.stack_id, wait=True)) 
+ self.assert_calls() + + def test_delete_stack_wait_failed(self): + failed_stack = self.stack.copy() + failed_stack['stack_status'] = 'DELETE_FAILED' + marker_event = fakes.make_fake_stack_event( + self.stack_id, self.stack_name, status='CREATE_COMPLETE' + ) + marker_qs = 'marker={e_id}&sort_dir=asc'.format( + e_id=marker_event['id'] + ) + resolve = 'resolve_outputs=False' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?{resolve}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + json={"stack": self.stack}, + ), + dict( + method='GET', + uri='{endpoint}/stacks/{id}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + id=self.stack_id, + qs='limit=1&sort_dir=desc', + ), + complete_qs=True, + json={"events": [marker_event]}, + ), + dict( + method='DELETE', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}', + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}/events?{marker_qs}', + complete_qs=True, + json={ + "events": [ + fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='DELETE_COMPLETE', + ), + ] + }, + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_id}?resolve_outputs=False', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}?{resolve}', + json={"stack": failed_stack}, + ), + ] + ) + + with testtools.ExpectedException(exceptions.SDKException): + self.cloud.delete_stack(self.stack_id, wait=True) + + self.assert_calls() + + def test_create_stack(self): + test_template = 
tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + self.register_uris( + [ + dict( + method='POST', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', + json={"stack": self.stack}, + validate=dict( + json={ + 'disable_rollback': False, + 'parameters': {}, + 'stack_name': self.stack_name, + 'tags': self.stack_tag, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60, + } + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', + json={"stack": self.stack}, + ), + ] + ) + + self.cloud.create_stack( + self.stack_name, + tags=self.stack_tag, + template_file=test_template.name, + ) + + self.assert_calls() + + def test_create_stack_wait(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + + self.register_uris( + [ + dict( + method='POST', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks', + json={"stack": self.stack}, + validate=dict( + json={ + 'disable_rollback': False, + 'parameters': {}, + 'stack_name': self.stack_name, + 'tags': self.stack_tag, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60, + } + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?sort_dir=asc', + json={ + "events": [ + fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='CREATE_COMPLETE', + resource_name='name', + ), + ] + }, + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' + ), + ), + dict( + 
method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', + json={"stack": self.stack}, + ), + ] + ) + self.cloud.create_stack( + self.stack_name, + tags=self.stack_tag, + template_file=test_template.name, + wait=True, + ) + + self.assert_calls() + + def test_update_stack(self): + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + + self.register_uris( + [ + dict( + method='PUT', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + validate=dict( + json={ + 'disable_rollback': False, + 'parameters': {}, + 'tags': self.stack_tag, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60, + } + ), + json={}, + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', + json={"stack": self.stack}, + ), + ] + ) + self.cloud.update_stack( + self.stack_name, + tags=self.stack_tag, + template_file=test_template.name, + ) + + self.assert_calls() + + def test_update_stack_wait(self): + marker_event = fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='CREATE_COMPLETE', + resource_name='name', + ) + marker_qs = 'marker={e_id}&sort_dir=asc'.format( + e_id=marker_event['id'] + ) + test_template = tempfile.NamedTemporaryFile(delete=False) + test_template.write(fakes.FAKE_TEMPLATE.encode('utf-8')) + test_template.close() + + self.register_uris( + [ + dict( + method='GET', + uri='{endpoint}/stacks/{name}/events?{qs}'.format( + endpoint=fakes.ORCHESTRATION_ENDPOINT, + name=self.stack_name, + qs='limit=1&sort_dir=desc', + ), + json={"events": [marker_event]}, + ), + dict( + method='PUT', + 
uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + validate=dict( + json={ + 'disable_rollback': False, + 'parameters': {}, + 'tags': self.stack_tag, + 'template': fakes.FAKE_TEMPLATE_CONTENT, + 'timeout_mins': 60, + } + ), + json={}, + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/events?{marker_qs}', + json={ + "events": [ + fakes.make_fake_stack_event( + self.stack_id, + self.stack_name, + status='UPDATE_COMPLETE', + resource_name='name', + ), + ] + }, + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', + json={"stack": self.stack}, + ), + ] + ) + self.cloud.update_stack( + self.stack_name, + tags=self.stack_tag, + template_file=test_template.name, + wait=True, + ) + + self.assert_calls() + + def test_get_stack(self): + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', + json={"stack": self.stack}, + ), + ] + ) + + res = self.cloud.get_stack(self.stack_name) + self.assertIsNotNone(res) + self.assertEqual(self.stack['stack_name'], res['name']) + self.assertEqual(self.stack['stack_status'], res['stack_status']) + self.assertEqual('CREATE_COMPLETE', res['status']) + + self.assert_calls() + + def test_get_stack_in_progress(self): + in_progress = self.stack.copy() + in_progress['stack_status'] = 'CREATE_IN_PROGRESS' + self.register_uris( + [ + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}', + 
status_code=302, + headers=dict( + location=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}' + ), + ), + dict( + method='GET', + uri=f'{fakes.ORCHESTRATION_ENDPOINT}/stacks/{self.stack_name}/{self.stack_id}', + json={"stack": in_progress}, + ), + ] + ) + + res = self.cloud.get_stack(self.stack_name) + self.assertIsNotNone(res) + self.assertEqual(in_progress['stack_name'], res.name) + self.assertEqual(in_progress['stack_status'], res['stack_status']) + self.assertEqual('CREATE_IN_PROGRESS', res['status']) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_subnet.py b/openstack/tests/unit/cloud/test_subnet.py new file mode 100644 index 0000000000..11bf505790 --- /dev/null +++ b/openstack/tests/unit/cloud/test_subnet.py @@ -0,0 +1,867 @@ +# Copyright 2017 OVH SAS +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import copy + +import testtools + +from openstack import exceptions +from openstack.network.v2 import subnet as _subnet +from openstack.tests.unit import base + + +class TestSubnet(base.TestCase): + network_name = 'network_name' + subnet_name = 'subnet_name' + subnet_id = '1f1696eb-7f47-47f6-835c-4889bff88604' + subnet_cidr = '192.168.199.0/24' + subnetpool_cidr = '172.16.0.0/28' + prefix_length = 28 + + mock_network_rep = { + 'id': '881d1bb7-a663-44c0-8f9f-ee2765b74486', + 'name': network_name, + } + + mock_subnet_rep = { + 'allocation_pools': [ + {'start': '192.168.199.2', 'end': '192.168.199.254'} + ], + 'cidr': subnet_cidr, + 'created_at': '2017-04-24T20:22:23Z', + 'description': '', + 'dns_nameservers': [], + 'enable_dhcp': False, + 'gateway_ip': '192.168.199.1', + 'host_routes': [], + 'id': subnet_id, + 'ip_version': 4, + 'ipv6_address_mode': None, + 'ipv6_ra_mode': None, + 'name': subnet_name, + 'network_id': mock_network_rep['id'], + 'project_id': '861808a93da0484ea1767967c4df8a23', + 'revision_number': 2, + 'service_types': [], + 'subnetpool_id': None, + 'tags': [], + } + + mock_subnetpool_rep = { + 'id': 'f49a1319-423a-4ee6-ba54-1d95a4f6cc68', + 'prefixes': ['172.16.0.0/16'], + } + + def _compare_subnets(self, exp, real): + self.assertDictEqual( + _subnet.Subnet(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_get_subnet(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets'], + qs_elements=[f'name={self.subnet_name}'], + ), + json={'subnets': [self.mock_subnet_rep]}, + ), + ] + ) + r = self.cloud.get_subnet(self.subnet_name) + self.assertIsNotNone(r) + self._compare_subnets(self.mock_subnet_rep, r) + self.assert_calls() + + def test_get_subnet_by_id(self): + self.register_uris( + [ + dict( + 
method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json={'subnet': self.mock_subnet_rep}, + ) + ] + ) + r = self.cloud.get_subnet_by_id(self.subnet_id) + self.assertIsNotNone(r) + self._compare_subnets(self.mock_subnet_rep, r) + self.assert_calls() + + def test_create_subnet(self): + pool = [{'start': '192.168.199.2', 'end': '192.168.199.254'}] + dns = ['8.8.8.8'] + routes = [{"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}] + mock_subnet_rep = copy.copy(self.mock_subnet_rep) + mock_subnet_rep['allocation_pools'] = pool + mock_subnet_rep['dns_nameservers'] = dns + mock_subnet_rep['host_routes'] = routes + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [self.mock_network_rep]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnet': mock_subnet_rep}, + validate=dict( + json={ + 'subnet': { + 'cidr': self.subnet_cidr, + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id'], + 'allocation_pools': pool, + 'dns_nameservers': dns, + 'host_routes': routes, + } + } + ), + ), + ] + ) + subnet = self.cloud.create_subnet( + self.network_name, + self.subnet_cidr, + allocation_pools=pool, + dns_nameservers=dns, + host_routes=routes, + ) + self._compare_subnets(mock_subnet_rep, subnet) + self.assert_calls() + + def test_create_subnet_string_ip_version(self): + '''Allow ip_version as a string''' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [self.mock_network_rep]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnet': self.mock_subnet_rep}, + validate=dict( + json={ + 'subnet': { + 'cidr': self.subnet_cidr, + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id'], + } + } + ), + ), + ] + ) + subnet = self.cloud.create_subnet( + self.network_name, self.subnet_cidr, ip_version='4' + ) + self._compare_subnets(self.mock_subnet_rep, subnet) + self.assert_calls() + + def test_create_subnet_bad_ip_version(self): + '''String ip_versions must be convertable to int''' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [self.mock_network_rep]}, + ), + ] + ) + with testtools.ExpectedException( + exceptions.SDKException, "ip_version must be an integer" + ): + self.cloud.create_subnet( + self.network_name, self.subnet_cidr, ip_version='4x' + ) + self.assert_calls() + + def test_create_subnet_without_gateway_ip(self): + pool = [{'start': '192.168.199.2', 'end': '192.168.199.254'}] + dns = ['8.8.8.8'] + mock_subnet_rep = copy.copy(self.mock_subnet_rep) + mock_subnet_rep['allocation_pools'] = pool + mock_subnet_rep['dns_nameservers'] = dns + mock_subnet_rep['gateway_ip'] = None + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + 
qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [self.mock_network_rep]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnet': mock_subnet_rep}, + validate=dict( + json={ + 'subnet': { + 'cidr': self.subnet_cidr, + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id'], + 'allocation_pools': pool, + 'gateway_ip': None, + 'dns_nameservers': dns, + } + } + ), + ), + ] + ) + subnet = self.cloud.create_subnet( + self.network_name, + self.subnet_cidr, + allocation_pools=pool, + dns_nameservers=dns, + disable_gateway_ip=True, + ) + self._compare_subnets(mock_subnet_rep, subnet) + self.assert_calls() + + def test_create_subnet_with_gateway_ip(self): + pool = [{'start': '192.168.199.8', 'end': '192.168.199.254'}] + gateway = '192.168.199.2' + dns = ['8.8.8.8'] + mock_subnet_rep = copy.copy(self.mock_subnet_rep) + mock_subnet_rep['allocation_pools'] = pool + mock_subnet_rep['dns_nameservers'] = dns + mock_subnet_rep['gateway_ip'] = gateway + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [self.mock_network_rep]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnet': mock_subnet_rep}, + validate=dict( + json={ + 'subnet': { + 'cidr': self.subnet_cidr, + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id'], + 'allocation_pools': pool, + 'gateway_ip': gateway, + 'dns_nameservers': dns, + } + } + ), + ), + ] + ) + subnet = self.cloud.create_subnet( + self.network_name, + self.subnet_cidr, + allocation_pools=pool, + dns_nameservers=dns, + 
gateway_ip=gateway, + ) + self._compare_subnets(mock_subnet_rep, subnet) + self.assert_calls() + + def test_create_subnet_conflict_gw_ops(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', 'kooky'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=['name=kooky'], + ), + json={'networks': [self.mock_network_rep]}, + ), + ] + ) + gateway = '192.168.200.3' + self.assertRaises( + exceptions.SDKException, + self.cloud.create_subnet, + 'kooky', + self.subnet_cidr, + gateway_ip=gateway, + disable_gateway_ip=True, + ) + self.assert_calls() + + def test_create_subnet_bad_network(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', 'duck'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=['name=duck'], + ), + json={'networks': [self.mock_network_rep]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_subnet, + 'duck', + self.subnet_cidr, + ) + self.assert_calls() + + def test_create_subnet_non_unique_network(self): + net1 = dict(id='123', name=self.network_name) + net2 = dict(id='456', name=self.network_name) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [net1, net2]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.create_subnet, + self.network_name, + self.subnet_cidr, + ) + self.assert_calls() + + def 
test_create_subnet_from_subnetpool_with_prefixlen(self): + pool = [{'start': '172.16.0.2', 'end': '172.16.0.15'}] + id = '143296eb-7f47-4755-835c-488123475604' + gateway = '172.16.0.1' + dns = ['8.8.8.8'] + routes = [{"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}] + mock_subnet_rep = copy.copy(self.mock_subnet_rep) + mock_subnet_rep['allocation_pools'] = pool + mock_subnet_rep['dns_nameservers'] = dns + mock_subnet_rep['host_routes'] = routes + mock_subnet_rep['gateway_ip'] = gateway + mock_subnet_rep['subnetpool_id'] = self.mock_subnetpool_rep['id'] + mock_subnet_rep['cidr'] = self.subnetpool_cidr + mock_subnet_rep['id'] = id + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [self.mock_network_rep]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnet': mock_subnet_rep}, + validate=dict( + json={ + 'subnet': { + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id'], + 'allocation_pools': pool, + 'dns_nameservers': dns, + 'use_default_subnetpool': True, + 'prefixlen': self.prefix_length, + 'host_routes': routes, + } + } + ), + ), + ] + ) + subnet = self.cloud.create_subnet( + self.network_name, + allocation_pools=pool, + dns_nameservers=dns, + use_default_subnetpool=True, + prefixlen=self.prefix_length, + host_routes=routes, + ) + mock_subnet_rep.update( + {'prefixlen': self.prefix_length, 'use_default_subnetpool': True} + ) + self._compare_subnets(mock_subnet_rep, subnet) + self.assert_calls() + + def test_create_subnet_from_specific_subnetpool(self): + pool = [{'start': '172.16.0.2', 'end': '172.16.0.15'}] + id = '143296eb-7f47-4755-835c-488123475604' + 
gateway = '172.16.0.1' + dns = ['8.8.8.8'] + routes = [{"destination": "0.0.0.0/0", "nexthop": "123.456.78.9"}] + mock_subnet_rep = copy.copy(self.mock_subnet_rep) + mock_subnet_rep['allocation_pools'] = pool + mock_subnet_rep['dns_nameservers'] = dns + mock_subnet_rep['host_routes'] = routes + mock_subnet_rep['gateway_ip'] = gateway + mock_subnet_rep['subnetpool_id'] = self.mock_subnetpool_rep['id'] + mock_subnet_rep['cidr'] = self.subnetpool_cidr + mock_subnet_rep['id'] = id + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks', self.network_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'networks'], + qs_elements=[f'name={self.network_name}'], + ), + json={'networks': [self.mock_network_rep]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=[ + 'v2.0', + 'subnetpools', + self.mock_subnetpool_rep['id'], + ], + ), + json={"subnetpool": self.mock_subnetpool_rep}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'subnets'] + ), + json={'subnet': mock_subnet_rep}, + validate=dict( + json={ + 'subnet': { + 'enable_dhcp': False, + 'ip_version': 4, + 'network_id': self.mock_network_rep['id'], + 'allocation_pools': pool, + 'dns_nameservers': dns, + 'subnetpool_id': self.mock_subnetpool_rep[ + 'id' + ], + 'prefixlen': self.prefix_length, + 'host_routes': routes, + } + } + ), + ), + ] + ) + subnet = self.cloud.create_subnet( + self.network_name, + allocation_pools=pool, + dns_nameservers=dns, + subnetpool_name_or_id=self.mock_subnetpool_rep['id'], + prefixlen=self.prefix_length, + host_routes=routes, + ) + mock_subnet_rep.update( + {'prefixlen': self.prefix_length, 'use_default_subnetpool': None} + ) + self._compare_subnets(mock_subnet_rep, subnet) + self.assert_calls() + + def test_delete_subnet(self): + self.register_uris( + [ + dict( 
+ method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets'], + qs_elements=[f'name={self.subnet_name}'], + ), + json={'subnets': [self.mock_subnet_rep]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json={}, + ), + ] + ) + self.assertTrue(self.cloud.delete_subnet(self.subnet_name)) + self.assert_calls() + + def test_delete_subnet_not_found(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', 'goofy'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets'], + qs_elements=['name=goofy'], + ), + json={'subnets': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_subnet('goofy')) + self.assert_calls() + + def test_delete_subnet_multiple_found(self): + subnet1 = dict(id='123', name=self.subnet_name) + subnet2 = dict(id='456', name=self.subnet_name) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets'], + qs_elements=[f'name={self.subnet_name}'], + ), + json={'subnets': [subnet1, subnet2]}, + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.delete_subnet, + self.subnet_name, + ) + self.assert_calls() + + def test_delete_subnet_using_id(self): + subnet1 = dict(id='123', name=self.subnet_name) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', subnet1['id']], + ), + json=subnet1, + ), + dict( + method='DELETE', + 
uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', subnet1['id']], + ), + json={}, + ), + ] + ) + self.assertTrue(self.cloud.delete_subnet(subnet1['id'])) + self.assert_calls() + + def test_update_subnet(self): + expected_subnet = copy.copy(self.mock_subnet_rep) + expected_subnet['name'] = 'goofy' + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json=self.mock_subnet_rep, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json={'subnet': expected_subnet}, + validate=dict(json={'subnet': {'name': 'goofy'}}), + ), + ] + ) + subnet = self.cloud.update_subnet(self.subnet_id, subnet_name='goofy') + self._compare_subnets(expected_subnet, subnet) + self.assert_calls() + + def test_update_subnet_gateway_ip(self): + expected_subnet = copy.copy(self.mock_subnet_rep) + gateway = '192.168.199.3' + expected_subnet['gateway_ip'] = gateway + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json=self.mock_subnet_rep, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json={'subnet': expected_subnet}, + validate=dict(json={'subnet': {'gateway_ip': gateway}}), + ), + ] + ) + subnet = self.cloud.update_subnet(self.subnet_id, gateway_ip=gateway) + self._compare_subnets(expected_subnet, subnet) + self.assert_calls() + + def test_update_subnet_disable_gateway_ip(self): + expected_subnet = copy.copy(self.mock_subnet_rep) + expected_subnet['gateway_ip'] = None + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'network', + 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json=self.mock_subnet_rep, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'network', 
+ 'public', + append=['v2.0', 'subnets', self.subnet_id], + ), + json={'subnet': expected_subnet}, + validate=dict(json={'subnet': {'gateway_ip': None}}), + ), + ] + ) + subnet = self.cloud.update_subnet( + self.subnet_id, disable_gateway_ip=True + ) + self._compare_subnets(expected_subnet, subnet) + self.assert_calls() + + def test_update_subnet_conflict_gw_ops(self): + self.assertRaises( + exceptions.SDKException, + self.cloud.update_subnet, + self.subnet_id, + gateway_ip="192.168.199.3", + disable_gateway_ip=True, + ) diff --git a/openstack/tests/unit/cloud/test_update_server.py b/openstack/tests/unit/cloud/test_update_server.py new file mode 100644 index 0000000000..214a3e4e47 --- /dev/null +++ b/openstack/tests/unit/cloud/test_update_server.py @@ -0,0 +1,141 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +""" +test_update_server +---------------------------------- + +Tests for the `update_server` command. 
+""" + +import uuid + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestUpdateServer(base.TestCase): + def setUp(self): + super().setUp() + self.server_id = str(uuid.uuid4()) + self.server_name = self.getUniqueString('name') + self.updated_server_name = self.getUniqueString('name2') + self.fake_server = fakes.make_fake_server( + self.server_id, self.server_name + ) + + def test_update_server_with_update_exception(self): + """ + Test that an exception in the update raises an exception in + update_server. + """ + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=[f'name={self.server_name}'], + ), + json={'servers': [self.fake_server]}, + ), + dict( + method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + status_code=400, + validate=dict( + json={'server': {'name': self.updated_server_name}} + ), + ), + ] + ) + self.assertRaises( + exceptions.SDKException, + self.cloud.update_server, + self.server_name, + name=self.updated_server_name, + ) + + self.assert_calls() + + def test_update_server_name(self): + """ + Test that update_server updates the name without raising any exception + """ + fake_update_server = fakes.make_fake_server( + self.server_id, self.updated_server_name + ) + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', self.server_name], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=[f'name={self.server_name}'], + ), + json={'servers': [self.fake_server]}, + 
), + dict( + method='PUT', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', self.server_id] + ), + json={'server': fake_update_server}, + validate=dict( + json={'server': {'name': self.updated_server_name}} + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'network', 'public', append=['v2.0', 'networks'] + ), + json={'networks': []}, + ), + ] + ) + self.assertEqual( + self.updated_server_name, + self.cloud.update_server( + self.server_name, name=self.updated_server_name + )['name'], + ) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_usage.py b/openstack/tests/unit/cloud/test_usage.py new file mode 100644 index 0000000000..910682bf93 --- /dev/null +++ b/openstack/tests/unit/cloud/test_usage.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import datetime +import uuid + +from openstack.tests.unit import base + + +class TestUsage(base.TestCase): + def test_get_usage(self): + project = self.mock_for_keystone_projects( + project_count=1, id_get=True + )[0] + start = end = datetime.datetime.now() + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['os-simple-tenant-usage', project.project_id], + qs_elements=[ + f'start={start.isoformat()}', + f'end={end.isoformat()}', + ], + ), + json={ + "tenant_usage": { + "server_usages": [ + { + "ended_at": None, + "flavor": "m1.tiny", + "hours": 1.0, + "instance_id": uuid.uuid4().hex, + "local_gb": 1, + "memory_mb": 512, + "name": "instance-2", + "started_at": "2012-10-08T20:10:44.541277", + "state": "active", + "tenant_id": "6f70656e737461636b20342065766572", # noqa: E501 + "uptime": 3600, + "vcpus": 1, + } + ], + "start": "2012-10-08T20:10:44.587336", + "stop": "2012-10-08T21:10:44.587336", + "tenant_id": "6f70656e737461636b20342065766572", + "total_hours": 1.0, + "total_local_gb_usage": 1.0, + "total_memory_mb_usage": 512.0, + "total_vcpus_usage": 1.0, + } + }, + ), + ] + ) + + self.cloud.get_compute_usage(project.project_id, start, end) + + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_users.py b/openstack/tests/unit/cloud/test_users.py new file mode 100644 index 0000000000..cbb0e6dedd --- /dev/null +++ b/openstack/tests/unit/cloud/test_users.py @@ -0,0 +1,269 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +import testtools + +from openstack import exceptions +from openstack.tests.unit import base + + +class TestUsers(base.TestCase): + def _get_keystone_mock_url( + self, resource, append=None, v3=True, qs_elements=None + ): + base_url_append = None + if v3: + base_url_append = 'v3' + return self.get_mock_url( + service_type='identity', + resource=resource, + append=append, + base_url_append=base_url_append, + qs_elements=qs_elements, + ) + + def _get_user_list(self, user_data): + uri = self._get_keystone_mock_url(resource='users') + return { + 'users': [ + user_data.json_response['user'], + ], + 'links': { + 'self': uri, + 'previous': None, + 'next': None, + }, + } + + def test_create_user_v3(self): + user_data = self._get_user_data( + domain_id=uuid.uuid4().hex, + description=self.getUniqueString('description'), + ) + + self.register_uris( + [ + dict( + method='POST', + uri=self._get_keystone_mock_url(resource='users'), + status_code=200, + json=user_data.json_response, + validate=dict(json=user_data.json_request), + ), + ] + ) + + user = self.cloud.create_user( + name=user_data.name, + email=user_data.email, + password=user_data.password, + description=user_data.description, + domain_id=user_data.domain_id, + ) + + self.assertEqual(user_data.name, user.name) + self.assertEqual(user_data.email, user.email) + self.assertEqual(user_data.description, user.description) + self.assertEqual(user_data.user_id, user.id) + self.assert_calls() + + def test_create_user_v3_no_domain(self): + user_data = self._get_user_data( + domain_id=uuid.uuid4().hex, email='test@example.com' + ) + with testtools.ExpectedException( + exceptions.SDKException, + "User or project creation requires an explicit " + "domain_id argument.", + ): + self.cloud.create_user( + name=user_data.name, + email=user_data.email, + password=user_data.password, + ) + + def 
test_delete_user(self): + user_data = self._get_user_data(domain_id=uuid.uuid4().hex) + user_resource_uri = self._get_keystone_mock_url( + resource='users', append=[user_data.user_id] + ) + + self.register_uris( + [ + # you can't lookup by name, so return 404 for that attempt + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='users', append=[user_data.name] + ), + status_code=404, + ), + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='users', + qs_elements=[f'name={user_data.name}'], + ), + status_code=200, + json=self._get_user_list(user_data), + ), + dict(method='DELETE', uri=user_resource_uri, status_code=204), + ] + ) + + self.cloud.delete_user(user_data.name) + self.assert_calls() + + def test_delete_user_not_found(self): + nonexistent_user_id = self.getUniqueString() + self.register_uris( + [ + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='users', append=[nonexistent_user_id] + ), + status_code=404, + ), + dict( + method='GET', + uri=self._get_keystone_mock_url(resource='users'), + status_code=200, + json={'users': []}, + ), + ] + ) + self.assertFalse(self.cloud.delete_user(nonexistent_user_id)) + + def test_add_user_to_group(self): + user_data = self._get_user_data() + group_data = self._get_group_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='users', append=[user_data.user_id] + ), + status_code=200, + json=user_data.json_response, + ), + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='groups', append=[group_data.group_id] + ), + status_code=200, + json=group_data.json_response, + ), + dict( + method='PUT', + uri=self._get_keystone_mock_url( + resource='groups', + append=[ + group_data.group_id, + 'users', + user_data.user_id, + ], + ), + status_code=200, + ), + ] + ) + self.cloud.add_user_to_group(user_data.user_id, group_data.group_id) + self.assert_calls() + + def test_is_user_in_group(self): + user_data = 
self._get_user_data() + group_data = self._get_group_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='users', append=[user_data.user_id] + ), + status_code=200, + json=user_data.json_response, + ), + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='groups', append=[group_data.group_id] + ), + status_code=200, + json=group_data.json_response, + ), + dict( + method='HEAD', + uri=self._get_keystone_mock_url( + resource='groups', + append=[ + group_data.group_id, + 'users', + user_data.user_id, + ], + ), + status_code=204, + ), + ] + ) + + self.assertTrue( + self.cloud.is_user_in_group(user_data.user_id, group_data.group_id) + ) + self.assert_calls() + + def test_remove_user_from_group(self): + user_data = self._get_user_data() + group_data = self._get_group_data() + + self.register_uris( + [ + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='users', append=[user_data.user_id] + ), + json=user_data.json_response, + ), + dict( + method='GET', + uri=self._get_keystone_mock_url( + resource='groups', append=[group_data.group_id] + ), + status_code=200, + json=group_data.json_response, + ), + dict( + method='DELETE', + uri=self._get_keystone_mock_url( + resource='groups', + append=[ + group_data.group_id, + 'users', + user_data.user_id, + ], + ), + status_code=204, + ), + ] + ) + + self.cloud.remove_user_from_group( + user_data.user_id, group_data.group_id + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_volume.py b/openstack/tests/unit/cloud/test_volume.py new file mode 100644 index 0000000000..6a418a25e5 --- /dev/null +++ b/openstack/tests/unit/cloud/test_volume.py @@ -0,0 +1,702 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +import testtools + +from openstack.block_storage.v3 import volume +from openstack.cloud import meta +from openstack.compute.v2 import volume_attachment +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestVolume(base.TestCase): + def _compare_volumes(self, exp, real): + self.assertDictEqual( + volume.Volume(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def _compare_volume_attachments(self, exp, real): + self.assertDictEqual( + volume_attachment.VolumeAttachment(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_attach_volume(self): + server = dict(id='server001') + vol = { + 'id': 'volume001', + 'status': 'available', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + rattach = { + 'server_id': server['id'], + 'device': 'device001', + 'volumeId': volume['id'], + 'id': 'attachmentId', + } + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + ], + ), + json={'volumeAttachment': rattach}, + validate=dict( + json={'volumeAttachment': {'volumeId': vol['id']}} + ), + ), + ] + ) + ret = self.cloud.attach_volume(server, volume, wait=False) + self._compare_volume_attachments(rattach, ret) + self.assert_calls() + + def test_attach_volume_exception(self): + server = dict(id='server001') + vol = { + 'id': 'volume001', + 'status': 'available', + 
'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + ], + ), + status_code=404, + validate=dict( + json={'volumeAttachment': {'volumeId': vol['id']}} + ), + ), + ] + ) + with testtools.ExpectedException( + exceptions.NotFoundException, + ): + self.cloud.attach_volume(server, volume, wait=False) + self.assert_calls() + + def test_attach_volume_wait(self): + server = dict(id='server001') + vol = { + 'id': 'volume001', + 'status': 'available', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['attachments'] = [ + {'server_id': server['id'], 'device': 'device001'} + ] + vol['status'] = 'in-use' + attached_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + rattach = { + 'server_id': server['id'], + 'device': 'device001', + 'volumeId': volume['id'], + 'id': 'attachmentId', + } + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + ], + ), + json={'volumeAttachment': rattach}, + validate=dict( + json={'volumeAttachment': {'volumeId': vol['id']}} + ), + ), + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', vol['id']] + ), + json={'volume': volume}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', vol['id']] + ), + json={'volume': attached_volume}, + ), + ] + ) + # defaults to wait=True + ret = self.cloud.attach_volume(server, volume) + self._compare_volume_attachments(rattach, ret) + self.assert_calls() + + def test_attach_volume_wait_error(self): + server = dict(id='server001') + vol = { + 'id': 
'volume001', + 'status': 'available', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['status'] = 'error' + errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + rattach = { + 'server_id': server['id'], + 'device': 'device001', + 'volumeId': volume['id'], + 'id': 'attachmentId', + } + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + ], + ), + json={'volumeAttachment': rattach}, + validate=dict( + json={'volumeAttachment': {'volumeId': vol['id']}} + ), + ), + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume['id']] + ), + json={'volume': errored_volume}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume['id']] + ), + json={'volume': errored_volume}, + ), + ] + ) + + with testtools.ExpectedException(exceptions.ResourceFailure): + self.cloud.attach_volume(server, volume) + self.assert_calls() + + def test_attach_volume_not_available(self): + server = dict(id='server001') + volume = dict(id='volume001', status='error', attachments=[]) + + with testtools.ExpectedException( + exceptions.SDKException, + "Volume {} is not available. 
Status is '{}'".format( + volume['id'], volume['status'] + ), + ): + self.cloud.attach_volume(server, volume) + self.assertEqual(0, len(self.adapter.request_history)) + + def test_attach_volume_already_attached(self): + device_id = 'device001' + server = dict(id='server001') + volume = dict( + id='volume001', + attachments=[{'server_id': 'server001', 'device': device_id}], + ) + + with testtools.ExpectedException( + exceptions.SDKException, + "Volume {} already attached to server {} on device {}".format( + volume['id'], server['id'], device_id + ), + ): + self.cloud.attach_volume(server, volume) + self.assertEqual(0, len(self.adapter.request_history)) + + def test_detach_volume(self): + server = dict(id='server001') + volume = dict( + id='volume001', + attachments=[{'server_id': 'server001', 'device': 'device001'}], + ) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', server['id']] + ), + json={'server': server}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + volume['id'], + ], + ), + ), + ] + ) + self.cloud.detach_volume(server, volume, wait=False) + self.assert_calls() + + def test_detach_volume_exception(self): + server = dict(id='server001') + volume = dict( + id='volume001', + attachments=[{'server_id': 'server001', 'device': 'device001'}], + ) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', server['id']] + ), + json={'server': server}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + volume['id'], + ], + ), + status_code=404, + ), + ] + ) + with testtools.ExpectedException( + exceptions.NotFoundException, + ): + self.cloud.detach_volume(server, 
volume, wait=False) + self.assert_calls() + + def test_detach_volume_wait(self): + server = dict(id='server001') + attachments = [{'server_id': 'server001', 'device': 'device001'}] + vol = { + 'id': 'volume001', + 'status': 'attached', + 'name': '', + 'attachments': attachments, + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['status'] = 'available' + vol['attachments'] = [] + avail_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', server['id']] + ), + json={'server': server}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + volume.id, + ], + ), + ), + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume.id] + ), + json={'volume': avail_volume}, + ), + ] + ) + self.cloud.detach_volume(server, volume) + self.assert_calls() + + def test_detach_volume_wait_error(self): + server = dict(id='server001') + attachments = [{'server_id': 'server001', 'device': 'device001'}] + vol = { + 'id': 'volume001', + 'status': 'attached', + 'name': '', + 'attachments': attachments, + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + vol['status'] = 'error' + vol['attachments'] = [] + errored_volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', server['id']] + ), + json={'server': server}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'compute', + 'public', + append=[ + 'servers', + server['id'], + 'os-volume_attachments', + volume.id, + ], + ), + ), + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 
'public', append=['volumes', volume.id] + ), + json={'volume': errored_volume}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['volumes', errored_volume['id']], + ), + json={'volume': errored_volume}, + ), + ] + ) + with testtools.ExpectedException(exceptions.ResourceFailure): + self.cloud.detach_volume(server, volume) + self.assert_calls() + + def test_delete_volume_deletes(self): + vol = { + 'id': 'volume001', + 'status': 'attached', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume.id] + ), + json={'volumes': [volume]}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['volumes', volume.id], + qs_elements=['cascade=False'], + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume.id] + ), + status_code=404, + ), + ] + ) + self.assertTrue(self.cloud.delete_volume(volume['id'])) + self.assert_calls() + + def test_delete_volume_gone_away(self): + vol = { + 'id': 'volume001', + 'status': 'attached', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume.id] + ), + json=volume, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['volumes', volume.id], + qs_elements=['cascade=False'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume.id] + ), + status_code=404, + ), + ] + ) + self.assertTrue(self.cloud.delete_volume(volume['id'])) + self.assert_calls() + + def test_delete_volume_force(self): + vol = { + 'id': 
'volume001', + 'status': 'attached', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume['id']] + ), + json={'volumes': [volume]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['volumes', volume.id, 'action'], + ), + validate=dict(json={'os-force_delete': None}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume['id']] + ), + status_code=404, + ), + ] + ) + self.assertTrue(self.cloud.delete_volume(volume['id'], force=True)) + self.assert_calls() + + def test_set_volume_bootable(self): + vol = { + 'id': 'volume001', + 'status': 'attached', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume.id] + ), + json={'volume': volume}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['volumes', volume.id, 'action'], + ), + json={'os-set_bootable': {'bootable': True}}, + ), + ] + ) + self.cloud.set_volume_bootable(volume['id']) + self.assert_calls() + + def test_set_volume_bootable_false(self): + vol = { + 'id': 'volume001', + 'status': 'attached', + 'name': '', + 'attachments': [], + } + volume = meta.obj_to_munch(fakes.FakeVolume(**vol)) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', volume.id] + ), + json={'volume': volume}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['volumes', volume.id, 'action'], + ), + json={'os-set_bootable': {'bootable': False}}, + ), + ] + ) + 
self.cloud.set_volume_bootable(volume['id']) + self.assert_calls() + + def test_get_volume_by_id(self): + vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes', '01'] + ), + json={'volume': vol1}, + ), + ] + ) + self._compare_volumes(vol1, self.cloud.get_volume_by_id('01')) + self.assert_calls() + + def test_create_volume(self): + vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes'] + ), + json={'volume': vol1}, + validate=dict( + json={ + 'volume': { + 'size': 50, + 'name': 'vol1', + } + } + ), + ), + ] + ) + + self.cloud.create_volume(50, name='vol1') + self.assert_calls() + + def test_create_bootable_volume(self): + vol1 = meta.obj_to_munch(fakes.FakeVolume('01', 'available', 'vol1')) + self.register_uris( + [ + self.get_cinder_discovery_mock_dict(), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['volumes'] + ), + json={'volume': vol1}, + validate=dict( + json={ + 'volume': { + 'size': 50, + 'name': 'vol1', + } + } + ), + ), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['volumes', '01', 'action'], + ), + validate=dict( + json={'os-set_bootable': {'bootable': True}} + ), + ), + ] + ) + + self.cloud.create_volume(50, name='vol1', bootable=True) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_volume_access.py b/openstack/tests/unit/cloud/test_volume_access.py new file mode 100644 index 0000000000..d1516c0cf7 --- /dev/null +++ b/openstack/tests/unit/cloud/test_volume_access.py @@ -0,0 +1,383 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +from openstack import exceptions +from openstack.tests.unit import base + + +class TestVolumeAccess(base.TestCase): + def setUp(self): + super().setUp() + self.use_cinder() + + def test_list_volume_types(self): + volume_type = dict( + id='voltype01', + description='volume type description', + name='name', + is_public=False, + ) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ) + ] + ) + self.assertTrue(self.cloud.list_volume_types()) + self.assert_calls() + + def test_get_volume_type(self): + volume_type = dict( + id='voltype01', + description='volume type description', + name='name', + is_public=False, + ) + self.register_uris( + [ + # "find" will attempt to retrieve using the name as an ID + # first, but cinder only supports lookup by ID so we'll see 404 + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + ] + ) + volume_type_got = self.cloud.get_volume_type(volume_type['name']) + self.assertEqual(volume_type_got.id, volume_type['id']) + + def test_get_volume_type_access(self): + volume_type = dict( + id='voltype01', + description='volume type description', + name='name', + is_public=False, + ) + volume_type_access = [ + dict(volume_type_id='voltype01', name='name', project_id='prj01'), + 
dict(volume_type_id='voltype01', name='name', project_id='prj02'), + ] + self.register_uris( + [ + # "find" will attempt to retrieve using the name as an ID + # first, but cinder only supports lookup by ID so we'll see 404 + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=[ + 'types', + volume_type['id'], + 'os-volume-type-access', + ], + ), + json={'volume_type_access': volume_type_access}, + ), + ] + ) + self.assertEqual( + len(self.cloud.get_volume_type_access(volume_type['name'])), 2 + ) + self.assert_calls() + + def test_remove_volume_type_access(self): + volume_type = dict( + id='voltype01', + description='volume type description', + name='name', + is_public=False, + ) + project_001 = dict( + volume_type_id='voltype01', name='name', project_id='prj01' + ) + project_002 = dict( + volume_type_id='voltype01', name='name', project_id='prj02' + ) + volume_type_access = [project_001, project_002] + self.register_uris( + [ + # "find" will attempt to retrieve using the name as an ID + # first, but cinder only supports lookup by ID so we'll see 404 + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=[ + 'types', + volume_type['id'], + 'os-volume-type-access', + ], + ), + json={'volume_type_access': volume_type_access}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['name']], + ), + 
status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['id'], 'action'], + ), + json={ + 'removeProjectAccess': { + 'project': project_001['project_id'] + } + }, + validate=dict( + json={ + 'removeProjectAccess': { + 'project': project_001['project_id'] + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=[ + 'types', + volume_type['id'], + 'os-volume-type-access', + ], + ), + json={'volume_type_access': [project_001]}, + ), + ] + ) + self.assertEqual( + len(self.cloud.get_volume_type_access(volume_type['name'])), 2 + ) + self.cloud.remove_volume_type_access( + volume_type['name'], project_001['project_id'] + ) + self.assertEqual( + len(self.cloud.get_volume_type_access(volume_type['name'])), 1 + ) + self.assert_calls() + + def test_add_volume_type_access(self): + volume_type = dict( + id='voltype01', + description='volume type description', + name='name', + is_public=False, + ) + project_001 = dict( + volume_type_id='voltype01', name='name', project_id='prj01' + ) + project_002 = dict( + volume_type_id='voltype01', name='name', project_id='prj02' + ) + volume_type_access = [project_001, project_002] + self.register_uris( + [ + # "find" will attempt to retrieve using the name as an ID + # first, but cinder only supports lookup by ID so we'll see 404 + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['name']], + ), + status_code=404, + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['id'], 'action'], + ), + json={ + 'addProjectAccess': { + 'project': project_002['project_id'] + } + }, + validate=dict( + json={ + 'addProjectAccess': { + 'project': project_002['project_id'] + } + } + ), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['types', volume_type['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=[ + 'types', + volume_type['id'], + 'os-volume-type-access', + ], + ), + json={'volume_type_access': volume_type_access}, + ), + ] + ) + self.cloud.add_volume_type_access( + volume_type['name'], project_002['project_id'] + ) + self.assertEqual( + len(self.cloud.get_volume_type_access(volume_type['name'])), 2 + ) + self.assert_calls() + + def test_add_volume_type_access_missing(self): + volume_type = dict( + id='voltype01', + description='volume type description', + name='name', + is_public=False, + ) + project_001 = dict( + volume_type_id='voltype01', name='name', project_id='prj01' + ) + self.register_uris( + [ + # "find" will attempt to retrieve using the name as an ID + # first, but cinder only supports lookup by ID so we'll see 404 + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types', 'MISSING'] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['types'] + ), + json={'volume_types': [volume_type]}, + ), + ] + ) + with testtools.ExpectedException( + exceptions.SDKException, + "VolumeType not found: MISSING", + ): + self.cloud.add_volume_type_access( + "MISSING", 
project_001['project_id'] + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_volume_backups.py b/openstack/tests/unit/cloud/test_volume_backups.py new file mode 100644 index 0000000000..da4da4a677 --- /dev/null +++ b/openstack/tests/unit/cloud/test_volume_backups.py @@ -0,0 +1,316 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack.block_storage.v3 import backup +from openstack.tests.unit import base + + +class TestVolumeBackups(base.TestCase): + def setUp(self): + super().setUp() + self.use_cinder() + + def _compare_backups(self, exp, real): + self.assertDictEqual( + backup.Backup(**exp).to_dict(computed=False), + real.to_dict(computed=False), + ) + + def test_search_volume_backups(self): + name = 'Volume1' + vol1 = {'name': name, 'availability_zone': 'az1'} + vol2 = {'name': name, 'availability_zone': 'az1'} + vol3 = {'name': 'Volume2', 'availability_zone': 'az2'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', 'detail'] + ), + json={"backups": [vol1, vol2, vol3]}, + ) + ] + ) + result = self.cloud.search_volume_backups( + name, {'availability_zone': 'az1'} + ) + self.assertEqual(len(result), 2) + for a, b in zip([vol1, vol2], result): + self._compare_backups(a, b) + self.assert_calls() + + def test_get_volume_backup(self): + name = 'Volume1' + backup = {'name': name, 'availability_zone': 'az1'} + self.register_uris( + [ + dict( + method='GET', + 
uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', name] + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['backups', 'detail'], + qs_elements=[f'name={name}'], + ), + json={"backups": [backup]}, + ), + ] + ) + result = self.cloud.get_volume_backup(name) + self._compare_backups(backup, result) + self.assert_calls() + + def test_get_volume_backup_with_filters(self): + name = 'Volume1' + vol1 = {'name': name, 'availability_zone': 'az1'} + vol2 = {'name': name, 'availability_zone': 'az2'} + vol3 = {'name': 'Volume2', 'availability_zone': 'az1'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', 'detail'] + ), + json={"backups": [vol1, vol2, vol3]}, + ) + ] + ) + result = self.cloud.get_volume_backup( + name, {'availability_zone': 'az1'} + ) + self._compare_backups(vol1, result) + self.assert_calls() + + def test_list_volume_backups(self): + backup = { + 'id': '6ff16bdf-44d5-4bf9-b0f3-687549c76414', + 'status': 'available', + } + search_opts = {'status': 'available'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['backups', 'detail'], + qs_elements=['='.join(i) for i in search_opts.items()], + ), + json={"backups": [backup]}, + ) + ] + ) + result = self.cloud.list_volume_backups(True, search_opts) + self.assertEqual(len(result), 1) + + self._compare_backups(backup, result[0]) + self.assert_calls() + + def test_delete_volume_backup_wait(self): + backup_id = '6ff16bdf-44d5-4bf9-b0f3-687549c76414' + backup = {'id': backup_id, 'status': 'available'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', backup_id] + ), + json={'backup': backup}, + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', backup_id] + ), + ), + dict( + method='GET', + 
uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', backup_id] + ), + json={"backup": backup}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', backup_id] + ), + status_code=404, + ), + ] + ) + self.cloud.delete_volume_backup(backup_id, False, True, 1) + self.assert_calls() + + def test_delete_volume_backup_force(self): + backup_id = '6ff16bdf-44d5-4bf9-b0f3-687549c76414' + backup = {'id': backup_id, 'status': 'available'} + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', backup_id] + ), + json={'backup': backup}, + ), + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', + 'public', + append=['backups', backup_id, 'action'], + ), + json={'os-force_delete': None}, + validate=dict(json={'os-force_delete': None}), + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', backup_id] + ), + json={"backup": backup}, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups', backup_id] + ), + status_code=404, + ), + ] + ) + self.cloud.delete_volume_backup(backup_id, True, True, 1) + self.assert_calls() + + def test_create_volume_backup(self): + volume_id = '1234' + backup_name = 'bak1' + bak1 = { + 'id': '5678', + 'volume_id': volume_id, + 'status': 'available', + 'name': backup_name, + } + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups'] + ), + json={'backup': bak1}, + validate=dict( + json={ + 'backup': { + 'name': backup_name, + 'volume_id': volume_id, + 'description': None, + 'force': False, + 'snapshot_id': None, + 'incremental': False, + } + } + ), + ), + ] + ) + self.cloud.create_volume_backup(volume_id, name=backup_name) + self.assert_calls() + + def test_create_incremental_volume_backup(self): + volume_id = '1234' + backup_name = 'bak1' + bak1 = { + 'id': '5678', + 'volume_id': 
volume_id, + 'status': 'available', + 'name': backup_name, + } + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups'] + ), + json={'backup': bak1}, + validate=dict( + json={ + 'backup': { + 'name': backup_name, + 'volume_id': volume_id, + 'description': None, + 'force': False, + 'snapshot_id': None, + 'incremental': True, + } + } + ), + ), + ] + ) + self.cloud.create_volume_backup( + volume_id, name=backup_name, incremental=True + ) + self.assert_calls() + + def test_create_volume_backup_from_snapshot(self): + volume_id = '1234' + backup_name = 'bak1' + snapshot_id = '5678' + bak1 = { + 'id': '5678', + 'volume_id': volume_id, + 'status': 'available', + 'name': 'bak1', + } + + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'volumev3', 'public', append=['backups'] + ), + json={'backup': bak1}, + validate=dict( + json={ + 'backup': { + 'name': backup_name, + 'volume_id': volume_id, + 'description': None, + 'force': False, + 'snapshot_id': snapshot_id, + 'incremental': False, + } + } + ), + ), + ] + ) + self.cloud.create_volume_backup( + volume_id, name=backup_name, snapshot_id=snapshot_id + ) + self.assert_calls() diff --git a/openstack/tests/unit/cloud/test_zone.py b/openstack/tests/unit/cloud/test_zone.py new file mode 100644 index 0000000000..d23a3f3ef1 --- /dev/null +++ b/openstack/tests/unit/cloud/test_zone.py @@ -0,0 +1,293 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy + +from openstack import exceptions +from openstack.tests.unit import base + + +zone_dict = { + 'name': 'example.net.', + 'type': 'PRIMARY', + 'email': 'test@example.net', + 'description': 'Example zone', + 'ttl': 3600, + 'id': '1', +} + + +class ZoneTestWrapper: + def __init__(self, ut, attrs): + self.remote_res = attrs + self.ut = ut + + def get_create_response_json(self): + return self.remote_res + + def get_get_response_json(self): + return self.remote_res + + def __getitem__(self, key): + """Dict access to be able to access properties easily""" + return self.remote_res[key] + + def cmp(self, other): + ut = self.ut + me = self.remote_res + + for k, v in me.items(): + # Go over known attributes. We might of course compare others, + # but not necessary here + ut.assertEqual(v, other[k]) + + +class TestZone(base.TestCase): + def setUp(self): + super().setUp() + self.use_designate() + + def test_create_zone(self): + fake_zone = ZoneTestWrapper(self, zone_dict) + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones'] + ), + json=fake_zone.get_create_response_json(), + validate=dict( + json={ + 'description': zone_dict['description'], + 'email': zone_dict['email'], + 'name': zone_dict['name'], + 'ttl': zone_dict['ttl'], + 'type': 'PRIMARY', + } + ), + ) + ] + ) + z = self.cloud.create_zone( + name=zone_dict['name'], + zone_type=zone_dict['type'], + email=zone_dict['email'], + description=zone_dict['description'], + ttl=zone_dict['ttl'], + masters=None, + ) + fake_zone.cmp(z) + self.assert_calls() + + def test_create_zone_exception(self): + self.register_uris( + [ + dict( + method='POST', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones'] + ), + status_code=500, + ) + ] + ) + + self.assertRaises( + exceptions.SDKException, self.cloud.create_zone, 'example.net.' 
+ ) + self.assert_calls() + + def test_update_zone(self): + fake_zone = ZoneTestWrapper(self, zone_dict) + new_ttl = 7200 + updated_zone_dict = copy.copy(zone_dict) + updated_zone_dict['ttl'] = new_ttl + updated_zone = ZoneTestWrapper(self, updated_zone_dict) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=fake_zone.get_get_response_json(), + ), + dict( + method='PATCH', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=updated_zone.get_get_response_json(), + validate=dict(json={"ttl": new_ttl}), + ), + ] + ) + z = self.cloud.update_zone(fake_zone['id'], ttl=new_ttl) + updated_zone.cmp(z) + self.assert_calls() + + def test_delete_zone(self): + fake_zone = ZoneTestWrapper(self, zone_dict) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=fake_zone.get_get_response_json(), + ), + dict( + method='DELETE', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + status_code=202, + ), + ] + ) + self.assertTrue(self.cloud.delete_zone(fake_zone['id'])) + self.assert_calls() + + def test_get_zone_by_id(self): + fake_zone = ZoneTestWrapper(self, zone_dict) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['id']], + ), + json=fake_zone.get_get_response_json(), + ) + ] + ) + res = self.cloud.get_zone(fake_zone['id']) + + fake_zone.cmp(res) + self.assert_calls() + + def test_get_zone_by_name(self): + fake_zone = ZoneTestWrapper(self, zone_dict) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', fake_zone['name']], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones'], + 
qs_elements=[ + 'name={name}'.format(name=fake_zone['name']) + ], + ), + json={"zones": [fake_zone.get_get_response_json()]}, + ), + ] + ) + res = self.cloud.get_zone(fake_zone['name']) + fake_zone.cmp(res) + self.assert_calls() + + def test_get_zone_not_found_returns_false(self): + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones', 'nonexistingzone.net.'], + ), + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones'], + qs_elements=['name=nonexistingzone.net.'], + ), + json={"zones": []}, + ), + ] + ) + zone = self.cloud.get_zone('nonexistingzone.net.') + self.assertFalse(zone) + self.assert_calls() + + def test_list_zones(self): + fake_zone = ZoneTestWrapper(self, zone_dict) + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'dns', 'public', append=['v2', 'zones'] + ), + json={ + 'zones': [fake_zone.get_get_response_json()], + 'links': { + 'next': self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones/'], + qs_elements=['limit=1', 'marker=asd'], + ), + 'self': self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones/'], + qs_elements=['limit=1'], + ), + }, + 'metadata': {'total_count': 2}, + }, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'dns', + 'public', + append=['v2', 'zones/'], + qs_elements=['limit=1', 'marker=asd'], + ), + json={'zones': [fake_zone.get_get_response_json()]}, + ), + ] + ) + res = self.cloud.list_zones() + + # updated_rs.cmp(res) + self.assertEqual(2, len(res)) + self.assert_calls() diff --git a/openstack/tests/unit/cluster/test_cluster_service.py b/openstack/tests/unit/cluster/test_cluster_service.py deleted file mode 100644 index 0d7532a600..0000000000 --- a/openstack/tests/unit/cluster/test_cluster_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with 
the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.cluster import cluster_service - - -class TestClusterService(testtools.TestCase): - - def test_service(self): - sot = cluster_service.ClusterService() - self.assertEqual('clustering', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/cluster/test_version.py b/openstack/tests/unit/cluster/test_version.py deleted file mode 100644 index 0efe92cfcc..0000000000 --- a/openstack/tests/unit/cluster/test_version.py +++ /dev/null @@ -1,43 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.cluster import version - -IDENTIFIER = 'IDENTIFIER' -EXAMPLE = { - 'id': IDENTIFIER, - 'links': '2', - 'status': '3', -} - - -class TestVersion(testtools.TestCase): - - def test_basic(self): - sot = version.Version() - self.assertEqual('version', sot.resource_key) - self.assertEqual('versions', sot.resources_key) - self.assertEqual('/', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_it(self): - sot = version.Version(EXAMPLE) - self.assertEqual(EXAMPLE['id'], sot.id) - self.assertEqual(EXAMPLE['links'], sot.links) - self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/cluster/v1/test_cluster.py b/openstack/tests/unit/cluster/v1/test_cluster.py deleted file mode 100644 index 2eb616c42e..0000000000 --- a/openstack/tests/unit/cluster/v1/test_cluster.py +++ /dev/null @@ -1,263 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from openstack.cluster.v1 import cluster - - -FAKE_ID = '092d0955-2645-461a-b8fa-6a44655cdb2c' -FAKE_NAME = 'test_cluster' - -FAKE = { - 'id': 'IDENTIFIER', - 'desired_capacity': 1, - 'max_size': 3, - 'min_size': 0, - 'name': FAKE_NAME, - 'profile_id': 'myserver', - 'metadata': {}, - 'dependents': {}, - 'timeout': None, - 'init_at': '2015-10-10T12:46:36.000000', - 'created_at': '2015-10-10T12:46:36.000000', - 'updated_at': '2016-10-10T12:46:36.000000', -} - -FAKE_CREATE_RESP = { - 'cluster': { - 'action': 'a679c926-908f-49e7-a822-06ca371e64e1', - 'init_at': '2015-10-10T12:46:36.000000', - 'created_at': '2015-10-10T12:46:36.000000', - 'updated_at': '2016-10-10T12:46:36.000000', - 'data': {}, - 'desired_capacity': 1, - 'domain': None, - 'id': FAKE_ID, - 'init_time': None, - 'max_size': 3, - 'metadata': {}, - 'min_size': 0, - 'name': 'test_cluster', - 'nodes': [], - 'policies': [], - 'profile_id': '560a8f9d-7596-4a32-85e8-03645fa7be13', - 'profile_name': 'myserver', - 'project': '333acb15a43242f4a609a27cb097a8f2', - 'status': 'INIT', - 'status_reason': 'Initializing', - 'timeout': None, - 'user': '6d600911ff764e54b309ce734c89595e', - 'dependents': {}, - } -} - - -class TestCluster(testtools.TestCase): - - def setUp(self): - super(TestCluster, self).setUp() - - def test_basic(self): - sot = cluster.Cluster() - self.assertEqual('cluster', sot.resource_key) - self.assertEqual('clusters', sot.resources_key) - self.assertEqual('/clusters', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_instantiate(self): - sot = cluster.Cluster(**FAKE) - - self.assertEqual(FAKE['id'], sot.id) - self.assertEqual(FAKE['name'], sot.name) - - self.assertEqual(FAKE['profile_id'], sot.profile_id) - - self.assertEqual(FAKE['min_size'], sot.min_size) 
- self.assertEqual(FAKE['max_size'], sot.max_size) - self.assertEqual(FAKE['desired_capacity'], sot.desired_capacity) - - self.assertEqual(FAKE['timeout'], sot.timeout) - self.assertEqual(FAKE['metadata'], sot.metadata) - - self.assertEqual(FAKE['init_at'], sot.init_at) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['updated_at'], sot.updated_at) - self.assertEqual(FAKE['dependents'], sot.dependents) - - def test_scale_in(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.scale_in(sess, 3)) - url = 'clusters/%s/actions' % sot.id - body = {'scale_in': {'count': 3}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_scale_out(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.scale_out(sess, 3)) - url = 'clusters/%s/actions' % sot.id - body = {'scale_out': {'count': 3}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_resize(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.resize(sess, foo='bar', zoo=5)) - url = 'clusters/%s/actions' % sot.id - body = {'resize': {'foo': 'bar', 'zoo': 5}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_add_nodes(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.add_nodes(sess, ['node-33'])) - url = 'clusters/%s/actions' % sot.id - body = {'add_nodes': {'nodes': ['node-33']}} - 
sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_del_nodes(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.del_nodes(sess, ['node-11'])) - url = 'clusters/%s/actions' % sot.id - body = {'del_nodes': {'nodes': ['node-11']}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_replace_nodes(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.replace_nodes(sess, {'node-22': 'node-44'})) - url = 'clusters/%s/actions' % sot.id - body = {'replace_nodes': {'nodes': {'node-22': 'node-44'}}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_policy_attach(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - params = { - 'enabled': True, - } - self.assertEqual('', sot.policy_attach(sess, 'POLICY', **params)) - - url = 'clusters/%s/actions' % sot.id - body = { - 'policy_attach': { - 'policy_id': 'POLICY', - 'enabled': True, - } - } - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_policy_detach(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.policy_detach(sess, 'POLICY')) - - url = 'clusters/%s/actions' % sot.id - body = {'policy_detach': {'policy_id': 'POLICY'}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_policy_update(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = 
mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - params = { - 'enabled': False - } - self.assertEqual('', sot.policy_update(sess, 'POLICY', **params)) - - url = 'clusters/%s/actions' % sot.id - body = { - 'policy_update': { - 'policy_id': 'POLICY', - 'enabled': False - } - } - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_check(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.check(sess)) - url = 'clusters/%s/actions' % sot.id - body = {'check': {}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_recover(self): - sot = cluster.Cluster(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.recover(sess)) - url = 'clusters/%s/actions' % sot.id - body = {'recover': {}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) diff --git a/openstack/tests/unit/cluster/v1/test_cluster_policy.py b/openstack/tests/unit/cluster/v1/test_cluster_policy.py deleted file mode 100644 index a4126977c0..0000000000 --- a/openstack/tests/unit/cluster/v1/test_cluster_policy.py +++ /dev/null @@ -1,61 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.cluster.v1 import cluster_policy - - -FAKE = { - 'cluster_id': '99e39f4b-1990-4237-a556-1518f0f0c9e7', - 'cluster_name': 'test_cluster', - 'data': {'purpose': 'unknown'}, - 'enabled': True, - 'policy_id': 'ac5415bd-f522-4160-8be0-f8853e4bc332', - 'policy_name': 'dp01', - 'policy_type': 'senlin.poicy.deletion-1.0', -} - - -class TestClusterPolicy(testtools.TestCase): - - def setUp(self): - super(TestClusterPolicy, self).setUp() - - def test_basic(self): - sot = cluster_policy.ClusterPolicy() - self.assertEqual('cluster_policy', sot.resource_key) - self.assertEqual('cluster_policies', sot.resources_key) - self.assertEqual('/clusters/%(cluster_id)s/policies', - sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_list) - - self.assertDictEqual({"policy_name": "policy_name", - "policy_type": "policy_type", - "is_enabled": "enabled", - "sort": "sort", - "limit": "limit", - "marker": "marker"}, - sot._query_mapping._mapping) - - def test_instantiate(self): - sot = cluster_policy.ClusterPolicy(**FAKE) - self.assertEqual(FAKE['policy_id'], sot.id) - self.assertEqual(FAKE['cluster_id'], sot.cluster_id) - self.assertEqual(FAKE['cluster_name'], sot.cluster_name) - self.assertEqual(FAKE['data'], sot.data) - self.assertTrue(sot.is_enabled) - self.assertEqual(FAKE['policy_id'], sot.policy_id) - self.assertEqual(FAKE['policy_name'], sot.policy_name) - self.assertEqual(FAKE['policy_type'], sot.policy_type) diff --git a/openstack/tests/unit/cluster/v1/test_node.py b/openstack/tests/unit/cluster/v1/test_node.py deleted file mode 100644 index afc3cdeab1..0000000000 --- a/openstack/tests/unit/cluster/v1/test_node.py +++ /dev/null @@ -1,105 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import testtools - -from openstack.cluster.v1 import node - - -FAKE_ID = '123d0955-0099-aabb-b8fa-6a44655ceeff' -FAKE_NAME = 'test_node' - -FAKE = { - 'id': FAKE_ID, - 'cluster_id': 'clusterA', - 'metadata': {'key1': 'value1'}, - 'name': FAKE_NAME, - 'profile_id': 'myserver', - 'user': '3747afc360b64702a53bdd64dc1b8976', - 'project': '42d9e9663331431f97b75e25136307ff', - 'index': 1, - 'role': 'master', - 'dependents': {}, - 'created_at': '2015-10-10T12:46:36.000000', - 'updated_at': '2016-10-10T12:46:36.000000', - 'init_at': '2015-10-10T12:46:36.000000', -} - - -class TestNode(testtools.TestCase): - - def test_basic(self): - sot = node.Node() - self.assertEqual('node', sot.resource_key) - self.assertEqual('nodes', sot.resources_key) - self.assertEqual('/nodes', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_instantiate(self): - sot = node.Node(**FAKE) - self.assertEqual(FAKE['id'], sot.id) - self.assertEqual(FAKE['profile_id'], sot.profile_id) - self.assertEqual(FAKE['cluster_id'], sot.cluster_id) - self.assertEqual(FAKE['user'], sot.user_id) - self.assertEqual(FAKE['project'], sot.project_id) - self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['index'], sot.index) - self.assertEqual(FAKE['role'], sot.role) - self.assertEqual(FAKE['metadata'], sot.metadata) - self.assertEqual(FAKE['init_at'], sot.init_at) - 
self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['updated_at'], sot.updated_at) - self.assertEqual(FAKE['dependents'], sot.dependents) - - def test_check(self): - sot = node.Node(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.check(sess)) - url = 'nodes/%s/actions' % sot.id - body = {'check': {}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - def test_recover(self): - sot = node.Node(**FAKE) - - resp = mock.Mock() - resp.json = mock.Mock(return_value='') - sess = mock.Mock() - sess.post = mock.Mock(return_value=resp) - self.assertEqual('', sot.recover(sess)) - url = 'nodes/%s/actions' % sot.id - body = {'recover': {}} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - json=body) - - -class TestNodeDetail(testtools.TestCase): - - def test_basic(self): - sot = node.NodeDetail() - self.assertEqual('/nodes/%(node_id)s?show_details=True', sot.base_path) - self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertFalse(sot.allow_list) diff --git a/openstack/tests/unit/cluster/v1/test_profile.py b/openstack/tests/unit/cluster/v1/test_profile.py deleted file mode 100644 index e371ed1b46..0000000000 --- a/openstack/tests/unit/cluster/v1/test_profile.py +++ /dev/null @@ -1,90 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.cluster.v1 import profile - - -FAKE_ID = '9b127538-a675-4271-ab9b-f24f54cfe173' -FAKE_NAME = 'test_profile' - -FAKE = { - 'metadata': {}, - 'name': FAKE_NAME, - 'id': FAKE_ID, - 'spec': { - 'type': 'os.nova.server', - 'version': 1.0, - 'properties': { - 'flavor': 1, - 'image': 'cirros-0.3.2-x86_64-uec', - 'key_name': 'oskey', - 'name': 'cirros_server' - } - }, - 'project': '42d9e9663331431f97b75e25136307ff', - 'user': '3747afc360b64702a53bdd64dc1b8976', - 'type': 'os.nova.server', - 'created_at': '2015-10-10T12:46:36.000000', - 'updated_at': '2016-10-10T12:46:36.000000', -} - - -class TestProfile(testtools.TestCase): - - def setUp(self): - super(TestProfile, self).setUp() - - def test_basic(self): - sot = profile.Profile() - self.assertEqual('profile', sot.resource_key) - self.assertEqual('profiles', sot.resources_key) - self.assertEqual('/profiles', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) - - def test_instantiate(self): - sot = profile.Profile(**FAKE) - self.assertEqual(FAKE['id'], sot.id) - self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['metadata'], sot.metadata) - self.assertEqual(FAKE['spec'], sot.spec) - self.assertEqual(FAKE['project'], sot.project_id) - self.assertEqual(FAKE['user'], sot.user_id) - self.assertEqual(FAKE['type'], sot.type) - self.assertEqual(FAKE['created_at'], sot.created_at) - self.assertEqual(FAKE['updated_at'], sot.updated_at) - - -class TestProfileValidate(testtools.TestCase): - - def setUp(self): - super(TestProfileValidate, self).setUp() - - def test_basic(self): - sot = profile.ProfileValidate() - self.assertEqual('profile', 
sot.resource_key) - self.assertEqual('profiles', sot.resources_key) - self.assertEqual('/profiles/validate', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertFalse(sot.allow_list) - self.assertFalse(sot.patch_update) diff --git a/openstack/tests/unit/cluster/v1/test_profile_type.py b/openstack/tests/unit/cluster/v1/test_profile_type.py deleted file mode 100644 index d494a8e551..0000000000 --- a/openstack/tests/unit/cluster/v1/test_profile_type.py +++ /dev/null @@ -1,48 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.cluster.v1 import profile_type - - -FAKE = { - 'name': 'FAKE_PROFILE_TYPE', - 'schema': { - 'foo': 'bar' - }, - 'support_status': { - '1.0': [{ - 'status': 'supported', - 'since': '2016.10', - }] - } -} - - -class TestProfileType(testtools.TestCase): - - def test_basic(self): - sot = profile_type.ProfileType() - self.assertEqual('profile_type', sot.resource_key) - self.assertEqual('profile_types', sot.resources_key) - self.assertEqual('/profile-types', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_list) - - def test_instantiate(self): - sot = profile_type.ProfileType(**FAKE) - self.assertEqual(FAKE['name'], sot._get_id(sot)) - self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['schema'], sot.schema) - self.assertEqual(FAKE['support_status'], sot.support_status) diff --git a/openstack/tests/unit/cluster/v1/test_proxy.py b/openstack/tests/unit/cluster/v1/test_proxy.py deleted file mode 100644 index b6643bace4..0000000000 --- a/openstack/tests/unit/cluster/v1/test_proxy.py +++ /dev/null @@ -1,458 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from openstack.cluster.v1 import _proxy -from openstack.cluster.v1 import action -from openstack.cluster.v1 import build_info -from openstack.cluster.v1 import cluster -from openstack.cluster.v1 import cluster_attr -from openstack.cluster.v1 import cluster_policy -from openstack.cluster.v1 import event -from openstack.cluster.v1 import node -from openstack.cluster.v1 import policy -from openstack.cluster.v1 import policy_type -from openstack.cluster.v1 import profile -from openstack.cluster.v1 import profile_type -from openstack.cluster.v1 import receiver -from openstack import proxy2 as proxy_base -from openstack.tests.unit import test_proxy_base2 - - -class TestClusterProxy(test_proxy_base2.TestProxyBase): - def setUp(self): - super(TestClusterProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - def test_build_info_get(self): - self.verify_get(self.proxy.get_build_info, build_info.BuildInfo, - ignore_value=True, - expected_kwargs={'requires_id': False}) - - def test_profile_types(self): - self.verify_list(self.proxy.profile_types, - profile_type.ProfileType, - paginated=False) - - def test_profile_type_get(self): - self.verify_get(self.proxy.get_profile_type, - profile_type.ProfileType) - - def test_policy_types(self): - self.verify_list(self.proxy.policy_types, policy_type.PolicyType, - paginated=False) - - def test_policy_type_get(self): - self.verify_get(self.proxy.get_policy_type, policy_type.PolicyType) - - def test_profile_create(self): - self.verify_create(self.proxy.create_profile, profile.Profile) - - def test_profile_validate(self): - self.verify_create(self.proxy.validate_profile, - profile.ProfileValidate) - - def test_profile_delete(self): - self.verify_delete(self.proxy.delete_profile, profile.Profile, False) - - def test_profile_delete_ignore(self): - self.verify_delete(self.proxy.delete_profile, profile.Profile, True) - - def test_profile_find(self): - self.verify_find(self.proxy.find_profile, profile.Profile) - - 
def test_profile_get(self): - self.verify_get(self.proxy.get_profile, profile.Profile) - - def test_profiles(self): - self.verify_list(self.proxy.profiles, profile.Profile, - paginated=True, - method_kwargs={'limit': 2}, - expected_kwargs={'limit': 2}) - - def test_profile_update(self): - self.verify_update(self.proxy.update_profile, profile.Profile) - - def test_cluster_create(self): - self.verify_create(self.proxy.create_cluster, cluster.Cluster) - - def test_cluster_delete(self): - self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, False) - - def test_cluster_delete_ignore(self): - self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, True) - - def test_cluster_find(self): - self.verify_find(self.proxy.find_cluster, cluster.Cluster) - - def test_cluster_get(self): - self.verify_get(self.proxy.get_cluster, cluster.Cluster) - - def test_clusters(self): - self.verify_list(self.proxy.clusters, cluster.Cluster, - paginated=True, - method_kwargs={'limit': 2}, - expected_kwargs={'limit': 2}) - - def test_cluster_update(self): - self.verify_update(self.proxy.update_cluster, cluster.Cluster) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_add_nodes(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.add_nodes", - self.proxy.cluster_add_nodes, - method_args=["FAKE_CLUSTER", ["node1"]], - expected_args=[["node1"]]) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_add_nodes_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.add_nodes", - self.proxy.cluster_add_nodes, - method_args=[mock_cluster, ["node1"]], - expected_args=[["node1"]]) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_del_nodes(self, mock_find): - mock_cluster = 
cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.del_nodes", - self.proxy.cluster_del_nodes, - method_args=["FAKE_CLUSTER", ["node1"]], - expected_args=[["node1"]]) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_del_nodes_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.del_nodes", - self.proxy.cluster_del_nodes, - method_args=[mock_cluster, ["node1"]], - expected_args=[["node1"]]) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_replace_nodes(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.replace_nodes", - self.proxy.cluster_replace_nodes, - method_args=["FAKE_CLUSTER", {"node1": "node2"}], - expected_args=[{"node1": "node2"}]) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_replace_nodes_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.replace_nodes", - self.proxy.cluster_replace_nodes, - method_args=[mock_cluster, {"node1": "node2"}], - expected_args=[{"node1": "node2"}]) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_scale_out(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.scale_out", - self.proxy.cluster_scale_out, - method_args=["FAKE_CLUSTER", 3], - expected_args=[3]) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_scale_out_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - 
self._verify("openstack.cluster.v1.cluster.Cluster.scale_out", - self.proxy.cluster_scale_out, - method_args=[mock_cluster, 5], - expected_args=[5]) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_scale_in(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.scale_in", - self.proxy.cluster_scale_in, - method_args=["FAKE_CLUSTER", 3], - expected_args=[3]) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_scale_in_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.scale_in", - self.proxy.cluster_scale_in, - method_args=[mock_cluster, 5], - expected_args=[5]) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_resize(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.resize", - self.proxy.cluster_resize, - method_args=["FAKE_CLUSTER"], - method_kwargs={'k1': 'v1', 'k2': 'v2'}, - expected_kwargs={'k1': 'v1', 'k2': 'v2'}) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_resize_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.resize", - self.proxy.cluster_resize, - method_args=[mock_cluster], - method_kwargs={'k1': 'v1', 'k2': 'v2'}, - expected_kwargs={'k1': 'v1', 'k2': 'v2'}) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_attach_policy(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.policy_attach", - self.proxy.cluster_attach_policy, - method_args=["FAKE_CLUSTER", "FAKE_POLICY"], - 
method_kwargs={"k1": "v1", "k2": "v2"}, - expected_args=["FAKE_POLICY"], - expected_kwargs={"k1": "v1", 'k2': "v2"}) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_attach_policy_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.policy_attach", - self.proxy.cluster_attach_policy, - method_args=[mock_cluster, "FAKE_POLICY"], - method_kwargs={"k1": "v1", "k2": "v2"}, - expected_args=["FAKE_POLICY"], - expected_kwargs={"k1": "v1", 'k2': "v2"}) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_detach_policy(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.policy_detach", - self.proxy.cluster_detach_policy, - method_args=["FAKE_CLUSTER", "FAKE_POLICY"], - expected_args=["FAKE_POLICY"]) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_detach_policy_with_obj(self): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.policy_detach", - self.proxy.cluster_detach_policy, - method_args=[mock_cluster, "FAKE_POLICY"], - expected_args=["FAKE_POLICY"]) - - @mock.patch.object(proxy_base.BaseProxy, '_find') - def test_cluster_update_policy(self, mock_find): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_find.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.policy_update", - self.proxy.cluster_update_policy, - method_args=["FAKE_CLUSTER", "FAKE_POLICY"], - method_kwargs={"k1": "v1", "k2": "v2"}, - expected_args=["FAKE_POLICY"], - expected_kwargs={"k1": "v1", 'k2': "v2"}) - mock_find.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER", - ignore_missing=False) - - def test_cluster_update_policy_with_obj(self): - mock_cluster = 
cluster.Cluster.new(id='FAKE_CLUSTER') - self._verify("openstack.cluster.v1.cluster.Cluster.policy_update", - self.proxy.cluster_update_policy, - method_args=[mock_cluster, "FAKE_POLICY"], - method_kwargs={"k1": "v1", "k2": "v2"}, - expected_args=["FAKE_POLICY"], - expected_kwargs={"k1": "v1", 'k2': "v2"}) - - def test_collect_cluster_attrs(self): - self.verify_list(self.proxy.collect_cluster_attrs, - cluster_attr.ClusterAttr, paginated=False, - method_args=['FAKE_ID', 'path.to.attr'], - expected_kwargs={'cluster_id': 'FAKE_ID', - 'path': 'path.to.attr'}) - - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') - def test_cluster_check(self, mock_get): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_get.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.check", - self.proxy.check_cluster, - method_args=["FAKE_CLUSTER"]) - mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER") - - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') - def test_cluster_recover(self, mock_get): - mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - mock_get.return_value = mock_cluster - self._verify("openstack.cluster.v1.cluster.Cluster.recover", - self.proxy.recover_cluster, - method_args=["FAKE_CLUSTER"]) - mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER") - - def test_node_create(self): - self.verify_create(self.proxy.create_node, node.Node) - - def test_node_delete(self): - self.verify_delete(self.proxy.delete_node, node.Node, False) - - def test_node_delete_ignore(self): - self.verify_delete(self.proxy.delete_node, node.Node, True) - - def test_node_find(self): - self.verify_find(self.proxy.find_node, node.Node) - - def test_node_get(self): - self.verify_get(self.proxy.get_node, node.Node) - - def test_node_get_with_details(self): - self._verify2('openstack.proxy2.BaseProxy._get', - self.proxy.get_node, - method_args=['NODE_ID'], - method_kwargs={'details': True}, - 
expected_args=[node.NodeDetail], - expected_kwargs={'node_id': 'NODE_ID', - 'requires_id': False}) - - def test_nodes(self): - self.verify_list(self.proxy.nodes, node.Node, - paginated=True, - method_kwargs={'limit': 2}, - expected_kwargs={'limit': 2}) - - def test_node_update(self): - self.verify_update(self.proxy.update_node, node.Node) - - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') - def test_node_check(self, mock_get): - mock_node = node.Node.new(id='FAKE_NODE') - mock_get.return_value = mock_node - self._verify("openstack.cluster.v1.node.Node.check", - self.proxy.check_node, - method_args=["FAKE_NODE"]) - mock_get.assert_called_once_with(node.Node, "FAKE_NODE") - - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') - def test_node_recover(self, mock_get): - mock_node = node.Node.new(id='FAKE_NODE') - mock_get.return_value = mock_node - self._verify("openstack.cluster.v1.node.Node.recover", - self.proxy.recover_node, - method_args=["FAKE_NODE"]) - mock_get.assert_called_once_with(node.Node, "FAKE_NODE") - - def test_policy_create(self): - self.verify_create(self.proxy.create_policy, policy.Policy) - - def test_policy_validate(self): - self.verify_create(self.proxy.validate_policy, policy.PolicyValidate) - - def test_policy_delete(self): - self.verify_delete(self.proxy.delete_policy, policy.Policy, False) - - def test_policy_delete_ignore(self): - self.verify_delete(self.proxy.delete_policy, policy.Policy, True) - - def test_policy_find(self): - self.verify_find(self.proxy.find_policy, policy.Policy) - - def test_policy_get(self): - self.verify_get(self.proxy.get_policy, policy.Policy) - - def test_policies(self): - self.verify_list(self.proxy.policies, policy.Policy, - paginated=True, - method_kwargs={'limit': 2}, - expected_kwargs={'limit': 2}) - - def test_policy_update(self): - self.verify_update(self.proxy.update_policy, policy.Policy) - - def test_cluster_policies(self): - self.verify_list(self.proxy.cluster_policies, - 
cluster_policy.ClusterPolicy, - paginated=False, method_args=["FAKE_CLUSTER"], - expected_kwargs={"cluster_id": "FAKE_CLUSTER"}) - - def test_get_cluster_policy(self): - fake_policy = cluster_policy.ClusterPolicy.new(id="FAKE_POLICY") - fake_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') - - # ClusterPolicy object as input - self._verify2('openstack.proxy2.BaseProxy._get', - self.proxy.get_cluster_policy, - method_args=[fake_policy, "FAKE_CLUSTER"], - expected_args=[cluster_policy.ClusterPolicy, - fake_policy], - expected_kwargs={'cluster_id': 'FAKE_CLUSTER'}, - expected_result=fake_policy) - - # Policy ID as input - self._verify2('openstack.proxy2.BaseProxy._get', - self.proxy.get_cluster_policy, - method_args=["FAKE_POLICY", "FAKE_CLUSTER"], - expected_args=[cluster_policy.ClusterPolicy, - "FAKE_POLICY"], - expected_kwargs={"cluster_id": "FAKE_CLUSTER"}) - - # Cluster object as input - self._verify2('openstack.proxy2.BaseProxy._get', - self.proxy.get_cluster_policy, - method_args=["FAKE_POLICY", fake_cluster], - expected_args=[cluster_policy.ClusterPolicy, - "FAKE_POLICY"], - expected_kwargs={"cluster_id": fake_cluster}) - - def test_receiver_create(self): - self.verify_create(self.proxy.create_receiver, receiver.Receiver) - - def test_receiver_delete(self): - self.verify_delete(self.proxy.delete_receiver, receiver.Receiver, - False) - - def test_receiver_delete_ignore(self): - self.verify_delete(self.proxy.delete_receiver, receiver.Receiver, True) - - def test_receiver_find(self): - self.verify_find(self.proxy.find_receiver, receiver.Receiver) - - def test_receiver_get(self): - self.verify_get(self.proxy.get_receiver, receiver.Receiver) - - def test_receivers(self): - self.verify_list(self.proxy.receivers, receiver.Receiver, - paginated=True, - method_kwargs={'limit': 2}, - expected_kwargs={'limit': 2}) - - def test_action_get(self): - self.verify_get(self.proxy.get_action, action.Action) - - def test_actions(self): - self.verify_list(self.proxy.actions, 
action.Action, - paginated=True, - method_kwargs={'limit': 2}, - expected_kwargs={'limit': 2}) - - def test_event_get(self): - self.verify_get(self.proxy.get_event, event.Event) - - def test_events(self): - self.verify_list(self.proxy.events, event.Event, - paginated=True, - method_kwargs={'limit': 2}, - expected_kwargs={'limit': 2}) diff --git a/openstack/tests/unit/clustering/__init__.py b/openstack/tests/unit/clustering/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/clustering/test_version.py b/openstack/tests/unit/clustering/test_version.py new file mode 100644 index 0000000000..497114bb16 --- /dev/null +++ b/openstack/tests/unit/clustering/test_version.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.clustering import version +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': '2', + 'status': '3', +} + + +class TestVersion(base.TestCase): + def test_basic(self): + sot = version.Version() + self.assertEqual('version', sot.resource_key) + self.assertEqual('versions', sot.resources_key) + self.assertEqual('/', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = version.Version(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/clustering/v1/__init__.py b/openstack/tests/unit/clustering/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/cluster/v1/test_action.py b/openstack/tests/unit/clustering/v1/test_action.py similarity index 85% rename from openstack/tests/unit/cluster/v1/test_action.py rename to openstack/tests/unit/clustering/v1/test_action.py index 0cf72a3808..464c3ac14c 100644 --- a/openstack/tests/unit/cluster/v1/test_action.py +++ b/openstack/tests/unit/clustering/v1/test_action.py @@ -10,11 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - -from openstack.cluster.v1 import action +from openstack.clustering.v1 import action +from openstack.tests.unit import base +FAKE_CLUSTER_ID = 'ffaed25e-46f5-4089-8e20-b3b4722fd597' FAKE_ID = '633bd3c6-520b-420f-8e6a-dc2a47022b53' FAKE_NAME = 'node_create_c3783474' @@ -27,6 +27,7 @@ 'owner': None, 'user': '3747afc360b64702a53bdd64dc1b8976', 'project': '42d9e9663331431f97b75e25136307ff', + 'domain': '204ccccd267b40aea871750116b5b184', 'interval': -1, 'start_time': 1453414055.48672, 'end_time': 1453414055.48672, @@ -39,22 +40,22 @@ 'depended_by': [], 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', + 'cluster_id': FAKE_CLUSTER_ID, } -class TestAction(testtools.TestCase): - +class TestAction(base.TestCase): def setUp(self): - super(TestAction, self).setUp() + super().setUp() def test_basic(self): sot = action.Action() self.assertEqual('action', sot.resource_key) self.assertEqual('actions', sot.resources_key) self.assertEqual('/actions', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_commit) def test_instantiate(self): sot = action.Action(**FAKE) @@ -66,6 +67,7 @@ def test_instantiate(self): self.assertEqual(FAKE['owner'], sot.owner_id) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['project'], sot.project_id) + self.assertEqual(FAKE['domain'], sot.domain_id) self.assertEqual(FAKE['interval'], sot.interval) self.assertEqual(FAKE['start_time'], sot.start_at) self.assertEqual(FAKE['end_time'], sot.end_at) @@ -78,3 +80,4 @@ def test_instantiate(self): self.assertEqual(FAKE['depended_by'], sot.depended_by) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['cluster_id'], sot.cluster_id) diff --git a/openstack/tests/unit/cluster/v1/test_build_info.py 
b/openstack/tests/unit/clustering/v1/test_build_info.py similarity index 80% rename from openstack/tests/unit/cluster/v1/test_build_info.py rename to openstack/tests/unit/clustering/v1/test_build_info.py index ab695c027d..a80f8608cf 100644 --- a/openstack/tests/unit/cluster/v1/test_build_info.py +++ b/openstack/tests/unit/clustering/v1/test_build_info.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - -from openstack.cluster.v1 import build_info +from openstack.clustering.v1 import build_info +from openstack.tests.unit import base FAKE = { @@ -21,21 +20,19 @@ }, 'engine': { 'revision': '1.0.0', - } + }, } -class TestBuildInfo(testtools.TestCase): - +class TestBuildInfo(base.TestCase): def setUp(self): - super(TestBuildInfo, self).setUp() + super().setUp() def test_basic(self): sot = build_info.BuildInfo() self.assertEqual('/build-info', sot.base_path) self.assertEqual('build_info', sot.resource_key) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) def test_instantiate(self): sot = build_info.BuildInfo(**FAKE) diff --git a/openstack/tests/unit/clustering/v1/test_cluster.py b/openstack/tests/unit/clustering/v1/test_cluster.py new file mode 100644 index 0000000000..4a5da101c2 --- /dev/null +++ b/openstack/tests/unit/clustering/v1/test_cluster.py @@ -0,0 +1,307 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack.clustering.v1 import cluster +from openstack.tests.unit import base + +FAKE_ID = '092d0955-2645-461a-b8fa-6a44655cdb2c' +FAKE_NAME = 'test_cluster' + +FAKE = { + 'id': 'IDENTIFIER', + 'config': {'key1': 'value1', 'key2': 'value2'}, + 'desired_capacity': 1, + 'max_size': 3, + 'min_size': 0, + 'name': FAKE_NAME, + 'profile_id': 'myserver', + 'profile_only': True, + 'metadata': {}, + 'dependents': {}, + 'timeout': None, + 'init_at': '2015-10-10T12:46:36.000000', + 'created_at': '2015-10-10T12:46:36.000000', + 'updated_at': '2016-10-10T12:46:36.000000', +} + +FAKE_CREATE_RESP = { + 'cluster': { + 'action': 'a679c926-908f-49e7-a822-06ca371e64e1', + 'init_at': '2015-10-10T12:46:36.000000', + 'created_at': '2015-10-10T12:46:36.000000', + 'updated_at': '2016-10-10T12:46:36.000000', + 'data': {}, + 'desired_capacity': 1, + 'domain': None, + 'id': FAKE_ID, + 'init_time': None, + 'max_size': 3, + 'metadata': {}, + 'min_size': 0, + 'name': 'test_cluster', + 'nodes': [], + 'policies': [], + 'profile_id': '560a8f9d-7596-4a32-85e8-03645fa7be13', + 'profile_name': 'myserver', + 'project': '333acb15a43242f4a609a27cb097a8f2', + 'status': 'INIT', + 'status_reason': 'Initializing', + 'timeout': None, + 'user': '6d600911ff764e54b309ce734c89595e', + 'dependents': {}, + } +} + + +class TestCluster(base.TestCase): + def setUp(self): + super().setUp() + + def test_basic(self): + sot = cluster.Cluster() + self.assertEqual('cluster', sot.resource_key) + self.assertEqual('clusters', sot.resources_key) + self.assertEqual('/clusters', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_instantiate(self): + sot = cluster.Cluster(**FAKE) + + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['name'], sot.name) + + self.assertEqual(FAKE['profile_id'], sot.profile_id) + + 
self.assertEqual(FAKE['min_size'], sot.min_size) + self.assertEqual(FAKE['max_size'], sot.max_size) + self.assertEqual(FAKE['desired_capacity'], sot.desired_capacity) + + self.assertEqual(FAKE['config'], sot.config) + self.assertEqual(FAKE['timeout'], sot.timeout) + self.assertEqual(FAKE['metadata'], sot.metadata) + + self.assertEqual(FAKE['init_at'], sot.init_at) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['dependents'], sot.dependents) + self.assertTrue(sot.is_profile_only) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "name": "name", + "status": "status", + "sort": "sort", + "global_project": "global_project", + }, + sot._query_mapping._mapping, + ) + + def test_scale_in(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.scale_in(sess, 3)) + url = f'clusters/{sot.id}/actions' + body = {'scale_in': {'count': 3}} + sess.post.assert_called_once_with(url, json=body) + + def test_scale_out(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.scale_out(sess, 3)) + url = f'clusters/{sot.id}/actions' + body = {'scale_out': {'count': 3}} + sess.post.assert_called_once_with(url, json=body) + + def test_resize(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.resize(sess, foo='bar', zoo=5)) + url = f'clusters/{sot.id}/actions' + body = {'resize': {'foo': 'bar', 'zoo': 5}} + sess.post.assert_called_once_with(url, json=body) + + def test_add_nodes(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = 
mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.add_nodes(sess, ['node-33'])) + url = f'clusters/{sot.id}/actions' + body = {'add_nodes': {'nodes': ['node-33']}} + sess.post.assert_called_once_with(url, json=body) + + def test_del_nodes(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.del_nodes(sess, ['node-11'])) + url = f'clusters/{sot.id}/actions' + body = {'del_nodes': {'nodes': ['node-11']}} + sess.post.assert_called_once_with(url, json=body) + + def test_del_nodes_with_params(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + params = { + 'destroy_after_deletion': True, + } + self.assertEqual('', sot.del_nodes(sess, ['node-11'], **params)) + url = f'clusters/{sot.id}/actions' + body = { + 'del_nodes': { + 'nodes': ['node-11'], + 'destroy_after_deletion': True, + } + } + sess.post.assert_called_once_with(url, json=body) + + def test_replace_nodes(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.replace_nodes(sess, {'node-22': 'node-44'})) + url = f'clusters/{sot.id}/actions' + body = {'replace_nodes': {'nodes': {'node-22': 'node-44'}}} + sess.post.assert_called_once_with(url, json=body) + + def test_policy_attach(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + params = { + 'enabled': True, + } + self.assertEqual('', sot.policy_attach(sess, 'POLICY', **params)) + + url = f'clusters/{sot.id}/actions' + body = { + 'policy_attach': { + 'policy_id': 'POLICY', + 'enabled': True, 
+ } + } + sess.post.assert_called_once_with(url, json=body) + + def test_policy_detach(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.policy_detach(sess, 'POLICY')) + + url = f'clusters/{sot.id}/actions' + body = {'policy_detach': {'policy_id': 'POLICY'}} + sess.post.assert_called_once_with(url, json=body) + + def test_policy_update(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + params = {'enabled': False} + self.assertEqual('', sot.policy_update(sess, 'POLICY', **params)) + + url = f'clusters/{sot.id}/actions' + body = {'policy_update': {'policy_id': 'POLICY', 'enabled': False}} + sess.post.assert_called_once_with(url, json=body) + + def test_check(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.check(sess)) + url = f'clusters/{sot.id}/actions' + body = {'check': {}} + sess.post.assert_called_once_with(url, json=body) + + def test_recover(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.recover(sess)) + url = f'clusters/{sot.id}/actions' + body = {'recover': {}} + sess.post.assert_called_once_with(url, json=body) + + def test_operation(self): + sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.op(sess, 'dance', style='tango')) + url = f'clusters/{sot.id}/ops' + body = {'dance': {'style': 'tango'}} + sess.post.assert_called_once_with(url, json=body) + + def test_force_delete(self): + 
sot = cluster.Cluster(**FAKE) + + resp = mock.Mock() + fake_action_id = 'f1de9847-2382-4272-8e73-cab0bc194663' + resp.headers = {'Location': fake_action_id} + resp.json = mock.Mock(return_value={"foo": "bar"}) + resp.status_code = 200 + sess = mock.Mock() + sess.delete = mock.Mock(return_value=resp) + + res = sot.force_delete(sess) + self.assertEqual(fake_action_id, res.id) + url = f'clusters/{sot.id}' + body = {'force': True} + sess.delete.assert_called_once_with(url, json=body) diff --git a/openstack/tests/unit/cluster/v1/test_cluster_attr.py b/openstack/tests/unit/clustering/v1/test_cluster_attr.py similarity index 78% rename from openstack/tests/unit/cluster/v1/test_cluster_attr.py rename to openstack/tests/unit/clustering/v1/test_cluster_attr.py index cded9ef87f..bdbaf56c9f 100644 --- a/openstack/tests/unit/cluster/v1/test_cluster_attr.py +++ b/openstack/tests/unit/clustering/v1/test_cluster_attr.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - -from openstack.cluster.v1 import cluster_attr as ca +from openstack.clustering.v1 import cluster_attr as ca +from openstack.tests.unit import base FAKE = { @@ -23,17 +22,16 @@ } -class TestClusterAttr(testtools.TestCase): - +class TestClusterAttr(base.TestCase): def setUp(self): - super(TestClusterAttr, self).setUp() + super().setUp() def test_basic(self): sot = ca.ClusterAttr() self.assertEqual('cluster_attributes', sot.resources_key) - self.assertEqual('/clusters/%(cluster_id)s/attrs/%(path)s', - sot.base_path) - self.assertEqual('clustering', sot.service.service_type) + self.assertEqual( + '/clusters/%(cluster_id)s/attrs/%(path)s', sot.base_path + ) self.assertTrue(sot.allow_list) def test_instantiate(self): diff --git a/openstack/tests/unit/clustering/v1/test_cluster_policy.py b/openstack/tests/unit/clustering/v1/test_cluster_policy.py new file mode 100644 index 0000000000..d544664cbd --- /dev/null +++ b/openstack/tests/unit/clustering/v1/test_cluster_policy.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.clustering.v1 import cluster_policy +from openstack.tests.unit import base + + +FAKE = { + 'cluster_id': '99e39f4b-1990-4237-a556-1518f0f0c9e7', + 'cluster_name': 'test_cluster', + 'data': {'purpose': 'unknown'}, + 'enabled': True, + 'policy_id': 'ac5415bd-f522-4160-8be0-f8853e4bc332', + 'policy_name': 'dp01', + 'policy_type': 'senlin.poicy.deletion-1.0', +} + + +class TestClusterPolicy(base.TestCase): + def setUp(self): + super().setUp() + + def test_basic(self): + sot = cluster_policy.ClusterPolicy() + self.assertEqual('cluster_policy', sot.resource_key) + self.assertEqual('cluster_policies', sot.resources_key) + self.assertEqual('/clusters/%(cluster_id)s/policies', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + "policy_name": "policy_name", + "policy_type": "policy_type", + "is_enabled": "enabled", + "sort": "sort", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_instantiate(self): + sot = cluster_policy.ClusterPolicy(**FAKE) + self.assertEqual(FAKE['policy_id'], sot.id) + self.assertEqual(FAKE['cluster_id'], sot.cluster_id) + self.assertEqual(FAKE['cluster_name'], sot.cluster_name) + self.assertEqual(FAKE['data'], sot.data) + self.assertTrue(sot.is_enabled) + self.assertEqual(FAKE['policy_id'], sot.policy_id) + self.assertEqual(FAKE['policy_name'], sot.policy_name) + self.assertEqual(FAKE['policy_type'], sot.policy_type) diff --git a/openstack/tests/unit/cluster/v1/test_event.py b/openstack/tests/unit/clustering/v1/test_event.py similarity index 83% rename from openstack/tests/unit/cluster/v1/test_event.py rename to openstack/tests/unit/clustering/v1/test_event.py index 0d482b97ec..598931247d 100644 --- a/openstack/tests/unit/cluster/v1/test_event.py +++ b/openstack/tests/unit/clustering/v1/test_event.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - -from openstack.cluster.v1 import event +from openstack.clustering.v1 import event +from openstack.tests.unit import base FAKE = { @@ -27,22 +26,23 @@ 'status': 'START', 'status_reason': 'The action was abandoned.', 'timestamp': '2016-10-10T12:46:36.000000', - 'user': '5e5bf8027826429c96af157f68dc9072' + 'user': '5e5bf8027826429c96af157f68dc9072', + 'meta_data': { + "action": {"created_at": "2019-07-13T13:18:18Z", "outputs": {}} + }, } -class TestEvent(testtools.TestCase): - +class TestEvent(base.TestCase): def setUp(self): - super(TestEvent, self).setUp() + super().setUp() def test_basic(self): sot = event.Event() self.assertEqual('event', sot.resource_key) self.assertEqual('events', sot.resources_key) self.assertEqual('/events', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) def test_instantiate(self): @@ -59,3 +59,4 @@ def test_instantiate(self): self.assertEqual(FAKE['status_reason'], sot.status_reason) self.assertEqual(FAKE['timestamp'], sot.generated_at) self.assertEqual(FAKE['user'], sot.user_id) + self.assertEqual(FAKE['meta_data'], sot.meta_data) diff --git a/openstack/tests/unit/clustering/v1/test_node.py b/openstack/tests/unit/clustering/v1/test_node.py new file mode 100644 index 0000000000..0627efe811 --- /dev/null +++ b/openstack/tests/unit/clustering/v1/test_node.py @@ -0,0 +1,166 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.clustering.v1 import node +from openstack.tests.unit import base + +FAKE_ID = '123d0955-0099-aabb-b8fa-6a44655ceeff' +FAKE_NAME = 'test_node' + +FAKE = { + 'id': FAKE_ID, + 'cluster_id': 'clusterA', + 'metadata': {'key1': 'value1'}, + 'name': FAKE_NAME, + 'profile_id': 'myserver', + 'domain': '204ccccd267b40aea871750116b5b184', + 'user': '3747afc360b64702a53bdd64dc1b8976', + 'project': '42d9e9663331431f97b75e25136307ff', + 'index': 1, + 'role': 'master', + 'dependents': {}, + 'created_at': '2015-10-10T12:46:36.000000', + 'updated_at': '2016-10-10T12:46:36.000000', + 'init_at': '2015-10-10T12:46:36.000000', + 'tainted': True, +} + + +class TestNode(base.TestCase): + def test_basic(self): + sot = node.Node() + self.assertEqual('node', sot.resource_key) + self.assertEqual('nodes', sot.resources_key) + self.assertEqual('/nodes', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_instantiate(self): + sot = node.Node(**FAKE) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['profile_id'], sot.profile_id) + self.assertEqual(FAKE['cluster_id'], sot.cluster_id) + self.assertEqual(FAKE['user'], sot.user_id) + self.assertEqual(FAKE['project'], sot.project_id) + self.assertEqual(FAKE['domain'], sot.domain_id) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['index'], sot.index) + self.assertEqual(FAKE['role'], sot.role) + self.assertEqual(FAKE['metadata'], sot.metadata) + self.assertEqual(FAKE['init_at'], sot.init_at) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + self.assertEqual(FAKE['dependents'], sot.dependents) + self.assertEqual(FAKE['tainted'], sot.tainted) + + def 
test_check(self): + sot = node.Node(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.check(sess)) + url = f'nodes/{sot.id}/actions' + body = {'check': {}} + sess.post.assert_called_once_with(url, json=body) + + def test_recover(self): + sot = node.Node(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.recover(sess)) + url = f'nodes/{sot.id}/actions' + body = {'recover': {}} + sess.post.assert_called_once_with(url, json=body) + + def test_operation(self): + sot = node.Node(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + self.assertEqual('', sot.op(sess, 'dance', style='tango')) + url = f'nodes/{sot.id}/ops' + sess.post.assert_called_once_with( + url, json={'dance': {'style': 'tango'}} + ) + + def test_adopt_preview(self): + sot = node.Node.new() + resp = mock.Mock() + resp.headers = {} + resp.json = mock.Mock(return_value={"foo": "bar"}) + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + + attrs = { + 'identity': 'fake-resource-id', + 'overrides': {}, + 'type': 'os.nova.server-1.0', + 'snapshot': False, + } + res = sot.adopt(sess, True, **attrs) + self.assertEqual({"foo": "bar"}, res) + sess.post.assert_called_once_with("nodes/adopt-preview", json=attrs) + + def test_adopt(self): + sot = node.Node.new() + resp = mock.Mock() + resp.headers = {} + resp.json = mock.Mock(return_value={"foo": "bar"}) + resp.status_code = 200 + sess = mock.Mock() + sess.post = mock.Mock(return_value=resp) + + res = sot.adopt(sess, False, param="value") + self.assertEqual(sot, res) + sess.post.assert_called_once_with( + "nodes/adopt", json={"param": "value"} + ) + + def test_force_delete(self): + sot = node.Node(**FAKE) + + resp = mock.Mock() + fake_action_id = 
'f1de9847-2382-4272-8e73-cab0bc194663' + resp.headers = {'Location': fake_action_id} + resp.json = mock.Mock(return_value={"foo": "bar"}) + resp.status_code = 200 + sess = mock.Mock() + sess.delete = mock.Mock(return_value=resp) + + res = sot.force_delete(sess) + self.assertEqual(fake_action_id, res.id) + url = f'nodes/{sot.id}' + body = {'force': True} + sess.delete.assert_called_once_with(url, json=body) + + +class TestNodeDetail(base.TestCase): + def test_basic(self): + sot = node.NodeDetail() + self.assertEqual('/nodes/%(node_id)s?show_details=True', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) diff --git a/openstack/tests/unit/cluster/v1/test_policy.py b/openstack/tests/unit/clustering/v1/test_policy.py similarity index 82% rename from openstack/tests/unit/cluster/v1/test_policy.py rename to openstack/tests/unit/clustering/v1/test_policy.py index 3c2be20436..4cbcd78c23 100644 --- a/openstack/tests/unit/cluster/v1/test_policy.py +++ b/openstack/tests/unit/clustering/v1/test_policy.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - -from openstack.cluster.v1 import policy +from openstack.clustering.v1 import policy +from openstack.tests.unit import base FAKE_ID = 'ac5415bd-f522-4160-8be0-f8853e4bc332' @@ -29,9 +28,10 @@ 'grace_period': 60, 'reduce_desired_capacity': False, 'destroy_after_deletion': True, - } + }, }, 'project': '42d9e9663331431f97b75e25136307ff', + 'domain': '204ccccd267b40aea871750116b5b184', 'user': '3747afc360b64702a53bdd64dc1b8976', 'type': 'senlin.policy.deletion-1.0', 'created_at': '2015-10-10T12:46:36.000000', @@ -40,20 +40,18 @@ } -class TestPolicy(testtools.TestCase): - +class TestPolicy(base.TestCase): def setUp(self): - super(TestPolicy, self).setUp() + super().setUp() def test_basic(self): sot = policy.Policy() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/policies', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -63,25 +61,24 @@ def test_instantiate(self): self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['spec'], sot.spec) self.assertEqual(FAKE['project'], sot.project_id) + self.assertEqual(FAKE['domain'], sot.domain_id) self.assertEqual(FAKE['user'], sot.user_id) self.assertEqual(FAKE['data'], sot.data) self.assertEqual(FAKE['created_at'], sot.created_at) self.assertEqual(FAKE['updated_at'], sot.updated_at) -class TestPolicyValidate(testtools.TestCase): - +class TestPolicyValidate(base.TestCase): def setUp(self): - super(TestPolicyValidate, self).setUp() + super().setUp() def test_basic(self): sot = policy.PolicyValidate() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/policies/validate', sot.base_path) - self.assertEqual('clustering', 
sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) diff --git a/openstack/tests/unit/cluster/v1/test_policy_type.py b/openstack/tests/unit/clustering/v1/test_policy_type.py similarity index 75% rename from openstack/tests/unit/cluster/v1/test_policy_type.py rename to openstack/tests/unit/clustering/v1/test_policy_type.py index fd20733a3a..7ada53e6df 100644 --- a/openstack/tests/unit/cluster/v1/test_policy_type.py +++ b/openstack/tests/unit/clustering/v1/test_policy_type.py @@ -10,34 +10,24 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - -from openstack.cluster.v1 import policy_type +from openstack.clustering.v1 import policy_type +from openstack.tests.unit import base FAKE = { 'name': 'FAKE_POLICY_TYPE', - 'schema': { - 'foo': 'bar' - }, - 'support_status': { - '1.0': [{ - 'status': 'supported', - 'since': '2016.10' - }] - } + 'schema': {'foo': 'bar'}, + 'support_status': {'1.0': [{'status': 'supported', 'since': '2016.10'}]}, } -class TestPolicyType(testtools.TestCase): - +class TestPolicyType(base.TestCase): def test_basic(self): sot = policy_type.PolicyType() self.assertEqual('policy_type', sot.resource_key) self.assertEqual('policy_types', sot.resources_key) self.assertEqual('/policy-types', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) def test_instantiate(self): diff --git a/openstack/tests/unit/clustering/v1/test_profile.py b/openstack/tests/unit/clustering/v1/test_profile.py new file mode 100644 index 0000000000..7a2e7ea6f5 --- /dev/null +++ b/openstack/tests/unit/clustering/v1/test_profile.py @@ -0,0 +1,87 @@ +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.clustering.v1 import profile +from openstack.tests.unit import base + + +FAKE_ID = '9b127538-a675-4271-ab9b-f24f54cfe173' +FAKE_NAME = 'test_profile' + +FAKE = { + 'metadata': {}, + 'name': FAKE_NAME, + 'id': FAKE_ID, + 'spec': { + 'type': 'os.nova.server', + 'version': 1.0, + 'properties': { + 'flavor': 1, + 'image': 'cirros-0.3.2-x86_64-uec', + 'key_name': 'oskey', + 'name': 'cirros_server', + }, + }, + 'project': '42d9e9663331431f97b75e25136307ff', + 'domain': '204ccccd267b40aea871750116b5b184', + 'user': '3747afc360b64702a53bdd64dc1b8976', + 'type': 'os.nova.server', + 'created_at': '2015-10-10T12:46:36.000000', + 'updated_at': '2016-10-10T12:46:36.000000', +} + + +class TestProfile(base.TestCase): + def setUp(self): + super().setUp() + + def test_basic(self): + sot = profile.Profile() + self.assertEqual('profile', sot.resource_key) + self.assertEqual('profiles', sot.resources_key) + self.assertEqual('/profiles', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + def test_instantiate(self): + sot = profile.Profile(**FAKE) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['metadata'], sot.metadata) + self.assertEqual(FAKE['spec'], sot.spec) + self.assertEqual(FAKE['project'], 
sot.project_id) + self.assertEqual(FAKE['domain'], sot.domain_id) + self.assertEqual(FAKE['user'], sot.user_id) + self.assertEqual(FAKE['type'], sot.type) + self.assertEqual(FAKE['created_at'], sot.created_at) + self.assertEqual(FAKE['updated_at'], sot.updated_at) + + +class TestProfileValidate(base.TestCase): + def setUp(self): + super().setUp() + + def test_basic(self): + sot = profile.ProfileValidate() + self.assertEqual('profile', sot.resource_key) + self.assertEqual('profiles', sot.resources_key) + self.assertEqual('/profiles/validate', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + self.assertEqual('PUT', sot.commit_method) diff --git a/openstack/tests/unit/clustering/v1/test_profile_type.py b/openstack/tests/unit/clustering/v1/test_profile_type.py new file mode 100644 index 0000000000..b61030c9e5 --- /dev/null +++ b/openstack/tests/unit/clustering/v1/test_profile_type.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack.clustering.v1 import profile_type +from openstack.tests.unit import base + +FAKE = { + 'name': 'FAKE_PROFILE_TYPE', + 'schema': {'foo': 'bar'}, + 'support_status': { + '1.0': [ + { + 'status': 'supported', + 'since': '2016.10', + } + ] + }, +} + + +class TestProfileType(base.TestCase): + def test_basic(self): + sot = profile_type.ProfileType() + self.assertEqual('profile_type', sot.resource_key) + self.assertEqual('profile_types', sot.resources_key) + self.assertEqual('/profile-types', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + + def test_instantiate(self): + sot = profile_type.ProfileType(**FAKE) + self.assertEqual(FAKE['name'], sot._get_id(sot)) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual(FAKE['schema'], sot.schema) + self.assertEqual(FAKE['support_status'], sot.support_status) + + def test_ops(self): + sot = profile_type.ProfileType(**FAKE) + + resp = mock.Mock() + resp.json = mock.Mock(return_value='') + sess = mock.Mock() + sess.get = mock.Mock(return_value=resp) + self.assertEqual('', sot.type_ops(sess)) + url = f'profile-types/{sot.id}/ops' + sess.get.assert_called_once_with(url) diff --git a/openstack/tests/unit/clustering/v1/test_proxy.py b/openstack/tests/unit/clustering/v1/test_proxy.py new file mode 100644 index 0000000000..46d717e541 --- /dev/null +++ b/openstack/tests/unit/clustering/v1/test_proxy.py @@ -0,0 +1,486 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.clustering.v1 import _proxy +from openstack.clustering.v1 import action +from openstack.clustering.v1 import build_info +from openstack.clustering.v1 import cluster +from openstack.clustering.v1 import cluster_attr +from openstack.clustering.v1 import cluster_policy +from openstack.clustering.v1 import event +from openstack.clustering.v1 import node +from openstack.clustering.v1 import policy +from openstack.clustering.v1 import policy_type +from openstack.clustering.v1 import profile +from openstack.clustering.v1 import profile_type +from openstack.clustering.v1 import receiver +from openstack.clustering.v1 import service +from openstack import proxy as proxy_base +from openstack.tests.unit import test_proxy_base + + +class TestClusterProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_build_info_get(self): + self.verify_get( + self.proxy.get_build_info, + build_info.BuildInfo, + method_args=[], + expected_kwargs={'requires_id': False}, + ) + + def test_profile_types(self): + self.verify_list(self.proxy.profile_types, profile_type.ProfileType) + + def test_profile_type_get(self): + self.verify_get(self.proxy.get_profile_type, profile_type.ProfileType) + + def test_policy_types(self): + self.verify_list(self.proxy.policy_types, policy_type.PolicyType) + + def test_policy_type_get(self): + self.verify_get(self.proxy.get_policy_type, policy_type.PolicyType) + + def test_profile_create(self): + self.verify_create(self.proxy.create_profile, profile.Profile) + + def test_profile_validate(self): + self.verify_create( + self.proxy.validate_profile, profile.ProfileValidate + ) + + def test_profile_delete(self): + self.verify_delete(self.proxy.delete_profile, profile.Profile, False) + + def test_profile_delete_ignore(self): + 
self.verify_delete(self.proxy.delete_profile, profile.Profile, True) + + def test_profile_find(self): + self.verify_find(self.proxy.find_profile, profile.Profile) + + def test_profile_get(self): + self.verify_get(self.proxy.get_profile, profile.Profile) + + def test_profiles(self): + self.verify_list( + self.proxy.profiles, + profile.Profile, + method_kwargs={'limit': 2}, + expected_kwargs={'limit': 2}, + ) + + def test_profile_update(self): + self.verify_update(self.proxy.update_profile, profile.Profile) + + def test_cluster_create(self): + self.verify_create(self.proxy.create_cluster, cluster.Cluster) + + def test_cluster_delete(self): + self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, False) + + def test_cluster_delete_ignore(self): + self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, True) + + def test_cluster_force_delete(self): + self._verify( + "openstack.clustering.v1.cluster.Cluster.force_delete", + self.proxy.delete_cluster, + method_args=["value", False, True], + expected_args=[self.proxy], + ) + + def test_cluster_find(self): + self.verify_find(self.proxy.find_cluster, cluster.Cluster) + + def test_cluster_get(self): + self.verify_get(self.proxy.get_cluster, cluster.Cluster) + + def test_clusters(self): + self.verify_list( + self.proxy.clusters, + cluster.Cluster, + method_kwargs={'limit': 2}, + expected_kwargs={'limit': 2}, + ) + + def test_cluster_update(self): + self.verify_update(self.proxy.update_cluster, cluster.Cluster) + + def test_services(self): + self.verify_list(self.proxy.services, service.Service) + + @mock.patch.object(proxy_base.Proxy, '_find') + def test_resize_cluster(self, mock_find): + mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') + mock_find.return_value = mock_cluster + self._verify( + "openstack.clustering.v1.cluster.Cluster.resize", + self.proxy.resize_cluster, + method_args=["FAKE_CLUSTER"], + method_kwargs={'k1': 'v1', 'k2': 'v2'}, + expected_args=[self.proxy], + expected_kwargs={'k1': 'v1', 
'k2': 'v2'}, + ) + mock_find.assert_called_once_with( + cluster.Cluster, "FAKE_CLUSTER", ignore_missing=False + ) + + def test_resize_cluster_with_obj(self): + mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') + self._verify( + "openstack.clustering.v1.cluster.Cluster.resize", + self.proxy.resize_cluster, + method_args=[mock_cluster], + method_kwargs={'k1': 'v1', 'k2': 'v2'}, + expected_args=[self.proxy], + expected_kwargs={'k1': 'v1', 'k2': 'v2'}, + ) + + def test_collect_cluster_attrs(self): + self.verify_list( + self.proxy.collect_cluster_attrs, + cluster_attr.ClusterAttr, + method_args=['FAKE_ID', 'path.to.attr'], + expected_args=[], + expected_kwargs={'cluster_id': 'FAKE_ID', 'path': 'path.to.attr'}, + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_cluster_check(self, mock_get): + mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') + mock_get.return_value = mock_cluster + self._verify( + "openstack.clustering.v1.cluster.Cluster.check", + self.proxy.check_cluster, + method_args=["FAKE_CLUSTER"], + expected_args=[self.proxy], + ) + mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_cluster_recover(self, mock_get): + mock_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') + mock_get.return_value = mock_cluster + self._verify( + "openstack.clustering.v1.cluster.Cluster.recover", + self.proxy.recover_cluster, + method_args=["FAKE_CLUSTER"], + expected_args=[self.proxy], + ) + mock_get.assert_called_once_with(cluster.Cluster, "FAKE_CLUSTER") + + def test_node_create(self): + self.verify_create(self.proxy.create_node, node.Node) + + def test_node_delete(self): + self.verify_delete(self.proxy.delete_node, node.Node, False) + + def test_node_delete_ignore(self): + self.verify_delete(self.proxy.delete_node, node.Node, True) + + def test_node_force_delete(self): + self._verify( + "openstack.clustering.v1.node.Node.force_delete", + self.proxy.delete_node, + 
method_args=["value", False, True], + expected_args=[self.proxy], + ) + + def test_node_find(self): + self.verify_find(self.proxy.find_node, node.Node) + + def test_node_get(self): + self.verify_get(self.proxy.get_node, node.Node) + + def test_node_get_with_details(self): + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_node, + method_args=['NODE_ID'], + method_kwargs={'details': True}, + expected_args=[node.NodeDetail], + expected_kwargs={'node_id': 'NODE_ID', 'requires_id': False}, + ) + + def test_nodes(self): + self.verify_list( + self.proxy.nodes, + node.Node, + method_kwargs={'limit': 2}, + expected_kwargs={'limit': 2}, + ) + + def test_node_update(self): + self.verify_update(self.proxy.update_node, node.Node) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_node_check(self, mock_get): + mock_node = node.Node.new(id='FAKE_NODE') + mock_get.return_value = mock_node + self._verify( + "openstack.clustering.v1.node.Node.check", + self.proxy.check_node, + method_args=["FAKE_NODE"], + expected_args=[self.proxy], + ) + mock_get.assert_called_once_with(node.Node, "FAKE_NODE") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_node_recover(self, mock_get): + mock_node = node.Node.new(id='FAKE_NODE') + mock_get.return_value = mock_node + self._verify( + "openstack.clustering.v1.node.Node.recover", + self.proxy.recover_node, + method_args=["FAKE_NODE"], + expected_args=[self.proxy], + ) + mock_get.assert_called_once_with(node.Node, "FAKE_NODE") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_node_adopt(self, mock_get): + mock_node = node.Node.new() + mock_get.return_value = mock_node + self._verify( + "openstack.clustering.v1.node.Node.adopt", + self.proxy.adopt_node, + method_kwargs={"preview": False, "foo": "bar"}, + expected_args=[self.proxy], + expected_kwargs={"preview": False, "foo": "bar"}, + ) + + mock_get.assert_called_once_with(node.Node, None) + + @mock.patch.object(proxy_base.Proxy, 
'_get_resource') + def test_node_adopt_preview(self, mock_get): + mock_node = node.Node.new() + mock_get.return_value = mock_node + self._verify( + "openstack.clustering.v1.node.Node.adopt", + self.proxy.adopt_node, + method_kwargs={"preview": True, "foo": "bar"}, + expected_args=[self.proxy], + expected_kwargs={"preview": True, "foo": "bar"}, + ) + + mock_get.assert_called_once_with(node.Node, None) + + def test_policy_create(self): + self.verify_create(self.proxy.create_policy, policy.Policy) + + def test_policy_validate(self): + self.verify_create(self.proxy.validate_policy, policy.PolicyValidate) + + def test_policy_delete(self): + self.verify_delete(self.proxy.delete_policy, policy.Policy, False) + + def test_policy_delete_ignore(self): + self.verify_delete(self.proxy.delete_policy, policy.Policy, True) + + def test_policy_find(self): + self.verify_find(self.proxy.find_policy, policy.Policy) + + def test_policy_get(self): + self.verify_get(self.proxy.get_policy, policy.Policy) + + def test_policies(self): + self.verify_list( + self.proxy.policies, + policy.Policy, + method_kwargs={'limit': 2}, + expected_kwargs={'limit': 2}, + ) + + def test_policy_update(self): + self.verify_update(self.proxy.update_policy, policy.Policy) + + def test_cluster_policies(self): + self.verify_list( + self.proxy.cluster_policies, + cluster_policy.ClusterPolicy, + method_args=["FAKE_CLUSTER"], + expected_args=[], + expected_kwargs={"cluster_id": "FAKE_CLUSTER"}, + ) + + def test_get_cluster_policy(self): + fake_policy = cluster_policy.ClusterPolicy.new(id="FAKE_POLICY") + fake_cluster = cluster.Cluster.new(id='FAKE_CLUSTER') + + # ClusterPolicy object as input + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_cluster_policy, + method_args=[fake_policy, "FAKE_CLUSTER"], + expected_args=[cluster_policy.ClusterPolicy, fake_policy], + expected_kwargs={'cluster_id': 'FAKE_CLUSTER'}, + expected_result=fake_policy, + ) + + # Policy ID as input + self._verify( + 
'openstack.proxy.Proxy._get', + self.proxy.get_cluster_policy, + method_args=["FAKE_POLICY", "FAKE_CLUSTER"], + expected_args=[cluster_policy.ClusterPolicy, "FAKE_POLICY"], + expected_kwargs={"cluster_id": "FAKE_CLUSTER"}, + ) + + # Cluster object as input + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_cluster_policy, + method_args=["FAKE_POLICY", fake_cluster], + expected_args=[cluster_policy.ClusterPolicy, "FAKE_POLICY"], + expected_kwargs={"cluster_id": fake_cluster}, + ) + + def test_receiver_create(self): + self.verify_create(self.proxy.create_receiver, receiver.Receiver) + + def test_receiver_update(self): + self.verify_update(self.proxy.update_receiver, receiver.Receiver) + + def test_receiver_delete(self): + self.verify_delete( + self.proxy.delete_receiver, receiver.Receiver, False + ) + + def test_receiver_delete_ignore(self): + self.verify_delete(self.proxy.delete_receiver, receiver.Receiver, True) + + def test_receiver_find(self): + self.verify_find(self.proxy.find_receiver, receiver.Receiver) + + def test_receiver_get(self): + self.verify_get(self.proxy.get_receiver, receiver.Receiver) + + def test_receivers(self): + self.verify_list( + self.proxy.receivers, + receiver.Receiver, + method_kwargs={'limit': 2}, + expected_kwargs={'limit': 2}, + ) + + def test_action_get(self): + self.verify_get(self.proxy.get_action, action.Action) + + def test_actions(self): + self.verify_list( + self.proxy.actions, + action.Action, + method_kwargs={'limit': 2}, + expected_kwargs={'limit': 2}, + ) + + def test_action_update(self): + self.verify_update(self.proxy.update_action, action.Action) + + def test_event_get(self): + self.verify_get(self.proxy.get_event, event.Event) + + def test_events(self): + self.verify_list( + self.proxy.events, + event.Event, + method_kwargs={'limit': 2}, + expected_kwargs={'limit': 2}, + ) + + @mock.patch("openstack.resource.wait_for_status") + def test_wait_for(self, mock_wait): + mock_resource = mock.Mock() + 
mock_wait.return_value = mock_resource + + self.proxy.wait_for_status(mock_resource, 'ACTIVE') + + mock_wait.assert_called_once_with( + self.proxy, mock_resource, 'ACTIVE', None, 2, None, 'status', None + ) + + @mock.patch("openstack.resource.wait_for_status") + def test_wait_for_params(self, mock_wait): + mock_resource = mock.Mock() + mock_wait.return_value = mock_resource + + self.proxy.wait_for_status(mock_resource, 'ACTIVE', ['ERROR'], 1, 2) + + mock_wait.assert_called_once_with( + self.proxy, + mock_resource, + 'ACTIVE', + ['ERROR'], + 1, + 2, + 'status', + None, + ) + + @mock.patch("openstack.resource.wait_for_delete") + def test_wait_for_delete(self, mock_wait): + mock_resource = mock.Mock() + mock_wait.return_value = mock_resource + + self.proxy.wait_for_delete(mock_resource) + + mock_wait.assert_called_once_with( + self.proxy, mock_resource, 2, 120, None + ) + + @mock.patch("openstack.resource.wait_for_delete") + def test_wait_for_delete_params(self, mock_wait): + mock_resource = mock.Mock() + mock_wait.return_value = mock_resource + + self.proxy.wait_for_delete(mock_resource, 1, 2) + + mock_wait.assert_called_once_with( + self.proxy, mock_resource, 1, 2, None + ) + + def test_get_cluster_metadata(self): + self._verify( + "openstack.clustering.v1.cluster.Cluster.fetch_metadata", + self.proxy.get_cluster_metadata, + method_args=["value"], + expected_args=[self.proxy], + expected_result=cluster.Cluster(id="value", metadata={}), + ) + + def test_set_cluster_metadata(self): + kwargs = {"a": "1", "b": "2"} + id = "an_id" + self._verify( + "openstack.clustering.v1.cluster.Cluster.set_metadata", + self.proxy.set_cluster_metadata, + method_args=[id], + method_kwargs=kwargs, + method_result=cluster.Cluster.existing(id=id, metadata=kwargs), + expected_args=[self.proxy], + expected_kwargs={'metadata': kwargs}, + expected_result=cluster.Cluster.existing(id=id, metadata=kwargs), + ) + + def test_delete_cluster_metadata(self): + self._verify( + 
"openstack.clustering.v1.cluster.Cluster.delete_metadata_item", + self.proxy.delete_cluster_metadata, + expected_result=None, + method_args=["value", ["key"]], + expected_args=[self.proxy, "key"], + ) diff --git a/openstack/tests/unit/cluster/v1/test_receiver.py b/openstack/tests/unit/clustering/v1/test_receiver.py similarity index 84% rename from openstack/tests/unit/cluster/v1/test_receiver.py rename to openstack/tests/unit/clustering/v1/test_receiver.py index b566c7976d..43c61b6d9d 100644 --- a/openstack/tests/unit/cluster/v1/test_receiver.py +++ b/openstack/tests/unit/clustering/v1/test_receiver.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - -from openstack.cluster.v1 import receiver +from openstack.clustering.v1 import receiver +from openstack.tests.unit import base FAKE_ID = 'ae63a10b-4a90-452c-aef1-113a0b255ee3' @@ -27,10 +26,7 @@ 'created_at': '2015-10-10T12:46:36.000000', 'updated_at': '2016-10-10T12:46:36.000000', 'actor': {}, - 'params': { - 'adjustment_type': 'CHANGE_IN_CAPACITY', - 'adjustment': 2 - }, + 'params': {'adjustment_type': 'CHANGE_IN_CAPACITY', 'adjustment': 2}, 'channel': { 'alarm_url': 'http://host:port/webhooks/AN_ID/trigger?V=1', }, @@ -40,20 +36,18 @@ } -class TestReceiver(testtools.TestCase): - +class TestReceiver(base.TestCase): def setUp(self): - super(TestReceiver, self).setUp() + super().setUp() def test_basic(self): sot = receiver.Receiver() self.assertEqual('receiver', sot.resource_key) self.assertEqual('receivers', sot.resources_key) self.assertEqual('/receivers', sot.base_path) - self.assertEqual('clustering', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/clustering/v1/test_service.py 
b/openstack/tests/unit/clustering/v1/test_service.py new file mode 100644 index 0000000000..303603465d --- /dev/null +++ b/openstack/tests/unit/clustering/v1/test_service.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.clustering.v1 import service +from openstack.tests.unit import base + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'binary': 'senlin-engine', + 'host': 'host1', + 'status': 'enabled', + 'state': 'up', + 'disabled_reason': None, + 'updated_at': '2016-10-10T12:46:36.000000', +} + + +class TestService(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock() + self.sess.put = mock.Mock(return_value=self.resp) + + def test_basic(self): + sot = service.Service() + self.assertEqual('service', sot.resource_key) + self.assertEqual('services', sot.resources_key) + self.assertEqual('/services', sot.base_path) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = service.Service(**EXAMPLE) + self.assertEqual(EXAMPLE['host'], sot.host) + self.assertEqual(EXAMPLE['binary'], sot.binary) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['state'], sot.state) + self.assertEqual(EXAMPLE['disabled_reason'], sot.disabled_reason) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/common/__init__.py 
b/openstack/tests/unit/common/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/common/test_metadata.py b/openstack/tests/unit/common/test_metadata.py new file mode 100644 index 0000000000..839bbd3cfe --- /dev/null +++ b/openstack/tests/unit/common/test_metadata.py @@ -0,0 +1,197 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.common import metadata +from openstack import exceptions +from openstack import resource +from openstack.tests.unit import base +from openstack.tests.unit.test_resource import FakeResponse + +IDENTIFIER = 'IDENTIFIER' + + +class TestMetadata(base.TestCase): + def setUp(self): + super().setUp() + + self.service_name = "service" + self.base_path = "base_path" + + self.metadata_result = {"metadata": {"go": "cubs", "boo": "sox"}} + self.meta_result = {"meta": {"oh": "yeah"}} + + class Test(resource.Resource, metadata.MetadataMixin): + service = self.service_name + base_path = self.base_path + resources_key = 'resources' + allow_create = True + allow_fetch = True + allow_head = True + allow_commit = True + allow_delete = True + allow_list = True + + self.test_class = Test + + self.request = mock.Mock(spec=resource._Request) + self.request.url = "uri" + self.request.body = "body" + self.request.headers = "headers" + + self.response = FakeResponse({}) + + self.sot = Test.new(id="id") + self.sot._prepare_request = 
mock.Mock(return_value=self.request) + self.sot._translate_response = mock.Mock() + + self.session = mock.Mock(spec=adapter.Adapter) + self.session.get = mock.Mock(return_value=self.response) + self.session.put = mock.Mock(return_value=self.response) + self.session.post = mock.Mock(return_value=self.response) + self.session.delete = mock.Mock(return_value=self.response) + + def test_metadata_attribute(self): + res = self.sot + self.assertTrue(hasattr(res, 'metadata')) + + def test_get_metadata(self): + res = self.sot + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = {'metadata': {'foo': 'bar'}} + + self.session.get.side_effect = [mock_response] + + result = res.fetch_metadata(self.session) + # Check metadata attribute is updated + self.assertDictEqual({'foo': 'bar'}, result.metadata) + # Check passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata' + self.session.get.assert_called_once_with(url) + + def test_set_metadata(self): + res = self.sot + + result = res.set_metadata(self.session, {'foo': 'bar'}) + # Check metadata attribute is updated + self.assertDictEqual({'foo': 'bar'}, res.metadata) + # Check passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata' + self.session.post.assert_called_once_with( + url, json={'metadata': {'foo': 'bar'}} + ) + + def test_replace_metadata(self): + res = self.sot + + result = res.replace_metadata(self.session, {'foo': 'bar'}) + # Check metadata attribute is updated + self.assertDictEqual({'foo': 'bar'}, res.metadata) + # Check passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata' + self.session.put.assert_called_once_with( + url, json={'metadata': {'foo': 'bar'}} + ) + + def test_delete_all_metadata(self): + res = self.sot + + # Set some initial value to check removal + res.metadata 
= {'foo': 'bar'} + + result = res.delete_metadata(self.session) + # Check metadata attribute is updated + self.assertEqual({}, res.metadata) + # Check passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata' + self.session.put.assert_called_once_with(url, json={'metadata': {}}) + + def test_get_metadata_item(self): + res = self.sot + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {'meta': {'foo': 'bar'}} + self.session.get.side_effect = [mock_response] + + result = res.get_metadata_item(self.session, 'foo') + # Check tags attribute is updated + self.assertEqual({'foo': 'bar'}, res.metadata) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata/foo' + self.session.get.assert_called_once_with(url) + + def test_delete_single_item(self): + res = self.sot + + res.metadata = {'foo': 'bar', 'foo2': 'bar2'} + + result = res.delete_metadata_item(self.session, 'foo2') + # Check metadata attribute is updated + self.assertEqual({'foo': 'bar'}, res.metadata) + # Check passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata/foo2' + self.session.delete.assert_called_once_with(url) + + def test_delete_signle_item_empty(self): + res = self.sot + + result = res.delete_metadata_item(self.session, 'foo2') + # Check metadata attribute is updated + self.assertEqual({}, res.metadata) + # Check passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata/foo2' + self.session.delete.assert_called_once_with(url) + + def test_get_metadata_item_not_exists(self): + res = self.sot + + mock_response = mock.Mock() + mock_response.status_code = 404 + mock_response.content = None + self.session.get.side_effect = [mock_response] + + # ensure we get 404 + self.assertRaises( + exceptions.NotFoundException, + 
res.get_metadata_item, + self.session, + 'dummy', + ) + + def test_set_metadata_item(self): + res = self.sot + + # Set some initial value to check add + res.metadata = {'foo': 'bar'} + + result = res.set_metadata_item(self.session, 'foo', 'black') + # Check metadata attribute is updated + self.assertEqual({'foo': 'black'}, res.metadata) + # Check passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/metadata/foo' + self.session.put.assert_called_once_with( + url, json={'meta': {'foo': 'black'}} + ) diff --git a/openstack/tests/unit/common/test_quota_set.py b/openstack/tests/unit/common/test_quota_set.py new file mode 100644 index 0000000000..c686e7bba2 --- /dev/null +++ b/openstack/tests/unit/common/test_quota_set.py @@ -0,0 +1,153 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+import copy +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.common import quota_set as _qs +from openstack.tests.unit import base + + +BASIC_EXAMPLE = { + "backup_gigabytes": 1000, + "backups": 10, + "gigabytes___DEFAULT__": -1, +} + +USAGE_EXAMPLE = { + "backup_gigabytes": {"in_use": 0, "limit": 1000, "reserved": 0}, + "backups": {"in_use": 0, "limit": 10, "reserved": 0}, + "gigabytes___DEFAULT__": {"in_use": 0, "limit": -1, "reserved": 0}, +} + + +class TestQuotaSet(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = 1 + self.sess._get_connection = mock.Mock(return_value=self.cloud) + self.sess.retriable_status_codes = set() + + def test_basic(self): + sot = _qs.QuotaSet() + self.assertEqual('quota_set', sot.resource_key) + self.assertIsNone(sot.resources_key) + self.assertEqual('/os-quota-sets/%(project_id)s', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + self.assertFalse(sot.allow_list) + self.assertTrue(sot.allow_commit) + + self.assertDictEqual( + {"usage": "usage", "limit": "limit", "marker": "marker"}, + sot._query_mapping._mapping, + ) + + def test_make_basic(self): + sot = _qs.QuotaSet(**BASIC_EXAMPLE) + + self.assertEqual(BASIC_EXAMPLE['backups'], sot.backups) + + def test_get(self): + sot = _qs.QuotaSet(project_id='proj') + + resp = mock.Mock() + resp.body = {'quota_set': copy.deepcopy(BASIC_EXAMPLE)} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + resp.headers = {} + self.sess.get = mock.Mock(return_value=resp) + + sot.fetch(self.sess) + + self.sess.get.assert_called_with( + '/os-quota-sets/proj', microversion=1, params={}, skip_cache=False + ) + + self.assertEqual(BASIC_EXAMPLE['backups'], sot.backups) + self.assertEqual({}, sot.reservation) + self.assertEqual({}, sot.usage) + + def test_get_usage(self): + sot = 
_qs.QuotaSet(project_id='proj') + + resp = mock.Mock() + resp.body = {'quota_set': copy.deepcopy(USAGE_EXAMPLE)} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + resp.headers = {} + self.sess.get = mock.Mock(return_value=resp) + + sot.fetch(self.sess, usage=True) + + self.sess.get.assert_called_with( + '/os-quota-sets/proj', + microversion=1, + params={'usage': True}, + skip_cache=False, + ) + + self.assertEqual(USAGE_EXAMPLE['backups']['limit'], sot.backups) + + def test_update_quota(self): + # Use QuotaSet as if it was returned by get(usage=True) + sot = _qs.QuotaSet.existing( + project_id='proj', + reservation={'a': 'b'}, + usage={'c': 'd'}, + foo='bar', + ) + + resp = mock.Mock() + resp.body = {'quota_set': copy.deepcopy(BASIC_EXAMPLE)} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + resp.headers = {} + self.sess.put = mock.Mock(return_value=resp) + + sot._update(reservation={'b': 'd'}, backups=15, something_else=20) + + sot.commit(self.sess) + + self.sess.put.assert_called_with( + '/os-quota-sets/proj', + microversion=1, + headers={}, + json={'quota_set': {'backups': 15, 'something_else': 20}}, + ) + + def test_delete_quota(self): + # Use QuotaSet as if it was returned by get(usage=True) + sot = _qs.QuotaSet.existing( + project_id='proj', + reservation={'a': 'b'}, + usage={'c': 'd'}, + foo='bar', + ) + + resp = mock.Mock() + resp.body = None + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + resp.headers = {} + self.sess.delete = mock.Mock(return_value=resp) + + sot.delete(self.sess) + + self.sess.delete.assert_called_with( + '/os-quota-sets/proj', + microversion=1, + headers={}, + ) diff --git a/openstack/tests/unit/common/test_tag.py b/openstack/tests/unit/common/test_tag.py new file mode 100644 index 0000000000..e61d04f068 --- /dev/null +++ b/openstack/tests/unit/common/test_tag.py @@ -0,0 +1,267 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not 
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.common import tag +from openstack import exceptions +from openstack import resource +from openstack.tests.unit import base +from openstack.tests.unit.test_resource import FakeResponse + + +class TestTagMixin(base.TestCase): + def setUp(self): + super().setUp() + + self.service_name = "service" + self.base_path = "base_path" + + class Test(resource.Resource, tag.TagMixin): + service = self.service_name + base_path = self.base_path + resources_key = 'resources' + allow_create = True + allow_fetch = True + allow_head = True + allow_commit = True + allow_delete = True + allow_list = True + + self.test_class = Test + + self.request = mock.Mock(spec=resource._Request) + self.request.url = "uri" + self.request.body = "body" + self.request.headers = "headers" + + self.response = FakeResponse({}) + + self.sot = Test.new(id="id", tags=[]) + self.sot._prepare_request = mock.Mock(return_value=self.request) + self.sot._translate_response = mock.Mock() + self.sot._get_microversion = mock.Mock(return_value=None) + + self.session = mock.Mock(spec=adapter.Adapter) + self.session.get = mock.Mock(return_value=self.response) + self.session.put = mock.Mock(return_value=self.response) + self.session.delete = mock.Mock(return_value=self.response) + + def test_tags_attribute(self): + res = self.sot + self.assertTrue(hasattr(res, 'tags')) + self.assertIsInstance(res.tags, list) + + def test_fetch_tags(self): + res = self.sot + sess = self.session + 
+ mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = {'tags': ['blue1', 'green1']} + + sess.get.side_effect = [mock_response] + + result = res.fetch_tags(sess) + # Check tags attribute is updated + self.assertEqual(['blue1', 'green1'], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags' + sess.get.assert_called_once_with(url, microversion=None) + + def test_set_tags(self): + res = self.sot + sess = self.session + + # Set some initial value to check rewrite + res.tags = ['blue_old', 'green_old'] + + result = res.set_tags(sess, ['blue', 'green']) + # Check tags attribute is updated + self.assertEqual(['blue', 'green'], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags' + sess.put.assert_called_once_with( + url, json={'tags': ['blue', 'green']}, microversion=None + ) + + def test_remove_all_tags(self): + res = self.sot + sess = self.session + + # Set some initial value to check removal + res.tags = ['blue_old', 'green_old'] + + result = res.remove_all_tags(sess) + # Check tags attribute is updated + self.assertEqual([], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags' + sess.delete.assert_called_once_with(url, microversion=None) + + def test_remove_single_tag(self): + res = self.sot + sess = self.session + + res.tags = ['blue', 'dummy'] + + result = res.remove_tag(sess, 'dummy') + # Check tags attribute is updated + self.assertEqual(['blue'], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags/dummy' + sess.delete.assert_called_once_with(url, microversion=None) + + def test_check_tag_exists(self): + res = self.sot + sess = self.session + + sess.get.side_effect = 
[FakeResponse(None, 202)] + + result = res.check_tag(sess, 'blue') + # Check tags attribute is updated + self.assertEqual([], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags/blue' + sess.get.assert_called_once_with(url, microversion=None) + + def test_check_tag_not_exists(self): + res = self.sot + sess = self.session + + mock_response = mock.Mock() + mock_response.status_code = 404 + mock_response.links = {} + mock_response.content = None + + sess.get.side_effect = [mock_response] + + # ensure we get 404 + self.assertRaises( + exceptions.NotFoundException, + res.check_tag, + sess, + 'dummy', + ) + + def test_add_tag(self): + res = self.sot + sess = self.session + + # Set some initial value to check add + res.tags = ['blue', 'green'] + + result = res.add_tag(sess, 'lila') + # Check tags attribute is updated + self.assertEqual(['blue', 'green', 'lila'], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags/lila' + sess.put.assert_called_once_with(url, microversion=None) + + def test_add_tag_with_microversion(self): + res = self.sot + res._get_microversion = mock.Mock(return_value='2.26') + sess = self.session + + res.tags = ['blue', 'green'] + + result = res.add_tag(sess, 'lila') + self.assertEqual(['blue', 'green', 'lila'], res.tags) + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags/lila' + sess.put.assert_called_once_with(url, microversion='2.26') + + def test_remove_single_tag_with_microversion(self): + res = self.sot + res._get_microversion = mock.Mock(return_value='2.26') + sess = self.session + + res.tags = ['blue', 'dummy'] + + result = res.remove_tag(sess, 'dummy') + self.assertEqual(['blue'], res.tags) + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags/dummy' + sess.delete.assert_called_once_with(url, microversion='2.26') + + def 
test_fetch_tags_with_microversion(self): + res = self.sot + res._get_microversion = mock.Mock(return_value='2.26') + sess = self.session + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = {'tags': ['blue1', 'green1']} + + sess.get.side_effect = [mock_response] + + result = res.fetch_tags(sess) + self.assertEqual(['blue1', 'green1'], res.tags) + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags' + sess.get.assert_called_once_with(url, microversion='2.26') + + def test_set_tags_with_microversion(self): + res = self.sot + res._get_microversion = mock.Mock(return_value='2.26') + sess = self.session + + res.tags = ['blue_old', 'green_old'] + + result = res.set_tags(sess, ['blue', 'green']) + self.assertEqual(['blue', 'green'], res.tags) + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags' + sess.put.assert_called_once_with( + url, json={'tags': ['blue', 'green']}, microversion='2.26' + ) + + def test_remove_all_tags_with_microversion(self): + res = self.sot + res._get_microversion = mock.Mock(return_value='2.26') + sess = self.session + + res.tags = ['blue_old', 'green_old'] + + result = res.remove_all_tags(sess) + self.assertEqual([], res.tags) + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags' + sess.delete.assert_called_once_with(url, microversion='2.26') + + def test_check_tag_with_microversion(self): + res = self.sot + res._get_microversion = mock.Mock(return_value='2.26') + sess = self.session + + sess.get.side_effect = [FakeResponse(None, 202)] + + result = res.check_tag(sess, 'blue') + self.assertEqual([], res.tags) + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags/blue' + sess.get.assert_called_once_with(url, microversion='2.26') + + def test_tagged_resource_always_created_with_empty_tag_list(self): + res = self.sot + + self.assertIsNotNone(res.tags) + 
self.assertEqual(res.tags, list()) diff --git a/openstack/tests/unit/compute/test_compute_service.py b/openstack/tests/unit/compute/test_compute_service.py deleted file mode 100644 index 3c5b26c8fb..0000000000 --- a/openstack/tests/unit/compute/test_compute_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.compute import compute_service - - -class TestComputeService(testtools.TestCase): - - def test_service(self): - sot = compute_service.ComputeService() - self.assertEqual('compute', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v2', sot.valid_versions[0].module) - self.assertEqual('v2', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/compute/test_version.py b/openstack/tests/unit/compute/test_version.py index 4e0ce5ec3a..f33ca9f57d 100644 --- a/openstack/tests/unit/compute/test_version.py +++ b/openstack/tests/unit/compute/test_version.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.compute import version +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,22 +23,20 @@ } -class TestVersion(testtools.TestCase): - +class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = version.Version(EXAMPLE) + sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/compute/v2/test_aggregate.py b/openstack/tests/unit/compute/v2/test_aggregate.py new file mode 100644 index 0000000000..dec21ca906 --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_aggregate.py @@ -0,0 +1,107 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.compute.v2 import aggregate +from openstack.tests.unit import base + +IDENTIFIER = 'IDENTIFIER' + +EXAMPLE = { + "name": "m-family", + "availability_zone": None, + "deleted": False, + "created_at": "2018-07-06T14:58:16.000000", + "updated_at": None, + "hosts": ["oscomp-m001", "oscomp-m002", "oscomp-m003"], + "deleted_at": None, + "id": 4, + "uuid": IDENTIFIER, + "metadata": {"type": "public", "family": "m-family"}, +} + + +class TestAggregate(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = EXAMPLE.copy() + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.resp.headers = {'Accept': ''} + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + + def test_basic(self): + sot = aggregate.Aggregate() + self.assertEqual('aggregate', sot.resource_key) + self.assertEqual('aggregates', sot.resources_key) + self.assertEqual('/os-aggregates', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = aggregate.Aggregate(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['availability_zone'], sot.availability_zone) + self.assertEqual(EXAMPLE['deleted'], sot.is_deleted) + self.assertEqual(EXAMPLE['deleted_at'], sot.deleted_at) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['hosts'], sot.hosts) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['uuid'], sot.uuid) + self.assertDictEqual(EXAMPLE['metadata'], sot.metadata) + + def test_add_host(self): + sot = aggregate.Aggregate(**EXAMPLE) + + sot.add_host(self.sess, 'host1') + + url = 
'os-aggregates/4/action' + body = {"add_host": {"host": "host1"}} + self.sess.post.assert_called_with(url, json=body, microversion=None) + + def test_remove_host(self): + sot = aggregate.Aggregate(**EXAMPLE) + + sot.remove_host(self.sess, 'host1') + + url = 'os-aggregates/4/action' + body = {"remove_host": {"host": "host1"}} + self.sess.post.assert_called_with(url, json=body, microversion=None) + + def test_set_metadata(self): + sot = aggregate.Aggregate(**EXAMPLE) + + sot.set_metadata(self.sess, {"key: value"}) + + url = 'os-aggregates/4/action' + body = {"set_metadata": {"metadata": {"key: value"}}} + self.sess.post.assert_called_with(url, json=body, microversion=None) + + def test_precache_image(self): + sot = aggregate.Aggregate(**EXAMPLE) + + sot.precache_images(self.sess, ['1']) + + url = 'os-aggregates/4/images' + body = {"cache": ['1']} + self.sess.post.assert_called_with( + url, json=body, microversion=sot._max_microversion + ) diff --git a/openstack/tests/unit/compute/v2/test_availability_zone.py b/openstack/tests/unit/compute/v2/test_availability_zone.py index 4d4abe0999..c71d96f000 100644 --- a/openstack/tests/unit/compute/v2/test_availability_zone.py +++ b/openstack/tests/unit/compute/v2/test_availability_zone.py @@ -10,34 +10,25 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.compute.v2 import availability_zone as az +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' BASIC_EXAMPLE = { 'id': IDENTIFIER, 'zoneState': 'available', 'hosts': 'host1', - 'zoneName': 'zone1' + 'zoneName': 'zone1', } -class TestAvailabilityZone(testtools.TestCase): - +class TestAvailabilityZone(base.TestCase): def test_basic(self): sot = az.AvailabilityZone() self.assertEqual('availabilityZoneInfo', sot.resources_key) self.assertEqual('/os-availability-zone', sot.base_path) self.assertTrue(sot.allow_list) - self.assertEqual('compute', sot.service.service_type) - - def test_basic_detail(self): - sot = az.AvailabilityZoneDetail() - self.assertEqual('availabilityZoneInfo', sot.resources_key) - self.assertEqual('/os-availability-zone/detail', sot.base_path) - self.assertTrue(sot.allow_list) - self.assertEqual('compute', sot.service.service_type) def test_make_basic(self): sot = az.AvailabilityZone(**BASIC_EXAMPLE) diff --git a/openstack/tests/unit/compute/v2/test_extension.py b/openstack/tests/unit/compute/v2/test_extension.py index 8d59084b2e..421cbd3aff 100644 --- a/openstack/tests/unit/compute/v2/test_extension.py +++ b/openstack/tests/unit/compute/v2/test_extension.py @@ -10,32 +10,30 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.compute.v2 import extension +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'alias': '1', 'description': '2', - 'links': '3', + 'links': [], 'name': '4', 'namespace': '5', 'updated': '2015-03-09T12:14:57.233772', } -class TestExtension(testtools.TestCase): - +class TestExtension(base.TestCase): def test_basic(self): sot = extension.Extension() self.assertEqual('extension', sot.resource_key) self.assertEqual('extensions', sot.resources_key) self.assertEqual('/extensions', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/compute/v2/test_flavor.py b/openstack/tests/unit/compute/v2/test_flavor.py index dba4a4aaaf..8b5d2dc210 100644 --- a/openstack/tests/unit/compute/v2/test_flavor.py +++ b/openstack/tests/unit/compute/v2/test_flavor.py @@ -10,15 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools +from unittest import mock + +from keystoneauth1 import adapter from openstack.compute.v2 import flavor +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' BASIC_EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'name': '3', + 'description': 'Testing flavor', 'disk': 4, 'os-flavor-access:is_public': True, 'ram': 6, @@ -26,57 +31,232 @@ 'swap': 8, 'OS-FLV-EXT-DATA:ephemeral': 9, 'OS-FLV-DISABLED:disabled': False, - 'rxtx_factor': 11.0 + 'rxtx_factor': 11.0, +} +DEFAULTS_EXAMPLE = { + 'links': '2', + 'original_name': IDENTIFIER, + 'description': 'Testing flavor', } -class TestFlavor(testtools.TestCase): +class TestFlavor(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = 1 + self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): sot = flavor.Flavor() self.assertEqual('flavor', sot.resource_key) self.assertEqual('flavors', sot.resources_key) self.assertEqual('/flavors', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_commit) - self.assertDictEqual({"sort_key": "sort_key", - "sort_dir": "sort_dir", - "min_disk": "minDisk", - "min_ram": "minRam", - "limit": "limit", - "marker": "marker"}, - sot._query_mapping._mapping) + self.assertDictEqual( + { + "sort_key": "sort_key", + "sort_dir": "sort_dir", + "min_disk": "minDisk", + "min_ram": "minRam", + "limit": "limit", + "marker": "marker", + "is_public": "is_public", + }, + sot._query_mapping._mapping, + ) def test_make_basic(self): sot = flavor.Flavor(**BASIC_EXAMPLE) self.assertEqual(BASIC_EXAMPLE['id'], sot.id) - self.assertEqual(BASIC_EXAMPLE['links'], sot.links) self.assertEqual(BASIC_EXAMPLE['name'], sot.name) + 
self.assertEqual(BASIC_EXAMPLE['description'], sot.description) self.assertEqual(BASIC_EXAMPLE['disk'], sot.disk) - self.assertEqual(BASIC_EXAMPLE['os-flavor-access:is_public'], - sot.is_public) + self.assertEqual( + BASIC_EXAMPLE['os-flavor-access:is_public'], sot.is_public + ) self.assertEqual(BASIC_EXAMPLE['ram'], sot.ram) self.assertEqual(BASIC_EXAMPLE['vcpus'], sot.vcpus) self.assertEqual(BASIC_EXAMPLE['swap'], sot.swap) - self.assertEqual(BASIC_EXAMPLE['OS-FLV-EXT-DATA:ephemeral'], - sot.ephemeral) - self.assertEqual(BASIC_EXAMPLE['OS-FLV-DISABLED:disabled'], - sot.is_disabled) + self.assertEqual( + BASIC_EXAMPLE['OS-FLV-EXT-DATA:ephemeral'], sot.ephemeral + ) + self.assertEqual( + BASIC_EXAMPLE['OS-FLV-DISABLED:disabled'], sot.is_disabled + ) self.assertEqual(BASIC_EXAMPLE['rxtx_factor'], sot.rxtx_factor) - def test_detail(self): - sot = flavor.FlavorDetail() - self.assertEqual('flavor', sot.resource_key) - self.assertEqual('flavors', sot.resources_key) - self.assertEqual('/flavors/detail', sot.base_path) - self.assertEqual('compute', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) + def test_make_basic_swap(self): + sot = flavor.Flavor(id=IDENTIFIER, swap="") + self.assertEqual(0, sot.swap) + sot1 = flavor.Flavor(id=IDENTIFIER, swap=0) + self.assertEqual(0, sot1.swap) + + def test_make_defaults(self): + sot = flavor.Flavor(**DEFAULTS_EXAMPLE) + self.assertEqual(DEFAULTS_EXAMPLE['original_name'], sot.name) + self.assertEqual(0, sot.disk) + self.assertEqual(True, sot.is_public) + self.assertEqual(0, sot.ram) + self.assertEqual(0, sot.vcpus) + self.assertEqual(0, sot.swap) + self.assertEqual(0, sot.ephemeral) + self.assertEqual(IDENTIFIER, sot.id) + + def test_flavor_id(self): + id = 'fake_id' + sot = flavor.Flavor(id=id) + self.assertEqual(sot.id, id) + sot = flavor.Flavor(name=id) + 
self.assertEqual(sot.id, id) + self.assertEqual(sot.name, id) + sot = flavor.Flavor(original_name=id) + self.assertEqual(sot.id, id) + self.assertEqual(sot.original_name, id) + + def test_add_tenant_access(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + resp = mock.Mock() + resp.body = None + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.post = mock.Mock(return_value=resp) + + sot.add_tenant_access(self.sess, 'fake_tenant') + + self.sess.post.assert_called_with( + 'flavors/IDENTIFIER/action', + json={'addTenantAccess': {'tenant': 'fake_tenant'}}, + headers={'Accept': ''}, + ) + + def test_remove_tenant_access(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + resp = mock.Mock() + resp.body = None + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.post = mock.Mock(return_value=resp) + + sot.remove_tenant_access(self.sess, 'fake_tenant') + + self.sess.post.assert_called_with( + 'flavors/IDENTIFIER/action', + json={'removeTenantAccess': {'tenant': 'fake_tenant'}}, + headers={'Accept': ''}, + ) + + def test_get_flavor_access(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + resp = mock.Mock() + resp.body = { + 'flavor_access': [ + {'flavor_id': 'fake_flavor', 'tenant_id': 'fake_tenant'} + ] + } + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.get = mock.Mock(return_value=resp) + + rsp = sot.get_access(self.sess) + + self.sess.get.assert_called_with( + 'flavors/IDENTIFIER/os-flavor-access', + ) + + self.assertEqual(resp.body['flavor_access'], rsp) + + def test_fetch_extra_specs(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + resp = mock.Mock() + resp.body = {'extra_specs': {'a': 'b', 'c': 'd'}} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.get = mock.Mock(return_value=resp) + + rsp = sot.fetch_extra_specs(self.sess) + + self.sess.get.assert_called_with( + 'flavors/IDENTIFIER/os-extra_specs', + 
microversion=self.sess.default_microversion, + ) + + self.assertEqual(resp.body['extra_specs'], rsp.extra_specs) + self.assertIsInstance(rsp, flavor.Flavor) + + def test_create_extra_specs(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + specs = {'a': 'b', 'c': 'd'} + resp = mock.Mock() + resp.body = {'extra_specs': specs} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.post = mock.Mock(return_value=resp) + + rsp = sot.create_extra_specs(self.sess, specs) + + self.sess.post.assert_called_with( + 'flavors/IDENTIFIER/os-extra_specs', + json={'extra_specs': specs}, + microversion=self.sess.default_microversion, + ) + + self.assertEqual(resp.body['extra_specs'], rsp.extra_specs) + self.assertIsInstance(rsp, flavor.Flavor) + + def test_get_extra_specs_property(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + resp = mock.Mock() + resp.body = {'a': 'b'} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.get = mock.Mock(return_value=resp) + + rsp = sot.get_extra_specs_property(self.sess, 'a') + + self.sess.get.assert_called_with( + 'flavors/IDENTIFIER/os-extra_specs/a', + microversion=self.sess.default_microversion, + ) + + self.assertEqual('b', rsp) + + def test_update_extra_specs_property(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + resp = mock.Mock() + resp.body = {'a': 'b'} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.put = mock.Mock(return_value=resp) + + rsp = sot.update_extra_specs_property(self.sess, 'a', 'b') + + self.sess.put.assert_called_with( + 'flavors/IDENTIFIER/os-extra_specs/a', + json={'a': 'b'}, + microversion=self.sess.default_microversion, + ) + + self.assertEqual('b', rsp) + + def test_delete_extra_specs_property(self): + sot = flavor.Flavor(**BASIC_EXAMPLE) + resp = mock.Mock() + resp.body = None + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.delete = mock.Mock(return_value=resp) + + rsp = 
sot.delete_extra_specs_property(self.sess, 'a') + + self.sess.delete.assert_called_with( + 'flavors/IDENTIFIER/os-extra_specs/a', + microversion=self.sess.default_microversion, + ) + + self.assertIsNone(rsp) diff --git a/openstack/tests/unit/compute/v2/test_hypervisor.py b/openstack/tests/unit/compute/v2/test_hypervisor.py index 04829c0814..88f4a88c7c 100644 --- a/openstack/tests/unit/compute/v2/test_hypervisor.py +++ b/openstack/tests/unit/compute/v2/test_hypervisor.py @@ -10,56 +10,104 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools +import copy +from unittest import mock + +from keystoneauth1 import adapter from openstack.compute.v2 import hypervisor +from openstack import exceptions +from openstack.tests.unit import base + EXAMPLE = { + "cpu_info": { + "arch": "x86_64", + "model": "Nehalem", + "vendor": "Intel", + "features": ["pge", "clflush"], + "topology": {"cores": 1, "threads": 1, "sockets": 4}, + }, + "state": "up", "status": "enabled", + "servers": [ + { + "name": "test_server1", + "uuid": "aaaaaaaa-aaaa-aaaa-aaaa-aaaaaaaaaaaa", + }, + { + "name": "test_server2", + "uuid": "bbbbbbbb-bbbb-bbbb-bbbb-bbbbbbbbbbbb", + }, + ], + "host_ip": "1.1.1.1", + "hypervisor_hostname": "fake-mini", + "hypervisor_type": "fake", + "hypervisor_version": 1000, + "id": "b1e43b5f-eec1-44e0-9f10-7b4945c0226d", + "uptime": ( + " 08:32:11 up 93 days, 18:25, 12 users, " + "load average: 0.20, 0.12, 0.14" + ), "service": { - "host": "fake-mini", + "host": "043b3cacf6f34c90a7245151fc8ebcda", + "id": "5d343e1d-938e-4284-b98b-6a2b5406ba76", "disabled_reason": None, - "id": 6 }, + # deprecated attributes "vcpus_used": 0, - "hypervisor_type": "QEMU", "local_gb_used": 0, "vcpus": 8, - "hypervisor_hostname": "fake-mini", "memory_mb_used": 512, "memory_mb": 7980, "current_workload": 0, - "state": "up", - "host_ip": "23.253.248.171", - "cpu_info": "some cpu info", "running_vms": 0, "free_disk_gb": 157, - 
"hypervisor_version": 2000000, "disk_available_least": 140, "local_gb": 157, "free_ram_mb": 7468, - "id": 1 } -class TestHypervisor(testtools.TestCase): +class TestHypervisor(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = 1 + self.sess._get_connection = mock.Mock(return_value=self.cloud) def test_basic(self): sot = hypervisor.Hypervisor() self.assertEqual('hypervisor', sot.resource_key) self.assertEqual('hypervisors', sot.resources_key) self.assertEqual('/os-hypervisors', sot.base_path) - self.assertEqual('compute', sot.service.service_type) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_list) + self.assertDictEqual( + { + 'hypervisor_hostname_pattern': 'hypervisor_hostname_pattern', + 'limit': 'limit', + 'marker': 'marker', + 'with_servers': 'with_servers', + }, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = hypervisor.Hypervisor(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['cpu_info'], sot.cpu_info) + self.assertEqual(EXAMPLE['host_ip'], sot.host_ip) + self.assertEqual(EXAMPLE['hypervisor_type'], sot.hypervisor_type) + self.assertEqual(EXAMPLE['hypervisor_version'], sot.hypervisor_version) self.assertEqual(EXAMPLE['hypervisor_hostname'], sot.name) + self.assertEqual(EXAMPLE['service'], sot.service_details) + self.assertEqual(EXAMPLE['servers'], sot.servers) self.assertEqual(EXAMPLE['state'], sot.state) self.assertEqual(EXAMPLE['status'], sot.status) - self.assertEqual(EXAMPLE['service'], sot.service_details) + self.assertEqual(EXAMPLE['uptime'], sot.uptime) + # Verify deprecated attributes self.assertEqual(EXAMPLE['vcpus_used'], sot.vcpus_used) self.assertEqual(EXAMPLE['hypervisor_type'], sot.hypervisor_type) self.assertEqual(EXAMPLE['local_gb_used'], sot.local_disk_used) @@ -68,11 +116,48 @@ def test_make_it(self): self.assertEqual(EXAMPLE['memory_mb_used'], sot.memory_used) 
self.assertEqual(EXAMPLE['memory_mb'], sot.memory_size) self.assertEqual(EXAMPLE['current_workload'], sot.current_workload) - self.assertEqual(EXAMPLE['host_ip'], sot.host_ip) - self.assertEqual(EXAMPLE['cpu_info'], sot.cpu_info) self.assertEqual(EXAMPLE['running_vms'], sot.running_vms) self.assertEqual(EXAMPLE['free_disk_gb'], sot.local_disk_free) - self.assertEqual(EXAMPLE['hypervisor_version'], sot.hypervisor_version) self.assertEqual(EXAMPLE['disk_available_least'], sot.disk_available) self.assertEqual(EXAMPLE['local_gb'], sot.local_disk_size) self.assertEqual(EXAMPLE['free_ram_mb'], sot.memory_free) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_get_uptime(self, mv_mock): + sot = hypervisor.Hypervisor(**copy.deepcopy(EXAMPLE)) + rsp = { + "hypervisor": { + "hypervisor_hostname": "fake-mini", + "id": sot.id, + "state": "up", + "status": "enabled", + "uptime": "08:32:11 up 93 days, 18:25, 12 users", + } + } + resp = mock.Mock() + resp.body = copy.deepcopy(rsp) + resp.json = mock.Mock(return_value=resp.body) + resp.headers = {} + resp.status_code = 200 + self.sess.get = mock.Mock(return_value=resp) + + hyp = sot.get_uptime(self.sess) + self.sess.get.assert_called_with( + f'os-hypervisors/{sot.id}/uptime', + microversion=self.sess.default_microversion, + ) + self.assertEqual(rsp['hypervisor']['uptime'], hyp.uptime) + self.assertEqual(rsp['hypervisor']['status'], sot.status) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_get_uptime_after_2_88(self, mv_mock): + sot = hypervisor.Hypervisor(**copy.deepcopy(EXAMPLE)) + self.assertRaises(exceptions.SDKException, sot.get_uptime, self.sess) diff --git a/openstack/tests/unit/compute/v2/test_image.py b/openstack/tests/unit/compute/v2/test_image.py index 8e848a2380..01019da42b 100644 --- a/openstack/tests/unit/compute/v2/test_image.py +++ b/openstack/tests/unit/compute/v2/test_image.py @@ 
-10,18 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.compute.v2 import image +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' -BASIC_EXAMPLE = { + +EXAMPLE = { 'id': IDENTIFIER, 'links': '2', 'name': '3', -} - -DETAILS = { 'created': '2015-03-09T12:14:57.233772', 'metadata': {'key': '2'}, 'minDisk': 3, @@ -29,66 +27,50 @@ 'progress': 5, 'status': '6', 'updated': '2015-03-09T12:15:57.233772', - 'OS-EXT-IMG-SIZE:size': 8 + 'OS-EXT-IMG-SIZE:size': 8, } -DETAIL_EXAMPLE = BASIC_EXAMPLE.copy() -DETAIL_EXAMPLE.update(DETAILS) - - -class TestImage(testtools.TestCase): +class TestImage(base.TestCase): def test_basic(self): sot = image.Image() self.assertEqual('image', sot.resource_key) self.assertEqual('images', sot.resources_key) self.assertEqual('/images', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertDictEqual({"server": "server", - "name": "name", - "status": "status", - "type": "type", - "min_disk": "minDisk", - "min_ram": "minRam", - "changes_since": "changes-since", - "limit": "limit", - "marker": "marker"}, - sot._query_mapping._mapping) + self.assertDictEqual( + { + "server": "server", + "name": "name", + "status": "status", + "type": "type", + "min_disk": "minDisk", + "min_ram": "minRam", + "changes_since": "changes-since", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) def test_make_basic(self): - sot = image.Image(**BASIC_EXAMPLE) - self.assertEqual(BASIC_EXAMPLE['id'], sot.id) - self.assertEqual(BASIC_EXAMPLE['links'], sot.links) - self.assertEqual(BASIC_EXAMPLE['name'], sot.name) - - def test_detail(self): - sot = image.ImageDetail() - 
self.assertEqual('image', sot.resource_key) - self.assertEqual('images', sot.resources_key) - self.assertEqual('/images/detail', sot.base_path) - self.assertEqual('compute', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_detail(self): - sot = image.ImageDetail(**DETAIL_EXAMPLE) - self.assertEqual(DETAIL_EXAMPLE['created'], sot.created_at) - self.assertEqual(DETAIL_EXAMPLE['id'], sot.id) - self.assertEqual(DETAIL_EXAMPLE['links'], sot.links) - self.assertEqual(DETAIL_EXAMPLE['metadata'], sot.metadata) - self.assertEqual(DETAIL_EXAMPLE['minDisk'], sot.min_disk) - self.assertEqual(DETAIL_EXAMPLE['minRam'], sot.min_ram) - self.assertEqual(DETAIL_EXAMPLE['name'], sot.name) - self.assertEqual(DETAIL_EXAMPLE['progress'], sot.progress) - self.assertEqual(DETAIL_EXAMPLE['status'], sot.status) - self.assertEqual(DETAIL_EXAMPLE['updated'], sot.updated_at) - self.assertEqual(DETAIL_EXAMPLE['OS-EXT-IMG-SIZE:size'], sot.size) + sot = image.Image(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['created'], sot.created_at) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['metadata'], sot.metadata) + self.assertEqual(EXAMPLE['minDisk'], sot.min_disk) + self.assertEqual(EXAMPLE['minRam'], sot.min_ram) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['progress'], sot.progress) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['updated'], sot.updated_at) + self.assertEqual(EXAMPLE['OS-EXT-IMG-SIZE:size'], sot.size) diff --git a/openstack/tests/unit/compute/v2/test_keypair.py b/openstack/tests/unit/compute/v2/test_keypair.py index 3fb3dbcbc5..d471d2fc28 100644 --- 
a/openstack/tests/unit/compute/v2/test_keypair.py +++ b/openstack/tests/unit/compute/v2/test_keypair.py @@ -10,35 +10,52 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.compute.v2 import keypair +from openstack.tests.unit import base + EXAMPLE = { + 'created_at': 'some_time', + 'deleted': False, 'fingerprint': '1', 'name': '2', 'public_key': '3', - 'private_key': '3', + 'private_key': '4', + 'type': 'ssh', + 'user_id': '5', } -class TestKeypair(testtools.TestCase): - +class TestKeypair(base.TestCase): def test_basic(self): sot = keypair.Keypair() self.assertEqual('keypair', sot.resource_key) self.assertEqual('keypairs', sot.resources_key) self.assertEqual('/os-keypairs', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertDictEqual( + {'limit': 'limit', 'marker': 'marker', 'user_id': 'user_id'}, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = keypair.Keypair(**EXAMPLE) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['deleted'], sot.is_deleted) self.assertEqual(EXAMPLE['fingerprint'], sot.fingerprint) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['public_key'], sot.public_key) self.assertEqual(EXAMPLE['private_key'], sot.private_key) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['user_id'], sot.user_id) + + def test_make_it_defaults(self): + EXAMPLE_DEFAULT = EXAMPLE.copy() + EXAMPLE_DEFAULT.pop('type') + sot = keypair.Keypair(**EXAMPLE_DEFAULT) + self.assertEqual(EXAMPLE['type'], sot.type) diff --git a/openstack/tests/unit/compute/v2/test_limits.py b/openstack/tests/unit/compute/v2/test_limits.py index 
e313491696..b58af68428 100644 --- a/openstack/tests/unit/compute/v2/test_limits.py +++ b/openstack/tests/unit/compute/v2/test_limits.py @@ -10,15 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools +import copy +from unittest import mock + +from keystoneauth1 import adapter from openstack.compute.v2 import limits +from openstack.tests.unit import base ABSOLUTE_LIMITS = { "maxImageMeta": 128, - "maxPersonality": 5, - "maxPersonalitySize": 10240, "maxSecurityGroupRules": 20, "maxSecurityGroups": 10, "maxServerMeta": 128, @@ -34,7 +35,7 @@ "totalRAMUsed": 4, "totalInstancesUsed": 5, "totalServerGroupsUsed": 6, - "totalCoresUsed": 7 + "totalCoresUsed": 7, } RATE_LIMIT = { @@ -44,152 +45,186 @@ "remaining": 120, "unit": "MINUTE", "value": 120, - "verb": "POST" + "verb": "POST", }, ], "regex": ".*", - "uri": "*" -} - -LIMITS_BODY = { - "limits": { - "absolute": ABSOLUTE_LIMITS, - "rate": [RATE_LIMIT] - } + "uri": "*", } +LIMITS_BODY = {"limits": {"absolute": ABSOLUTE_LIMITS, "rate": [RATE_LIMIT]}} -class TestAbsoluteLimits(testtools.TestCase): +class TestAbsoluteLimits(base.TestCase): def test_basic(self): sot = limits.AbsoluteLimits() self.assertIsNone(sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual("", sot.base_path) - self.assertIsNone(sot.service) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): sot = limits.AbsoluteLimits(**ABSOLUTE_LIMITS) self.assertEqual(ABSOLUTE_LIMITS["maxImageMeta"], sot.image_meta) - self.assertEqual(ABSOLUTE_LIMITS["maxPersonality"], sot.personality) - self.assertEqual(ABSOLUTE_LIMITS["maxPersonalitySize"], - sot.personality_size) - self.assertEqual(ABSOLUTE_LIMITS["maxSecurityGroupRules"], - sot.security_group_rules) 
- self.assertEqual(ABSOLUTE_LIMITS["maxSecurityGroups"], - sot.security_groups) + self.assertEqual( + ABSOLUTE_LIMITS["maxSecurityGroupRules"], sot.security_group_rules + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxSecurityGroups"], sot.security_groups + ) self.assertEqual(ABSOLUTE_LIMITS["maxServerMeta"], sot.server_meta) self.assertEqual(ABSOLUTE_LIMITS["maxTotalCores"], sot.total_cores) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalFloatingIps"], - sot.floating_ips) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalInstances"], - sot.instances) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalKeypairs"], - sot.keypairs) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalRAMSize"], - sot.total_ram) + self.assertEqual( + ABSOLUTE_LIMITS["maxTotalFloatingIps"], sot.floating_ips + ) + self.assertEqual(ABSOLUTE_LIMITS["maxTotalInstances"], sot.instances) + self.assertEqual(ABSOLUTE_LIMITS["maxTotalKeypairs"], sot.keypairs) + self.assertEqual(ABSOLUTE_LIMITS["maxTotalRAMSize"], sot.total_ram) self.assertEqual(ABSOLUTE_LIMITS["maxServerGroups"], sot.server_groups) - self.assertEqual(ABSOLUTE_LIMITS["maxServerGroupMembers"], - sot.server_group_members) - self.assertEqual(ABSOLUTE_LIMITS["totalFloatingIpsUsed"], - sot.floating_ips_used) - self.assertEqual(ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], - sot.security_groups_used) + self.assertEqual( + ABSOLUTE_LIMITS["maxServerGroupMembers"], sot.server_group_members + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalFloatingIpsUsed"], sot.floating_ips_used + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], + sot.security_groups_used, + ) self.assertEqual(ABSOLUTE_LIMITS["totalRAMUsed"], sot.total_ram_used) - self.assertEqual(ABSOLUTE_LIMITS["totalInstancesUsed"], - sot.instances_used) - self.assertEqual(ABSOLUTE_LIMITS["totalServerGroupsUsed"], - sot.server_groups_used) - self.assertEqual(ABSOLUTE_LIMITS["totalCoresUsed"], - sot.total_cores_used) - - -class TestRateLimit(testtools.TestCase): - + self.assertEqual( + 
ABSOLUTE_LIMITS["totalInstancesUsed"], sot.instances_used + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalServerGroupsUsed"], sot.server_groups_used + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalCoresUsed"], sot.total_cores_used + ) + + +class TestRateLimits(base.TestCase): def test_basic(self): - sot = limits.RateLimit() + sot = limits.RateLimits() self.assertIsNone(sot.resource_key) self.assertIsNone(sot.resources_key) self.assertEqual("", sot.base_path) - self.assertIsNone(sot.service) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) def test_make_it(self): - sot = limits.RateLimit(**RATE_LIMIT) + sot = limits.RateLimits(**RATE_LIMIT) self.assertEqual(RATE_LIMIT["regex"], sot.regex) self.assertEqual(RATE_LIMIT["uri"], sot.uri) - self.assertEqual(RATE_LIMIT["limit"], sot.limits) - + self.assertIsInstance(sot.limits[0], limits.RateLimit) -class TestLimits(testtools.TestCase): +class TestLimits(base.TestCase): def test_basic(self): sot = limits.Limits() self.assertEqual("limits", sot.resource_key) self.assertEqual("/limits", sot.base_path) - self.assertEqual("compute", sot.service.service_type) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'tenant_id': 'tenant_id', + 'project_id': 'tenant_id', + 'reserved': 'reserved', + }, + sot._query_mapping._mapping, + ) def test_get(self): - sess = mock.Mock() + sess = mock.Mock(spec=adapter.Adapter) + sess.default_microversion = None resp = mock.Mock() sess.get.return_value = resp - resp.json.return_value = LIMITS_BODY - - sot = limits.Limits().get(sess) - - 
self.assertEqual(ABSOLUTE_LIMITS["maxImageMeta"], - sot.absolute.image_meta) - self.assertEqual(ABSOLUTE_LIMITS["maxPersonality"], - sot.absolute.personality) - self.assertEqual(ABSOLUTE_LIMITS["maxPersonalitySize"], - sot.absolute.personality_size) - self.assertEqual(ABSOLUTE_LIMITS["maxSecurityGroupRules"], - sot.absolute.security_group_rules) - self.assertEqual(ABSOLUTE_LIMITS["maxSecurityGroups"], - sot.absolute.security_groups) - self.assertEqual(ABSOLUTE_LIMITS["maxServerMeta"], - sot.absolute.server_meta) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalCores"], - sot.absolute.total_cores) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalFloatingIps"], - sot.absolute.floating_ips) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalInstances"], - sot.absolute.instances) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalKeypairs"], - sot.absolute.keypairs) - self.assertEqual(ABSOLUTE_LIMITS["maxTotalRAMSize"], - sot.absolute.total_ram) - self.assertEqual(ABSOLUTE_LIMITS["maxServerGroups"], - sot.absolute.server_groups) - self.assertEqual(ABSOLUTE_LIMITS["maxServerGroupMembers"], - sot.absolute.server_group_members) - self.assertEqual(ABSOLUTE_LIMITS["totalFloatingIpsUsed"], - sot.absolute.floating_ips_used) - self.assertEqual(ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], - sot.absolute.security_groups_used) - self.assertEqual(ABSOLUTE_LIMITS["totalRAMUsed"], - sot.absolute.total_ram_used) - self.assertEqual(ABSOLUTE_LIMITS["totalInstancesUsed"], - sot.absolute.instances_used) - self.assertEqual(ABSOLUTE_LIMITS["totalServerGroupsUsed"], - sot.absolute.server_groups_used) - self.assertEqual(ABSOLUTE_LIMITS["totalCoresUsed"], - sot.absolute.total_cores_used) + resp.json.return_value = copy.deepcopy(LIMITS_BODY) + resp.headers = {} + resp.status_code = 200 + + sot = limits.Limits().fetch(sess) + + self.assertEqual( + ABSOLUTE_LIMITS["maxImageMeta"], sot.absolute.image_meta + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxSecurityGroupRules"], + sot.absolute.security_group_rules, + ) + 
self.assertEqual( + ABSOLUTE_LIMITS["maxSecurityGroups"], sot.absolute.security_groups + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxServerMeta"], sot.absolute.server_meta + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxTotalCores"], sot.absolute.total_cores + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxTotalFloatingIps"], sot.absolute.floating_ips + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxTotalInstances"], sot.absolute.instances + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxTotalKeypairs"], sot.absolute.keypairs + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxTotalRAMSize"], sot.absolute.total_ram + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxServerGroups"], sot.absolute.server_groups + ) + self.assertEqual( + ABSOLUTE_LIMITS["maxServerGroupMembers"], + sot.absolute.server_group_members, + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalFloatingIpsUsed"], + sot.absolute.floating_ips_used, + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], + sot.absolute.security_groups_used, + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalRAMUsed"], sot.absolute.total_ram_used + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalInstancesUsed"], sot.absolute.instances_used + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalServerGroupsUsed"], + sot.absolute.server_groups_used, + ) + self.assertEqual( + ABSOLUTE_LIMITS["totalCoresUsed"], sot.absolute.total_cores_used + ) self.assertEqual(RATE_LIMIT["uri"], sot.rate[0].uri) self.assertEqual(RATE_LIMIT["regex"], sot.rate[0].regex) - self.assertEqual(RATE_LIMIT["limit"], sot.rate[0].limits) + self.assertIsInstance(sot.rate[0].limits[0], limits.RateLimit) + + dsot = sot.to_dict() + + self.assertIsInstance(dsot['rate'][0], dict) + self.assertIsInstance(dsot['absolute'], dict) + self.assertEqual(RATE_LIMIT["uri"], dsot['rate'][0]['uri']) + self.assertEqual( + ABSOLUTE_LIMITS["totalSecurityGroupsUsed"], + dsot['absolute']['security_groups_used'], + ) diff --git a/openstack/tests/unit/compute/v2/test_metadata.py 
b/openstack/tests/unit/compute/v2/test_metadata.py deleted file mode 100644 index 458f931b16..0000000000 --- a/openstack/tests/unit/compute/v2/test_metadata.py +++ /dev/null @@ -1,86 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import testtools - -from openstack.compute.v2 import server - -IDENTIFIER = 'IDENTIFIER' - -# NOTE: The implementation for metadata is done via a mixin class that both -# the server and image resources inherit from. Currently this test class -# uses the Server resource to test it. Ideally it would be parameterized -# to run with both Server and Image when the tooling for subtests starts -# working. - - -class TestMetadata(testtools.TestCase): - - def setUp(self): - super(TestMetadata, self).setUp() - self.metadata_result = {"metadata": {"go": "cubs", "boo": "sox"}} - self.meta_result = {"meta": {"oh": "yeah"}} - - def test_get_all_metadata_Server(self): - self._test_get_all_metadata(server.Server(id=IDENTIFIER)) - - def test_get_all_metadata_ServerDetail(self): - # This is tested explicitly so we know ServerDetail items are - # properly having /detail stripped out of their base_path. 
- self._test_get_all_metadata(server.ServerDetail(id=IDENTIFIER)) - - def _test_get_all_metadata(self, sot): - response = mock.Mock() - response.json.return_value = self.metadata_result - sess = mock.Mock() - sess.get.return_value = response - - result = sot.get_metadata(sess) - - self.assertEqual(result, self.metadata_result["metadata"]) - sess.get.assert_called_once_with("servers/IDENTIFIER/metadata", - headers={}, - endpoint_filter=sot.service) - - def test_set_metadata(self): - response = mock.Mock() - response.json.return_value = self.metadata_result - sess = mock.Mock() - sess.post.return_value = response - - sot = server.Server(id=IDENTIFIER) - - set_meta = {"lol": "rofl"} - - result = sot.set_metadata(sess, **set_meta) - - self.assertEqual(result, self.metadata_result["metadata"]) - sess.post.assert_called_once_with("servers/IDENTIFIER/metadata", - endpoint_filter=sot.service, - headers={}, - json={"metadata": set_meta}) - - def test_delete_metadata(self): - sess = mock.Mock() - sess.delete.return_value = None - - sot = server.Server(id=IDENTIFIER) - - key = "hey" - - sot.delete_metadata(sess, [key]) - - sess.delete.assert_called_once_with( - "servers/IDENTIFIER/metadata/" + key, - headers={"Accept": ""}, - endpoint_filter=sot.service) diff --git a/openstack/tests/unit/compute/v2/test_migration.py b/openstack/tests/unit/compute/v2/test_migration.py new file mode 100644 index 0000000000..739916b161 --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_migration.py @@ -0,0 +1,80 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.compute.v2 import migration +from openstack.tests.unit import base + +EXAMPLE = { + 'uuid': '42341d4b-346a-40d0-83c6-5f4f6892b650', + 'instance_uuid': '9128d044-7b61-403e-b766-7547076ff6c1', + 'user_id': '78348f0e-97ee-4d70-ad34-189692673ea2', + 'project_id': '9842f0f7-1229-4355-afe7-15ebdbb8c3d8', + 'created_at': '2016-06-23T14:42:02.000000', + 'updated_at': '2016-06-23T14:42:02.000000', + 'status': 'migrating', + 'source_compute': 'compute10', + 'source_node': 'node10', + 'dest_host': '5.6.7.8', + 'dest_compute': 'compute20', + 'dest_node': 'node20', + 'migration_type': 'resize', + 'old_instance_type_id': 5, + 'new_instance_type_id': 6, +} + + +class TestMigration(base.TestCase): + def test_basic(self): + sot = migration.Migration() + self.assertIsNone(sot.resource_key) # we don't support fetch + self.assertEqual('migrations', sot.resources_key) + self.assertEqual('/os-migrations', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'host': 'host', + 'status': 'status', + 'migration_type': 'migration_type', + 'source_compute': 'source_compute', + 'user_id': 'user_id', + 'project_id': 'project_id', + 'changes_since': 'changes-since', + 'changes_before': 'changes-before', + 'server_id': 'instance_uuid', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = migration.Migration(**EXAMPLE) + self.assertEqual(EXAMPLE['uuid'], sot.id) + self.assertEqual(EXAMPLE['instance_uuid'], sot.server_id) + self.assertEqual(EXAMPLE['user_id'], sot.user_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], 
sot.updated_at) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['source_compute'], sot.source_compute) + self.assertEqual(EXAMPLE['source_node'], sot.source_node) + self.assertEqual(EXAMPLE['dest_host'], sot.dest_host) + self.assertEqual(EXAMPLE['dest_compute'], sot.dest_compute) + self.assertEqual(EXAMPLE['dest_node'], sot.dest_node) + self.assertEqual(EXAMPLE['migration_type'], sot.migration_type) + self.assertEqual(EXAMPLE['old_instance_type_id'], sot.old_flavor_id) + self.assertEqual(EXAMPLE['new_instance_type_id'], sot.new_flavor_id) diff --git a/openstack/tests/unit/compute/v2/test_proxy.py b/openstack/tests/unit/compute/v2/test_proxy.py index f50112fcf4..7323e57962 100644 --- a/openstack/tests/unit/compute/v2/test_proxy.py +++ b/openstack/tests/unit/compute/v2/test_proxy.py @@ -10,112 +10,902 @@ # License for the specific language governing permissions and limitations # under the License. +import contextlib +import datetime +import fixtures +from unittest import mock +import uuid +import warnings + +from openstack.block_storage.v3 import volume from openstack.compute.v2 import _proxy +from openstack.compute.v2 import aggregate from openstack.compute.v2 import availability_zone as az +from openstack.compute.v2 import console_auth_token from openstack.compute.v2 import extension from openstack.compute.v2 import flavor from openstack.compute.v2 import hypervisor from openstack.compute.v2 import image from openstack.compute.v2 import keypair -from openstack.compute.v2 import limits +from openstack.compute.v2 import migration +from openstack.compute.v2 import quota_class_set +from openstack.compute.v2 import quota_set from openstack.compute.v2 import server +from openstack.compute.v2 import server_action from openstack.compute.v2 import server_group from openstack.compute.v2 import server_interface from openstack.compute.v2 import server_ip +from openstack.compute.v2 import server_migration +from openstack.compute.v2 import 
server_remote_console from openstack.compute.v2 import service -from openstack.tests.unit import test_proxy_base2 +from openstack.compute.v2 import usage +from openstack.compute.v2 import volume_attachment +from openstack.identity.v3 import project +from openstack import proxy as proxy_base +from openstack.tests.unit import base +from openstack.tests.unit import test_proxy_base +from openstack import types +from openstack import warnings as os_warnings -class TestComputeProxy(test_proxy_base2.TestProxyBase): +class TestComputeProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestComputeProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) - def test_extension_find(self): - self.verify_find(self.proxy.find_extension, extension.Extension) - - def test_extensions(self): - self.verify_list_no_kwargs(self.proxy.extensions, extension.Extension, - paginated=False) +class TestFlavor(TestComputeProxy): def test_flavor_create(self): self.verify_create(self.proxy.create_flavor, flavor.Flavor) def test_flavor_delete(self): self.verify_delete(self.proxy.delete_flavor, flavor.Flavor, False) + def test_flavor_update(self): + self.verify_update(self.proxy.update_flavor, flavor.Flavor, False) + def test_flavor_delete_ignore(self): self.verify_delete(self.proxy.delete_flavor, flavor.Flavor, True) def test_flavor_find(self): self.verify_find(self.proxy.find_flavor, flavor.Flavor) - def test_flavor_get(self): - self.verify_get(self.proxy.get_flavor, flavor.Flavor) + def test_flavor_find_query(self): + self.verify_find( + self.proxy.find_flavor, + flavor.Flavor, + method_kwargs={"a": "b"}, + expected_kwargs={"a": "b", "ignore_missing": True}, + ) + + def test_flavor_find_fetch_extra(self): + """fetch extra_specs is triggered""" + with mock.patch( + 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' + ) as mocked: + res = flavor.Flavor() + mocked.return_value = res + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_flavor, + 
method_args=['res', True], + method_kwargs={'get_extra_specs': True}, + expected_result=res, + expected_args=[flavor.Flavor, 'res'], + expected_kwargs={'ignore_missing': True}, + ) + mocked.assert_called_once() + + def test_flavor_find_skip_fetch_extra(self): + """fetch extra_specs not triggered""" + with mock.patch( + 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' + ) as mocked: + res = flavor.Flavor(extra_specs={'a': 'b'}) + mocked.return_value = res + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_flavor, + method_args=['res', True], + expected_result=res, + expected_args=[flavor.Flavor, 'res'], + expected_kwargs={'ignore_missing': True}, + ) + mocked.assert_not_called() + + def test_flavor_get_no_extra(self): + """fetch extra_specs not triggered""" + with mock.patch( + 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' + ) as mocked: + res = flavor.Flavor() + mocked.return_value = res + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_flavor, + method_args=['res'], + expected_result=res, + expected_args=[flavor.Flavor, 'res'], + ) + mocked.assert_not_called() + + def test_flavor_get_fetch_extra(self): + """fetch extra_specs is triggered""" + with mock.patch( + 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' + ) as mocked: + res = flavor.Flavor() + mocked.return_value = res + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_flavor, + method_args=['res', True], + expected_result=res, + expected_args=[flavor.Flavor, 'res'], + ) + mocked.assert_called_once() + + def test_flavor_get_skip_fetch_extra(self): + """fetch extra_specs not triggered""" + with mock.patch( + 'openstack.compute.v2.flavor.Flavor.fetch_extra_specs' + ) as mocked: + res = flavor.Flavor(extra_specs={'a': 'b'}) + mocked.return_value = res + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_flavor, + method_args=['res', True], + expected_result=res, + expected_args=[flavor.Flavor, 'res'], + ) + mocked.assert_not_called() 
+ + @mock.patch("openstack.proxy.Proxy._list") + @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") + def test_flavors_detailed(self, fetch_mock, list_mock): + res = self.proxy.flavors(details=True) + for r in res: + self.assertIsNotNone(r) + fetch_mock.assert_not_called() + list_mock.assert_called_with( + flavor.Flavor, base_path="/flavors/detail" + ) + + @mock.patch("openstack.proxy.Proxy._list") + @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") + def test_flavors_not_detailed(self, fetch_mock, list_mock): + res = self.proxy.flavors(details=False) + for r in res: + self.assertIsNotNone(r) + fetch_mock.assert_not_called() + list_mock.assert_called_with(flavor.Flavor, base_path="/flavors") + + @mock.patch("openstack.proxy.Proxy._list") + @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") + def test_flavors_query(self, fetch_mock, list_mock): + res = self.proxy.flavors(details=False, get_extra_specs=True, a="b") + for r in res: + fetch_mock.assert_called_with(self.proxy) + list_mock.assert_called_with( + flavor.Flavor, base_path="/flavors", a="b" + ) + + @mock.patch("openstack.proxy.Proxy._list") + @mock.patch("openstack.compute.v2.flavor.Flavor.fetch_extra_specs") + def test_flavors_get_extra(self, fetch_mock, list_mock): + res = self.proxy.flavors(details=False, get_extra_specs=True) + for r in res: + fetch_mock.assert_called_with(self.proxy) + list_mock.assert_called_with(flavor.Flavor, base_path="/flavors") + + def test_flavor_get_access(self): + self._verify( + "openstack.compute.v2.flavor.Flavor.get_access", + self.proxy.get_flavor_access, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_flavor_add_tenant_access(self): + self._verify( + "openstack.compute.v2.flavor.Flavor.add_tenant_access", + self.proxy.flavor_add_tenant_access, + method_args=["value", "fake-tenant"], + expected_args=[self.proxy, "fake-tenant"], + ) + + def test_flavor_remove_tenant_access(self): + self._verify( + 
"openstack.compute.v2.flavor.Flavor.remove_tenant_access", + self.proxy.flavor_remove_tenant_access, + method_args=["value", "fake-tenant"], + expected_args=[self.proxy, "fake-tenant"], + ) + + def test_flavor_fetch_extra_specs(self): + self._verify( + "openstack.compute.v2.flavor.Flavor.fetch_extra_specs", + self.proxy.fetch_flavor_extra_specs, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_create_flavor_extra_specs(self): + self._verify( + "openstack.compute.v2.flavor.Flavor.create_extra_specs", + self.proxy.create_flavor_extra_specs, + method_args=["value", {'a': 'b'}], + expected_args=[self.proxy], + expected_kwargs={"specs": {'a': 'b'}}, + ) + + def test_get_flavor_extra_specs_prop(self): + self._verify( + "openstack.compute.v2.flavor.Flavor.get_extra_specs_property", + self.proxy.get_flavor_extra_specs_property, + method_args=["value", "prop"], + expected_args=[self.proxy, "prop"], + ) + + def test_update_flavor_extra_specs_prop(self): + self._verify( + "openstack.compute.v2.flavor.Flavor.update_extra_specs_property", + self.proxy.update_flavor_extra_specs_property, + method_args=["value", "prop", "val"], + expected_args=[self.proxy, "prop", "val"], + ) + + def test_delete_flavor_extra_specs_prop(self): + self._verify( + "openstack.compute.v2.flavor.Flavor.delete_extra_specs_property", + self.proxy.delete_flavor_extra_specs_property, + method_args=["value", "prop"], + expected_args=[self.proxy, "prop"], + ) + + +class TestKeyPair(TestComputeProxy): + def test_keypair_create(self): + self.verify_create(self.proxy.create_keypair, keypair.Keypair) - def test_flavors_detailed(self): - self.verify_list(self.proxy.flavors, flavor.FlavorDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) + def test_keypair_delete(self): + self._verify( + "openstack.compute.v2.keypair.Keypair.delete", + self.proxy.delete_keypair, + method_args=["value"], + expected_args=[self.proxy], + 
expected_kwargs={"params": {}}, + ) - def test_flavors_not_detailed(self): - self.verify_list(self.proxy.flavors, flavor.Flavor, - paginated=True, - method_kwargs={"details": False, "query": 1}, - expected_kwargs={"query": 1}) + def test_keypair_delete_ignore(self): + self._verify( + "openstack.compute.v2.keypair.Keypair.delete", + self.proxy.delete_keypair, + method_args=["value", True], + method_kwargs={"user_id": "fake_user"}, + expected_args=[self.proxy], + expected_kwargs={"params": {"user_id": "fake_user"}}, + ) + + def test_keypair_delete_user_id(self): + self._verify( + "openstack.compute.v2.keypair.Keypair.delete", + self.proxy.delete_keypair, + method_args=["value"], + method_kwargs={"user_id": "fake_user"}, + expected_args=[self.proxy], + expected_kwargs={"params": {"user_id": "fake_user"}}, + ) - def test_image_delete(self): - self.verify_delete(self.proxy.delete_image, image.Image, False) + def test_keypair_find(self): + self.verify_find(self.proxy.find_keypair, keypair.Keypair) - def test_image_delete_ignore(self): - self.verify_delete(self.proxy.delete_image, image.Image, True) + def test_keypair_find_user_id(self): + self.verify_find( + self.proxy.find_keypair, + keypair.Keypair, + method_kwargs={'user_id': 'fake_user'}, + expected_kwargs={'user_id': 'fake_user'}, + ) - def test_image_find(self): - self.verify_find(self.proxy.find_image, image.Image) + def test_keypair_get(self): + self._verify( + "openstack.compute.v2.keypair.Keypair.fetch", + self.proxy.get_keypair, + method_args=["value"], + method_kwargs={}, + expected_args=[self.proxy], + expected_kwargs={ + "error_message": "No Keypair found for value", + }, + ) + + def test_keypair_get_user_id(self): + self._verify( + "openstack.compute.v2.keypair.Keypair.fetch", + self.proxy.get_keypair, + method_args=["value"], + method_kwargs={"user_id": "fake_user"}, + expected_args=[self.proxy], + expected_kwargs={ + "error_message": "No Keypair found for value", + "user_id": "fake_user", + }, + ) - def 
test_image_get(self): - self.verify_get(self.proxy.get_image, image.Image) + def test_keypairs(self): + self.verify_list(self.proxy.keypairs, keypair.Keypair) - def test_images_detailed(self): - self.verify_list(self.proxy.images, image.ImageDetail, - paginated=True, - method_kwargs={"details": True, "query": 1}, - expected_kwargs={"query": 1}) + def test_keypairs_user_id(self): + self.verify_list( + self.proxy.keypairs, + keypair.Keypair, + method_kwargs={'user_id': 'fake_user'}, + expected_kwargs={'user_id': 'fake_user'}, + ) - def test_images_not_detailed(self): - self.verify_list(self.proxy.images, image.Image, - paginated=True, - method_kwargs={"details": False, "query": 1}, - expected_kwargs={"query": 1}) - def test_keypair_create(self): - self.verify_create(self.proxy.create_keypair, keypair.Keypair) +class TestKeyPairUrl(base.TestCase): + def setUp(self): + super().setUp() + self.useFixture( + fixtures.MonkeyPatch( + "openstack.utils.maximum_supported_microversion", + lambda *args, **kwargs: "2.10", + ) + ) + + def test_keypair_find_user_id(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url( + "compute", + "public", + append=["os-keypairs", "fake_keypair"], + qs_elements=["user_id=fake_user"], + ), + ), + ] + ) + + self.cloud.compute.find_keypair("fake_keypair", user_id="fake_user") + + def test_keypair_get_user_id(self): + self.register_uris( + [ + dict( + method="GET", + uri=self.get_mock_url( + "compute", + "public", + append=["os-keypairs", "fake_keypair"], + qs_elements=["user_id=fake_user"], + ), + ), + ] + ) + + self.cloud.compute.get_keypair("fake_keypair", user_id="fake_user") + + def test_keypair_delete_user_id(self): + self.register_uris( + [ + dict( + method="DELETE", + uri=self.get_mock_url( + "compute", + "public", + append=["os-keypairs", "fake_keypair"], + qs_elements=["user_id=fake_user"], + ), + ), + ] + ) + + self.cloud.compute.delete_keypair("fake_keypair", user_id="fake_user") + + +class 
TestAggregate(TestComputeProxy): + def test_aggregate_create(self): + self.verify_create(self.proxy.create_aggregate, aggregate.Aggregate) + + def test_aggregate_delete(self): + self.verify_delete( + self.proxy.delete_aggregate, aggregate.Aggregate, False + ) + + def test_aggregate_delete_ignore(self): + self.verify_delete( + self.proxy.delete_aggregate, aggregate.Aggregate, True + ) + + def test_aggregate_find(self): + self.verify_find(self.proxy.find_aggregate, aggregate.Aggregate) + + def test_aggregates(self): + self.verify_list(self.proxy.aggregates, aggregate.Aggregate) + + def test_aggregate_get(self): + self.verify_get(self.proxy.get_aggregate, aggregate.Aggregate) + + def test_aggregate_update(self): + self.verify_update(self.proxy.update_aggregate, aggregate.Aggregate) + + def test_aggregate_add_host(self): + self._verify( + "openstack.compute.v2.aggregate.Aggregate.add_host", + self.proxy.add_host_to_aggregate, + method_args=["value", "host"], + expected_args=[self.proxy, "host"], + ) + + def test_aggregate_remove_host(self): + self._verify( + "openstack.compute.v2.aggregate.Aggregate.remove_host", + self.proxy.remove_host_from_aggregate, + method_args=["value", "host"], + expected_args=[self.proxy, "host"], + ) + + def test_aggregate_set_metadata(self): + self._verify( + "openstack.compute.v2.aggregate.Aggregate.set_metadata", + self.proxy.set_aggregate_metadata, + method_args=["value", {'a': 'b'}], + expected_args=[self.proxy, {'a': 'b'}], + ) + + def test_aggregate_precache_image(self): + self._verify( + "openstack.compute.v2.aggregate.Aggregate.precache_images", + self.proxy.aggregate_precache_images, + method_args=["value", '1'], + expected_args=[self.proxy, [{'id': '1'}]], + ) + + def test_aggregate_precache_images(self): + self._verify( + "openstack.compute.v2.aggregate.Aggregate.precache_images", + self.proxy.aggregate_precache_images, + method_args=["value", ['1', '2']], + expected_args=[self.proxy, [{'id': '1'}, {'id': '2'}]], + ) + + +class 
TestService(TestComputeProxy): + def test_services(self): + self.verify_list(self.proxy.services, service.Service) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_enable_service_252(self, mv_mock): + self._verify( + 'openstack.compute.v2.service.Service.enable', + self.proxy.enable_service, + method_args=["value", "host1", "nova-compute"], + expected_args=[self.proxy, "host1", "nova-compute"], + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_enable_service_253(self, mv_mock): + self._verify( + 'openstack.proxy.Proxy._update', + self.proxy.enable_service, + method_args=["value"], + method_kwargs={}, + expected_args=[service.Service, "value"], + expected_kwargs={'status': 'enabled'}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_disable_service_252(self, mv_mock): + self._verify( + 'openstack.compute.v2.service.Service.disable', + self.proxy.disable_service, + method_args=["value", "host1", "nova-compute"], + expected_args=[self.proxy, "host1", "nova-compute", None], + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_disable_service_253(self, mv_mock): + self._verify( + 'openstack.proxy.Proxy._update', + self.proxy.disable_service, + method_args=["value"], + method_kwargs={'disabled_reason': 'some_reason'}, + expected_args=[service.Service, "value"], + expected_kwargs={ + 'status': 'disabled', + 'disabled_reason': 'some_reason', + }, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_force_service_down_252(self, mv_mock): + self._verify( + 'openstack.compute.v2.service.Service.set_forced_down', + self.proxy.update_service_forced_down, + method_args=["value", "host1", "nova-compute"], + expected_args=[self.proxy, "host1", 
"nova-compute", True], + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_force_service_down_252_empty_vals(self, mv_mock): + self.assertRaises( + ValueError, + self.proxy.update_service_forced_down, + "value", + None, + None, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_force_service_down_252_empty_vals_svc(self, mv_mock): + self._verify( + 'openstack.compute.v2.service.Service.set_forced_down', + self.proxy.update_service_forced_down, + method_args=[{'host': 'a', 'binary': 'b'}, None, None], + expected_args=[self.proxy, None, None, True], + ) + + def test_find_service(self): + self.verify_find( + self.proxy.find_service, + service.Service, + ) + + def test_find_service_args(self): + self.verify_find( + self.proxy.find_service, + service.Service, + method_kwargs={'host': 'h1'}, + expected_kwargs={'host': 'h1'}, + ) + + +class TestVolumeAttachment(TestComputeProxy): + def test_volume_attachment_create(self): + self.verify_create( + self.proxy.create_volume_attachment, + volume_attachment.VolumeAttachment, + method_kwargs={'server': 'server_id', 'volume': 'volume_id'}, + expected_kwargs={ + 'server_id': 'server_id', + 'volume_id': 'volume_id', + }, + ) + + def test_volume_attachment_create__legacy_parameters(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + self.verify_create( + self.proxy.create_volume_attachment, + volume_attachment.VolumeAttachment, + method_kwargs={'server': 'server_id', 'volumeId': 'volume_id'}, + expected_kwargs={ + 'server_id': 'server_id', + 'volume_id': 'volume_id', + }, + ) + + self.assertEqual(1, len(w)) + self.assertEqual( + os_warnings.RemovedInSDK50Warning, + w[-1].category, + ) + self.assertIn( + 'This method was called with a volume_id or volumeId argument', + str(w[-1]), + ) + + def test_volume_attachment_create__missing_parameters(self): + exc = 
self.assertRaises( + TypeError, + self.proxy.create_volume_attachment, + 'server_id', + ) + self.assertIn( + 'create_volume_attachment() missing 1 required positional argument: volume', # noqa: E501 + str(exc), + ) + + def test_volume_attachment_update(self): + self.verify_update( + self.proxy.update_volume_attachment, + volume_attachment.VolumeAttachment, + method_args=[], + method_kwargs={'server': 'server_id', 'volume': 'volume_id'}, + expected_args=[None], + expected_kwargs={ + 'id': 'volume_id', + 'server_id': 'server_id', + 'volume_id': 'volume_id', + }, + ) + + def test_volume_attachment_delete(self): + # We pass objects to avoid the lookup that's done as part of the + # handling of legacy option order. We test that legacy path separately. + fake_server = server.Server(id=str(uuid.uuid4())) + fake_volume = volume.Volume(id=str(uuid.uuid4())) + + self.verify_delete( + self.proxy.delete_volume_attachment, + volume_attachment.VolumeAttachment, + ignore_missing=False, + method_args=[fake_server, fake_volume], + method_kwargs={}, + expected_args=[None], + expected_kwargs={ + 'id': fake_volume.id, + 'server_id': fake_server.id, + }, + ) + + def test_volume_attachment_delete__ignore(self): + # We pass objects to avoid the lookup that's done as part of the + # handling of legacy option order. We test that legacy path separately. 
+ fake_server = server.Server(id=str(uuid.uuid4())) + fake_volume = volume.Volume(id=str(uuid.uuid4())) + + self.verify_delete( + self.proxy.delete_volume_attachment, + volume_attachment.VolumeAttachment, + ignore_missing=True, + method_args=[fake_server, fake_volume], + method_kwargs={}, + expected_args=[None], + expected_kwargs={ + 'id': fake_volume.id, + 'server_id': fake_server.id, + }, + ) + + def test_volume_attachment_delete__legacy_parameters(self): + fake_server = server.Server(id=str(uuid.uuid4())) + fake_volume = volume.Volume(id=str(uuid.uuid4())) + + with mock.patch.object( + self.proxy, + 'find_server', + return_value=None, + ) as mock_find_server: + # we are calling the method with volume and server ID arguments as + # strings and in the wrong order, which results in a query as we + # attempt to match the server ID to an actual server before we + # switch the argument order once we realize we can't do this + self.verify_delete( + self.proxy.delete_volume_attachment, + volume_attachment.VolumeAttachment, + ignore_missing=False, + method_args=[fake_volume.id, fake_server.id], + method_kwargs={}, + expected_args=[None], + expected_kwargs={ + 'id': fake_volume.id, + 'server_id': fake_server.id, + }, + ) + + # note that we attempted to call the server with the volume ID but + # this was mocked to return None (as would happen in the real + # world) + mock_find_server.assert_called_once_with( + fake_volume.id, + ignore_missing=True, + ) + + def test_volume_attachment_get(self): + self.verify_get( + self.proxy.get_volume_attachment, + volume_attachment.VolumeAttachment, + method_args=[], + method_kwargs={'server': 'server_id', 'volume': 'volume_id'}, + expected_kwargs={ + 'id': 'volume_id', + 'server_id': 'server_id', + }, + ) + + def test_volume_attachments(self): + self.verify_list( + self.proxy.volume_attachments, + volume_attachment.VolumeAttachment, + method_kwargs={'server': 'server_id'}, + expected_kwargs={'server_id': 'server_id'}, + ) + + +class 
TestHypervisor(TestComputeProxy): + def test_hypervisors_not_detailed(self): + self.verify_list( + self.proxy.hypervisors, + hypervisor.Hypervisor, + method_kwargs={"details": False}, + expected_kwargs={}, + ) + + def test_hypervisors_detailed(self): + self.verify_list( + self.proxy.hypervisors, + hypervisor.HypervisorDetail, + method_kwargs={"details": True}, + expected_kwargs={}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_hypervisors_search_before_253_no_qp(self, sm): + self.verify_list( + self.proxy.hypervisors, + hypervisor.Hypervisor, + base_path='/os-hypervisors/detail', + method_kwargs={'details': True}, + expected_kwargs={}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_hypervisors_search_before_253(self, sm): + self.verify_list( + self.proxy.hypervisors, + hypervisor.Hypervisor, + base_path='/os-hypervisors/substring/search', + method_kwargs={'hypervisor_hostname_pattern': 'substring'}, + expected_kwargs={}, + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_hypervisors_search_after_253(self, sm): + self.verify_list( + self.proxy.hypervisors, + hypervisor.Hypervisor, + method_kwargs={'hypervisor_hostname_pattern': 'substring'}, + base_path=None, + expected_kwargs={'hypervisor_hostname_pattern': 'substring'}, + ) + + def test_find_hypervisor_detail(self): + self.verify_find( + self.proxy.find_hypervisor, + hypervisor.Hypervisor, + expected_kwargs={ + 'list_base_path': '/os-hypervisors/detail', + 'ignore_missing': True, + }, + ) + + def test_find_hypervisor_no_detail(self): + self.verify_find( + self.proxy.find_hypervisor, + hypervisor.Hypervisor, + method_kwargs={'details': False}, + expected_kwargs={'list_base_path': None, 'ignore_missing': True}, + ) - def test_keypair_delete(self): - self.verify_delete(self.proxy.delete_keypair, keypair.Keypair, 
False) + def test_get_hypervisor(self): + self.verify_get(self.proxy.get_hypervisor, hypervisor.Hypervisor) - def test_keypair_delete_ignore(self): - self.verify_delete(self.proxy.delete_keypair, keypair.Keypair, True) + def test_get_hypervisor_uptime(self): + self._verify( + "openstack.compute.v2.hypervisor.Hypervisor.get_uptime", + self.proxy.get_hypervisor_uptime, + method_args=["value"], + expected_args=[self.proxy], + ) - def test_keypair_find(self): - self.verify_find(self.proxy.find_keypair, keypair.Keypair) - def test_keypair_get(self): - self.verify_get(self.proxy.get_keypair, keypair.Keypair) +class TestCompute(TestComputeProxy): + def test_extension_find(self): + self.verify_find(self.proxy.find_extension, extension.Extension) - def test_keypairs(self): - self.verify_list_no_kwargs(self.proxy.keypairs, keypair.Keypair, - paginated=False) + def test_extensions(self): + self.verify_list(self.proxy.extensions, extension.Extension) + + @contextlib.contextmanager + def _check_image_proxy_deprecation_warning(self): + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + yield + self.assertEqual(1, len(w)) + self.assertTrue(issubclass(w[-1].category, DeprecationWarning)) + self.assertIn( + "This API is a proxy to the image service ", + str(w[-1].message), + ) + + def test_image_delete(self): + with self._check_image_proxy_deprecation_warning(): + self.verify_delete(self.proxy.delete_image, image.Image, False) + + def test_image_delete_ignore(self): + with self._check_image_proxy_deprecation_warning(): + self.verify_delete(self.proxy.delete_image, image.Image, True) + + def test_image_find(self): + with self._check_image_proxy_deprecation_warning(): + self.verify_find(self.proxy.find_image, image.Image) + + def test_image_get(self): + with self._check_image_proxy_deprecation_warning(): + self.verify_get(self.proxy.get_image, image.Image) + + def test_images_detailed(self): + with self._check_image_proxy_deprecation_warning(): + 
self.verify_list( + self.proxy.images, + image.ImageDetail, + method_kwargs={"details": True, "query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_images_not_detailed(self): + with self._check_image_proxy_deprecation_warning(): + self.verify_list( + self.proxy.images, + image.Image, + method_kwargs={"details": False, "query": 1}, + expected_kwargs={"query": 1}, + ) def test_limits_get(self): - self.verify_get(self.proxy.get_limits, limits.Limits, value=[]) + self._verify( + "openstack.compute.v2.limits.Limits.fetch", + self.proxy.get_limits, + method_args=[], + method_kwargs={"a": "b"}, + expected_args=[self.proxy], + expected_kwargs={"a": "b"}, + ) def test_server_interface_create(self): - self.verify_create(self.proxy.create_server_interface, - server_interface.ServerInterface, - method_kwargs={"server": "test_id"}, - expected_kwargs={"server_id": "test_id"}) + self.verify_create( + self.proxy.create_server_interface, + server_interface.ServerInterface, + method_kwargs={"server": "test_id"}, + expected_kwargs={"server_id": "test_id"}, + ) def test_server_interface_delete(self): self.proxy._get_uri_attribute = lambda *args: args[1] @@ -126,33 +916,34 @@ def test_server_interface_delete(self): test_interface.server_id = server_id # Case1: ServerInterface instance is provided as value - self._verify2("openstack.proxy2.BaseProxy._delete", - self.proxy.delete_server_interface, - method_args=[test_interface], - method_kwargs={"server": server_id}, - expected_args=[server_interface.ServerInterface], - expected_kwargs={"server_id": server_id, - "port_id": interface_id, - "ignore_missing": True}) + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_server_interface, + method_args=[test_interface], + method_kwargs={"server": server_id}, + expected_args=[server_interface.ServerInterface, interface_id], + expected_kwargs={"server_id": server_id, "ignore_missing": True}, + ) # Case2: ServerInterface ID is provided as value - 
self._verify2("openstack.proxy2.BaseProxy._delete", - self.proxy.delete_server_interface, - method_args=[interface_id], - method_kwargs={"server": server_id}, - expected_args=[server_interface.ServerInterface], - expected_kwargs={"server_id": server_id, - "port_id": interface_id, - "ignore_missing": True}) + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_server_interface, + method_args=[interface_id], + method_kwargs={"server": server_id}, + expected_args=[server_interface.ServerInterface, interface_id], + expected_kwargs={"server_id": server_id, "ignore_missing": True}, + ) def test_server_interface_delete_ignore(self): self.proxy._get_uri_attribute = lambda *args: args[1] - self.verify_delete(self.proxy.delete_server_interface, - server_interface.ServerInterface, True, - method_kwargs={"server": "test_id"}, - expected_args=[server_interface.ServerInterface], - expected_kwargs={"server_id": "test_id", - "port_id": "resource_or_id"}) + self.verify_delete( + self.proxy.delete_server_interface, + server_interface.ServerInterface, + True, + method_kwargs={"server": "test_id"}, + expected_kwargs={"server_id": "test_id"}, + ) def test_server_interface_get(self): self.proxy._get_uri_attribute = lambda *args: args[1] @@ -163,41 +954,55 @@ def test_server_interface_get(self): test_interface.server_id = server_id # Case1: ServerInterface instance is provided as value - self._verify2('openstack.proxy2.BaseProxy._get', - self.proxy.get_server_interface, - method_args=[test_interface], - method_kwargs={"server": server_id}, - expected_args=[server_interface.ServerInterface], - expected_kwargs={"port_id": interface_id, - "server_id": server_id}) + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_server_interface, + method_args=[test_interface], + method_kwargs={"server": server_id}, + expected_args=[server_interface.ServerInterface], + expected_kwargs={"port_id": interface_id, "server_id": server_id}, + ) # Case2: ServerInterface ID is 
provided as value - self._verify2('openstack.proxy2.BaseProxy._get', - self.proxy.get_server_interface, - method_args=[interface_id], - method_kwargs={"server": server_id}, - expected_args=[server_interface.ServerInterface], - expected_kwargs={"port_id": interface_id, - "server_id": server_id}) + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_server_interface, + method_args=[interface_id], + method_kwargs={"server": server_id}, + expected_args=[server_interface.ServerInterface], + expected_kwargs={"port_id": interface_id, "server_id": server_id}, + ) def test_server_interfaces(self): - self.verify_list(self.proxy.server_interfaces, - server_interface.ServerInterface, - paginated=False, method_args=["test_id"], - expected_kwargs={"server_id": "test_id"}) + self.verify_list( + self.proxy.server_interfaces, + server_interface.ServerInterface, + method_args=["test_id"], + expected_args=[], + expected_kwargs={"server_id": "test_id"}, + ) def test_server_ips_with_network_label(self): - self.verify_list(self.proxy.server_ips, server_ip.ServerIP, - paginated=False, method_args=["test_id"], - method_kwargs={"network_label": "test_label"}, - expected_kwargs={"server_id": "test_id", - "network_label": "test_label"}) + self.verify_list( + self.proxy.server_ips, + server_ip.ServerIP, + method_args=["test_id"], + method_kwargs={"network_label": "test_label"}, + expected_args=[], + expected_kwargs={ + "server_id": "test_id", + "network_label": "test_label", + }, + ) def test_server_ips_without_network_label(self): - self.verify_list(self.proxy.server_ips, server_ip.ServerIP, - paginated=False, method_args=["test_id"], - expected_kwargs={"server_id": "test_id", - "network_label": None}) + self.verify_list( + self.proxy.server_ips, + server_ip.ServerIP, + method_args=["test_id"], + expected_args=[], + expected_kwargs={"server_id": "test_id", "network_label": None}, + ) def test_server_create_attrs(self): self.verify_create(self.proxy.create_server, server.Server) @@ 
-209,56 +1014,106 @@ def test_server_delete_ignore(self): self.verify_delete(self.proxy.delete_server, server.Server, True) def test_server_force_delete(self): - self._verify("openstack.compute.v2.server.Server.force_delete", - self.proxy.delete_server, - method_args=["value", False, True]) + self._verify( + "openstack.compute.v2.server.Server.force_delete", + self.proxy.delete_server, + method_args=["value", False, True], + expected_args=[self.proxy], + ) def test_server_find(self): - self.verify_find(self.proxy.find_server, server.Server) + self.verify_find( + self.proxy.find_server, + server.Server, + method_kwargs={'all_projects': True}, + expected_kwargs={ + 'list_base_path': '/servers/detail', + 'all_projects': True, + }, + ) def test_server_get(self): self.verify_get(self.proxy.get_server, server.Server) def test_servers_detailed(self): - self.verify_list(self.proxy.servers, server.ServerDetail, - paginated=True, - method_kwargs={"details": True, - "changes_since": 1, "image": 2}, - expected_kwargs={"changes_since": 1, "image": 2}) + self.verify_list( + self.proxy.servers, + server.Server, + method_kwargs={"details": True, "changes_since": 1, "image": 2}, + expected_kwargs={ + "changes_since": 1, + "image": 2, + "base_path": "/servers/detail", + }, + ) def test_servers_not_detailed(self): - self.verify_list(self.proxy.servers, server.Server, - paginated=True, - method_kwargs={"details": False, - "changes_since": 1, "image": 2}, - expected_kwargs={"paginated": True, - "changes_since": 1, "image": 2}) + self.verify_list( + self.proxy.servers, + server.Server, + method_kwargs={"details": False, "changes_since": 1, "image": 2}, + expected_kwargs={"changes_since": 1, "image": 2}, + ) def test_server_update(self): self.verify_update(self.proxy.update_server, server.Server) + def test_server_change_password(self): + self._verify( + "openstack.compute.v2.server.Server.change_password", + self.proxy.change_server_password, + method_args=["value", "password"], + 
expected_args=[self.proxy, "password"], + ) + + def test_server_get_password(self): + self._verify( + "openstack.compute.v2.server.Server.get_password", + self.proxy.get_server_password, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_server_clear_password(self): + self._verify( + "openstack.compute.v2.server.Server.clear_password", + self.proxy.clear_server_password, + method_args=["value"], + expected_args=[self.proxy], + ) + def test_server_wait_for(self): value = server.Server(id='1234') self.verify_wait_for_status( self.proxy.wait_for_server, method_args=[value], - expected_args=[value, 'ACTIVE', ['ERROR'], 2, 120]) + expected_args=[self.proxy, value, 'ACTIVE', ['ERROR'], 2, 120], + expected_kwargs={'callback': None}, + ) def test_server_resize(self): - self._verify("openstack.compute.v2.server.Server.resize", - self.proxy.resize_server, - method_args=["value", "test-flavor"], - expected_args=["test-flavor"]) + self._verify( + "openstack.compute.v2.server.Server.resize", + self.proxy.resize_server, + method_args=["value", "test-flavor"], + expected_args=[self.proxy, "test-flavor"], + ) def test_server_confirm_resize(self): - self._verify("openstack.compute.v2.server.Server.confirm_resize", - self.proxy.confirm_server_resize, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.confirm_resize", + self.proxy.confirm_server_resize, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_revert_resize(self): - self._verify("openstack.compute.v2.server.Server.revert_resize", - self.proxy.revert_server_resize, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.revert_resize", + self.proxy.revert_server_resize, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_rebuild(self): id = 'test_image_id' @@ -267,233 +1122,775 @@ def test_server_rebuild(self): # Case1: image object is provided # NOTE: Inside of Server.rebuild is where image_obj gets 
converted # to an ID instead of object. - self._verify('openstack.compute.v2.server.Server.rebuild', - self.proxy.rebuild_server, - method_args=["value", "test_server", "test_pass"], - method_kwargs={"metadata": {"k1": "v1"}, - "image": image_obj}, - expected_args=["test_server", "test_pass"], - expected_kwargs={"metadata": {"k1": "v1"}, - "image": image_obj}) + self._verify( + 'openstack.compute.v2.server.Server.rebuild', + self.proxy.rebuild_server, + method_args=["value"], + method_kwargs={ + "name": "test_server", + "admin_password": "test_pass", + "metadata": {"k1": "v1"}, + "image": image_obj, + }, + expected_args=[self.proxy], + expected_kwargs={ + "name": "test_server", + "admin_password": "test_pass", + "metadata": {"k1": "v1"}, + "image": image_obj, + }, + ) # Case2: image name or id is provided - self._verify('openstack.compute.v2.server.Server.rebuild', - self.proxy.rebuild_server, - method_args=["value", "test_server", "test_pass"], - method_kwargs={"metadata": {"k1": "v1"}, - "image": id}, - expected_args=["test_server", "test_pass"], - expected_kwargs={"metadata": {"k1": "v1"}, - "image": id}) + self._verify( + 'openstack.compute.v2.server.Server.rebuild', + self.proxy.rebuild_server, + method_args=["value"], + method_kwargs={ + "name": "test_server", + "admin_password": "test_pass", + "metadata": {"k1": "v1"}, + "image": id, + }, + expected_args=[self.proxy], + expected_kwargs={ + "name": "test_server", + "admin_password": "test_pass", + "metadata": {"k1": "v1"}, + "image": id, + }, + ) def test_add_fixed_ip_to_server(self): - self._verify("openstack.compute.v2.server.Server.add_fixed_ip", - self.proxy.add_fixed_ip_to_server, - method_args=["value", "network-id"], - expected_args=["network-id"]) + self._verify( + "openstack.compute.v2.server.Server.add_fixed_ip", + self.proxy.add_fixed_ip_to_server, + method_args=["value", "network-id"], + expected_args=[self.proxy, "network-id"], + ) def test_fixed_ip_from_server(self): - 
self._verify("openstack.compute.v2.server.Server.remove_fixed_ip", - self.proxy.remove_fixed_ip_from_server, - method_args=["value", "address"], - expected_args=["address"]) + self._verify( + "openstack.compute.v2.server.Server.remove_fixed_ip", + self.proxy.remove_fixed_ip_from_server, + method_args=["value", "address"], + expected_args=[self.proxy, "address"], + ) def test_floating_ip_to_server(self): - self._verify("openstack.compute.v2.server.Server.add_floating_ip", - self.proxy.add_floating_ip_to_server, - method_args=["value", "floating-ip"], - expected_args=["floating-ip"], - expected_kwargs={'fixed_address': None}) + self._verify( + "openstack.compute.v2.server.Server.add_floating_ip", + self.proxy.add_floating_ip_to_server, + method_args=["value", "floating-ip"], + expected_args=[self.proxy, "floating-ip"], + expected_kwargs={'fixed_address': None}, + ) def test_add_floating_ip_to_server_with_fixed_addr(self): - self._verify("openstack.compute.v2.server.Server.add_floating_ip", - self.proxy.add_floating_ip_to_server, - method_args=["value", "floating-ip", 'fixed-addr'], - expected_args=["floating-ip"], - expected_kwargs={'fixed_address': 'fixed-addr'}) + self._verify( + "openstack.compute.v2.server.Server.add_floating_ip", + self.proxy.add_floating_ip_to_server, + method_args=["value", "floating-ip", 'fixed-addr'], + expected_args=[self.proxy, "floating-ip"], + expected_kwargs={'fixed_address': 'fixed-addr'}, + ) def test_remove_floating_ip_from_server(self): - self._verify("openstack.compute.v2.server.Server.remove_floating_ip", - self.proxy.remove_floating_ip_from_server, - method_args=["value", "address"], - expected_args=["address"]) + self._verify( + "openstack.compute.v2.server.Server.remove_floating_ip", + self.proxy.remove_floating_ip_from_server, + method_args=["value", "address"], + expected_args=[self.proxy, "address"], + ) + + def test_server_backup(self): + self._verify( + "openstack.compute.v2.server.Server.backup", + 
self.proxy.backup_server, + method_args=["value", "name", "daily", 1], + expected_args=[self.proxy, "name", "daily", 1], + ) def test_server_pause(self): - self._verify("openstack.compute.v2.server.Server.pause", - self.proxy.pause_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.pause", + self.proxy.pause_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_unpause(self): - self._verify("openstack.compute.v2.server.Server.unpause", - self.proxy.unpause_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.unpause", + self.proxy.unpause_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_suspend(self): - self._verify("openstack.compute.v2.server.Server.suspend", - self.proxy.suspend_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.suspend", + self.proxy.suspend_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_resume(self): - self._verify("openstack.compute.v2.server.Server.resume", - self.proxy.resume_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.resume", + self.proxy.resume_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_lock(self): - self._verify("openstack.compute.v2.server.Server.lock", - self.proxy.lock_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.lock", + self.proxy.lock_server, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={"locked_reason": None}, + ) + + def test_server_lock_with_options(self): + self._verify( + "openstack.compute.v2.server.Server.lock", + self.proxy.lock_server, + method_args=["value"], + method_kwargs={"locked_reason": "Because why not"}, + expected_args=[self.proxy], + expected_kwargs={"locked_reason": "Because why not"}, + ) def test_server_unlock(self): - 
self._verify("openstack.compute.v2.server.Server.unlock", - self.proxy.unlock_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.unlock", + self.proxy.unlock_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_rescue(self): - self._verify("openstack.compute.v2.server.Server.rescue", - self.proxy.rescue_server, - method_args=["value"], - expected_kwargs={"admin_pass": None, "image_ref": None}) + self._verify( + "openstack.compute.v2.server.Server.rescue", + self.proxy.rescue_server, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={"admin_pass": None, "image_ref": None}, + ) def test_server_rescue_with_options(self): - self._verify("openstack.compute.v2.server.Server.rescue", - self.proxy.rescue_server, - method_args=["value", 'PASS', 'IMG'], - expected_kwargs={"admin_pass": 'PASS', - "image_ref": 'IMG'}) + self._verify( + "openstack.compute.v2.server.Server.rescue", + self.proxy.rescue_server, + method_args=["value", 'PASS', 'IMG'], + expected_args=[self.proxy], + expected_kwargs={"admin_pass": 'PASS', "image_ref": 'IMG'}, + ) def test_server_unrescue(self): - self._verify("openstack.compute.v2.server.Server.unrescue", - self.proxy.unrescue_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.unrescue", + self.proxy.unrescue_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_evacuate(self): - self._verify("openstack.compute.v2.server.Server.evacuate", - self.proxy.evacuate_server, - method_args=["value"], - expected_kwargs={"host": None, "admin_pass": None, - "force": None}) + self._verify( + "openstack.compute.v2.server.Server.evacuate", + self.proxy.evacuate_server, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={ + "host": None, + "admin_pass": None, + "force": None, + "on_shared_storage": None, + }, + ) def test_server_evacuate_with_options(self): - 
self._verify("openstack.compute.v2.server.Server.evacuate", - self.proxy.evacuate_server, - method_args=["value", 'HOST2', 'NEW_PASS', True], - expected_kwargs={"host": "HOST2", - "admin_pass": 'NEW_PASS', - "force": True}) + self._verify( + "openstack.compute.v2.server.Server.evacuate", + self.proxy.evacuate_server, + method_args=["value", 'HOST2', 'NEW_PASS', True], + method_kwargs={'on_shared_storage': False}, + expected_args=[self.proxy], + expected_kwargs={ + "host": "HOST2", + "admin_pass": 'NEW_PASS', + "force": True, + "on_shared_storage": False, + }, + ) def test_server_start(self): - self._verify("openstack.compute.v2.server.Server.start", - self.proxy.start_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.start", + self.proxy.start_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_stop(self): - self._verify("openstack.compute.v2.server.Server.stop", - self.proxy.stop_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.stop", + self.proxy.stop_server, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_server_restore(self): + self._verify( + "openstack.compute.v2.server.Server.restore", + self.proxy.restore_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_shelve(self): - self._verify("openstack.compute.v2.server.Server.shelve", - self.proxy.shelve_server, - method_args=["value"]) + self._verify( + "openstack.compute.v2.server.Server.shelve", + self.proxy.shelve_server, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_server_shelve_offload(self): + self._verify( + "openstack.compute.v2.server.Server.shelve_offload", + self.proxy.shelve_offload_server, + method_args=["value"], + expected_args=[self.proxy], + ) def test_server_unshelve(self): - self._verify("openstack.compute.v2.server.Server.unshelve", - self.proxy.unshelve_server, - method_args=["value"]) - - def 
test_availability_zones(self): - self.verify_list_no_kwargs(self.proxy.availability_zones, - az.AvailabilityZone, - paginated=False) + self._verify( + "openstack.compute.v2.server.Server.unshelve", + self.proxy.unshelve_server, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={ + "host": None, + "availability_zone": types.UNSET, + }, + ) + + def test_server_unshelve_with_options(self): + self._verify( + "openstack.compute.v2.server.Server.unshelve", + self.proxy.unshelve_server, + method_args=["value"], + method_kwargs={"host": "HOST2", "availability_zone": "AZ2"}, + expected_args=[self.proxy], + expected_kwargs={"host": "HOST2", "availability_zone": "AZ2"}, + ) + + def test_server_trigger_dump(self): + self._verify( + "openstack.compute.v2.server.Server.trigger_crash_dump", + self.proxy.trigger_server_crash_dump, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_server_add_tag(self): + self._verify( + "openstack.compute.v2.server.Server.add_tag", + self.proxy.add_tag_to_server, + method_args=["value", "tag"], + expected_args=[self.proxy, "tag"], + ) + + def test_server_remove_tag(self): + self._verify( + "openstack.compute.v2.server.Server.remove_tag", + self.proxy.remove_tag_from_server, + method_args=["value", "tag"], + expected_args=[self.proxy, "tag"], + ) + + def test_server_remove_tags(self): + self._verify( + "openstack.compute.v2.server.Server.remove_all_tags", + self.proxy.remove_tags_from_server, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_get_server_output(self): + self._verify( + "openstack.compute.v2.server.Server.get_console_output", + self.proxy.get_server_console_output, + method_args=["value"], + expected_args=[self.proxy], + expected_kwargs={"length": None}, + ) + + self._verify( + "openstack.compute.v2.server.Server.get_console_output", + self.proxy.get_server_console_output, + method_args=["value", 1], + expected_args=[self.proxy], + expected_kwargs={"length": 1}, + ) + + 
def test_availability_zones_not_detailed(self): + self.verify_list( + self.proxy.availability_zones, + az.AvailabilityZone, + method_kwargs={"details": False}, + expected_kwargs={}, + ) + + def test_availability_zones_detailed(self): + self.verify_list( + self.proxy.availability_zones, + az.AvailabilityZoneDetail, + method_kwargs={"details": True}, + expected_kwargs={}, + ) def test_get_all_server_metadata(self): - self._verify2("openstack.compute.v2.server.Server.get_metadata", - self.proxy.get_server_metadata, - method_args=["value"], - method_result=server.Server(id="value", metadata={}), - expected_args=[self.session], - expected_result={}) + self._verify( + "openstack.compute.v2.server.Server.fetch_metadata", + self.proxy.get_server_metadata, + method_args=["value"], + expected_args=[self.proxy], + expected_result=server.Server(id="value", metadata={}), + ) def test_set_server_metadata(self): kwargs = {"a": "1", "b": "2"} id = "an_id" - self._verify2("openstack.compute.v2.server.Server.set_metadata", - self.proxy.set_server_metadata, - method_args=[id], - method_kwargs=kwargs, - method_result=server.Server.existing(id=id, - metadata=kwargs), - expected_args=[self.session], - expected_kwargs=kwargs, - expected_result=kwargs) + self._verify( + "openstack.compute.v2.server.Server.set_metadata", + self.proxy.set_server_metadata, + method_args=[id], + method_kwargs=kwargs, + method_result=server.Server.existing(id=id, metadata=kwargs), + expected_args=[self.proxy], + expected_kwargs={'metadata': kwargs}, + expected_result=server.Server.existing(id=id, metadata=kwargs), + ) def test_delete_server_metadata(self): - self._verify2("openstack.compute.v2.server.Server.delete_metadata", - self.proxy.delete_server_metadata, - expected_result=None, - method_args=["value", "key"], - expected_args=[self.session, "key"]) + self._verify( + "openstack.compute.v2.server.Server.delete_metadata_item", + self.proxy.delete_server_metadata, + expected_result=None, + 
method_args=["value", ["key"]], + expected_args=[self.proxy, "key"], + ) + + def test_create_image(self): + metadata = {'k1': 'v1'} + with mock.patch( + 'openstack.compute.v2.server.Server.create_image' + ) as ci_mock: + ci_mock.return_value = 'image_id' + connection_mock = mock.Mock() + connection_mock.get_image = mock.Mock(return_value='image') + connection_mock.wait_for_image = mock.Mock() + self.proxy._connection = connection_mock + + rsp = self.proxy.create_server_image( + 'server', 'image_name', metadata, wait=True, timeout=1 + ) + + ci_mock.assert_called_with(self.proxy, 'image_name', metadata) + + self.proxy._connection.get_image.assert_called_with('image_id') + self.proxy._connection.wait_for_image.assert_called_with( + 'image', timeout=1 + ) + + self.assertEqual(connection_mock.wait_for_image(), rsp) def test_server_group_create(self): - self.verify_create(self.proxy.create_server_group, - server_group.ServerGroup) + self.verify_create( + self.proxy.create_server_group, server_group.ServerGroup + ) def test_server_group_delete(self): - self.verify_delete(self.proxy.delete_server_group, - server_group.ServerGroup, False) + self.verify_delete( + self.proxy.delete_server_group, server_group.ServerGroup, False + ) def test_server_group_delete_ignore(self): - self.verify_delete(self.proxy.delete_server_group, - server_group.ServerGroup, True) + self.verify_delete( + self.proxy.delete_server_group, server_group.ServerGroup, True + ) def test_server_group_find(self): - self.verify_find(self.proxy.find_server_group, - server_group.ServerGroup) + self.verify_find( + self.proxy.find_server_group, + server_group.ServerGroup, + method_kwargs={'all_projects': True}, + expected_kwargs={'all_projects': True}, + ) def test_server_group_get(self): - self.verify_get(self.proxy.get_server_group, - server_group.ServerGroup) + self.verify_get(self.proxy.get_server_group, server_group.ServerGroup) def test_server_groups(self): - self.verify_list(self.proxy.server_groups, 
server_group.ServerGroup, - paginated=False) - - def test_hypervisors(self): - self.verify_list_no_kwargs(self.proxy.hypervisors, - hypervisor.Hypervisor, - paginated=False) - - def test_find_hypervisor(self): - self.verify_find(self.proxy.find_hypervisor, - hypervisor.Hypervisor) - - def test_get_hypervisor(self): - self.verify_get(self.proxy.get_hypervisor, - hypervisor.Hypervisor) - - def test_get_service(self): - self.verify_get(self.proxy.get_service, - service.Service) - - def test_services(self): - self.verify_list_no_kwargs(self.proxy.services, - service.Service, - paginated=False) - - def test_enable_service(self): - self._verify('openstack.compute.v2.service.Service.enable', - self.proxy.enable_service, - method_args=["value", "host1", "nova-compute"], - expected_args=["host1", "nova-compute"]) - - def test_disable_service(self): - self._verify('openstack.compute.v2.service.Service.disable', - self.proxy.disable_service, - method_args=["value", "host1", "nova-compute"], - expected_args=["host1", "nova-compute", None]) - - def test_force_service_down(self): - self._verify('openstack.compute.v2.service.Service.force_down', - self.proxy.force_service_down, - method_args=["value", "host1", "nova-compute"], - expected_args=["host1", "nova-compute"]) + self.verify_list(self.proxy.server_groups, server_group.ServerGroup) + + def test_live_migrate_server(self): + self._verify( + 'openstack.compute.v2.server.Server.live_migrate', + self.proxy.live_migrate_server, + method_args=["value"], + method_kwargs={'host': 'host1', 'force': False}, + expected_args=[self.proxy], + expected_kwargs={ + 'host': 'host1', + 'force': False, + 'block_migration': None, + 'disk_over_commit': None, + }, + ) + + def test_abort_server_migration(self): + self._verify( + 'openstack.proxy.Proxy._delete', + self.proxy.abort_server_migration, + method_args=['server_migration', 'server'], + expected_args=[ + server_migration.ServerMigration, + 'server_migration', + ], + expected_kwargs={ + 
'server_id': 'server', + 'ignore_missing': True, + }, + ) + + def test_force_complete_server_migration(self): + self._verify( + 'openstack.compute.v2.server_migration.ServerMigration.force_complete', + self.proxy.force_complete_server_migration, + method_args=['server_migration', 'server'], + expected_args=[self.proxy], + ) + + def test_get_server_migration(self): + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_server_migration, + method_args=['server_migration', 'server'], + expected_args=[ + server_migration.ServerMigration, + 'server_migration', + ], + expected_kwargs={ + 'server_id': 'server', + 'ignore_missing': True, + }, + ) + + def test_server_migrations(self): + self._verify( + 'openstack.proxy.Proxy._list', + self.proxy.server_migrations, + method_args=['server'], + expected_args=[server_migration.ServerMigration], + expected_kwargs={'server_id': 'server'}, + ) + + def test_migrations(self): + self.verify_list(self.proxy.migrations, migration.Migration) + + def test_migrations_kwargs(self): + self.verify_list( + self.proxy.migrations, + migration.Migration, + method_kwargs={'host': 'h1'}, + expected_kwargs={'host': 'h1'}, + ) + + def test_fetch_security_groups(self): + self._verify( + 'openstack.compute.v2.server.Server.fetch_security_groups', + self.proxy.fetch_server_security_groups, + method_args=["value"], + expected_args=[self.proxy], + ) + + def test_add_security_groups(self): + self._verify( + 'openstack.compute.v2.server.Server.add_security_group', + self.proxy.add_security_group_to_server, + method_args=["value", 'sg'], + expected_args=[self.proxy, 'sg'], + ) + + def test_remove_security_groups(self): + self._verify( + 'openstack.compute.v2.server.Server.remove_security_group', + self.proxy.remove_security_group_from_server, + method_args=["value", 'sg'], + expected_args=[self.proxy, 'sg'], + ) + + def test_usages(self): + self.verify_list(self.proxy.usages, usage.Usage) + + def test_usages__with_kwargs(self): + now = 
datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) + start = now - datetime.timedelta(weeks=4) + end = end = now + datetime.timedelta(days=1) + self.verify_list( + self.proxy.usages, + usage.Usage, + method_kwargs={'start': start, 'end': end}, + expected_kwargs={ + 'start': start.isoformat(), + 'end': end.isoformat(), + }, + ) + + def test_get_usage(self): + self._verify( + "openstack.compute.v2.usage.Usage.fetch", + self.proxy.get_usage, + method_args=['value'], + method_kwargs={}, + expected_args=[self.proxy], + expected_kwargs={}, + ) + + def test_get_usage__with_kwargs(self): + now = datetime.datetime.now(datetime.timezone.utc).replace(tzinfo=None) + start = now - datetime.timedelta(weeks=4) + end = end = now + datetime.timedelta(days=1) + self._verify( + "openstack.compute.v2.usage.Usage.fetch", + self.proxy.get_usage, + method_args=['value'], + method_kwargs={'start': start, 'end': end}, + expected_args=[self.proxy], + expected_kwargs={ + 'start': start.isoformat(), + 'end': end.isoformat(), + }, + ) + + def test_create_server_remote_console(self): + self.verify_create( + self.proxy.create_server_remote_console, + server_remote_console.ServerRemoteConsole, + method_kwargs={"server": "test_id", "type": "fake"}, + expected_kwargs={"server_id": "test_id", "type": "fake"}, + ) + + def test_get_console_url(self): + self._verify( + 'openstack.compute.v2.server.Server.get_console_url', + self.proxy.get_server_console_url, + method_args=["value", "console_type"], + expected_args=[self.proxy, "console_type"], + ) + + @mock.patch('openstack.utils.supports_microversion', autospec=True) + @mock.patch('openstack.compute.v2._proxy.Proxy._create', autospec=True) + @mock.patch( + 'openstack.compute.v2.server.Server.get_console_url', autospec=True + ) + def test_create_console_mv_old(self, sgc, rcc, smv): + console_fake = {'url': 'a', 'type': 'b', 'protocol': 'c'} + smv.return_value = False + sgc.return_value = console_fake + ret = 
self.proxy.create_console('fake_server', 'fake_type') + smv.assert_called_once_with(self.proxy, '2.6') + rcc.assert_not_called() + sgc.assert_called_with(mock.ANY, self.proxy, 'fake_type') + self.assertDictEqual(console_fake, ret) + + @mock.patch('openstack.utils.supports_microversion', autospec=True) + @mock.patch('openstack.compute.v2._proxy.Proxy._create', autospec=True) + @mock.patch( + 'openstack.compute.v2.server.Server.get_console_url', autospec=True + ) + def test_create_console_mv_2_6(self, sgc, rcc, smv): + console_fake = {'url': 'a', 'type': 'b', 'protocol': 'c'} + + # Test server_remote_console is triggered when mv>=2.6 + smv.return_value = True + rcc.return_value = server_remote_console.ServerRemoteConsole( + **console_fake + ) + ret = self.proxy.create_console('fake_server', 'fake_type') + smv.assert_called_once_with(self.proxy, '2.6') + sgc.assert_not_called() + rcc.assert_called_with( + mock.ANY, + server_remote_console.ServerRemoteConsole, + server_id='fake_server', + type='fake_type', + protocol=None, + ) + self.assertEqual(console_fake['url'], ret['url']) + + +class TestQuotaClassSet(TestComputeProxy): + def test_quota_class_set_get(self): + self.verify_get( + self.proxy.get_quota_class_set, quota_class_set.QuotaClassSet + ) + + def test_quota_class_set_update(self): + self.verify_update( + self.proxy.update_quota_class_set, + quota_class_set.QuotaClassSet, + False, + ) + + +class TestQuotaSet(TestComputeProxy): + def test_quota_set_get(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set, + method_args=['prj'], + expected_args=[ + self.proxy, + False, + None, + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + }, + method_result=quota_set.QuotaSet(), + expected_result=quota_set.QuotaSet(), + ) + + def test_quota_set_get_query(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set, + method_args=['prj'], + method_kwargs={'usage': 
True, 'user_id': 'uid'}, + expected_args=[ + self.proxy, + False, + '/os-quota-sets/%(project_id)s/detail', + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + 'user_id': 'uid', + }, + ) + + def test_quota_set_get_defaults(self): + self._verify( + 'openstack.resource.Resource.fetch', + self.proxy.get_quota_set_defaults, + method_args=['prj'], + expected_args=[ + self.proxy, + False, + '/os-quota-sets/%(project_id)s/defaults', + None, + False, + ], + expected_kwargs={ + 'microversion': None, + 'resource_response_key': None, + }, + ) + + def test_quota_set_reset(self): + self._verify( + 'openstack.resource.Resource.delete', + self.proxy.revert_quota_set, + method_args=['prj'], + method_kwargs={'user_id': 'uid'}, + expected_args=[self.proxy], + expected_kwargs={'user_id': 'uid'}, + ) + + @mock.patch.object(proxy_base.Proxy, "_get_resource") + def test_quota_set_update(self, mock_get): + fake_project = project.Project(id='prj') + fake_quota_set = quota_set.QuotaSet(project_id='prj') + mock_get.side_effect = [fake_project, fake_quota_set] + + self._verify( + 'openstack.resource.Resource.commit', + self.proxy.update_quota_set, + method_args=['prj'], + method_kwargs={'ram': 123}, + expected_args=[self.proxy], + expected_kwargs={}, + ) + mock_get.assert_has_calls( + [ + mock.call(project.Project, 'prj'), + mock.call(quota_set.QuotaSet, None, project_id='prj', ram=123), + ] + ) + + @mock.patch.object(proxy_base.Proxy, "_get_resource") + def test_quota_set_update__legacy(self, mock_get): + fake_quota_set = quota_set.QuotaSet(project_id='prj') + mock_get.side_effect = [fake_quota_set] + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + self._verify( + 'openstack.resource.Resource.commit', + self.proxy.update_quota_set, + method_args=[fake_quota_set], + method_kwargs={'ram': 123}, + expected_args=[self.proxy], + expected_kwargs={}, + ) + + self.assertEqual(1, len(w)) + self.assertEqual( + 
os_warnings.RemovedInSDK50Warning, + w[-1].category, + ) + self.assertIn( + "The signature of 'update_quota_set' has changed ", + str(w[-1]), + ) + + +class TestServerAction(TestComputeProxy): + def test_server_action_get(self): + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_server_action, + method_args=['request_id'], + method_kwargs={'server': 'server_id'}, + expected_args=[server_action.ServerAction], + expected_kwargs={ + 'request_id': 'request_id', + 'server_id': 'server_id', + }, + ) + + def test_server_actions(self): + self.verify_list( + self.proxy.server_actions, + server_action.ServerAction, + method_kwargs={'server': 'server_a'}, + expected_kwargs={'server_id': 'server_a'}, + ) + + +class TestValidateConsoleAuthToken(TestComputeProxy): + def test_validate_console_auth_token(self): + self.verify_get( + self.proxy.validate_console_auth_token, + console_auth_token.ConsoleAuthToken, + ) diff --git a/openstack/tests/unit/compute/v2/test_server.py b/openstack/tests/unit/compute/v2/test_server.py index 124903a53f..98a96acaea 100644 --- a/openstack/tests/unit/compute/v2/test_server.py +++ b/openstack/tests/unit/compute/v2/test_server.py @@ -10,94 +10,183 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +import http +from unittest import mock +from openstack.compute.v2 import flavor from openstack.compute.v2 import server +from openstack.image.v2 import image +from openstack.tests.unit import base +from openstack.tests.unit import fakes IDENTIFIER = 'IDENTIFIER' EXAMPLE = { - 'accessIPv4': '1', - 'accessIPv6': '2', - 'addresses': {'region': '3'}, - 'config_drive': True, - 'created': '2015-03-09T12:14:57.233772', + 'OS-DCF:diskConfig': 'AUTO', + 'OS-EXT-AZ:availability_zone': 'us-west', + 'OS-EXT-SRV-ATTR:host': 'compute', + 'OS-EXT-SRV-ATTR:hostname': 'new-server-test', + 'OS-EXT-SRV-ATTR:hypervisor_hostname': 'fake-mini', + 'OS-EXT-SRV-ATTR:instance_name': 'instance-00000001', + 'OS-EXT-SRV-ATTR:kernel_id': '', + 'OS-EXT-SRV-ATTR:launch_index': 0, + 'OS-EXT-SRV-ATTR:ramdisk_id': '', + 'OS-EXT-SRV-ATTR:reservation_id': 'r-ov3q80zj', + 'OS-EXT-SRV-ATTR:root_device_name': '/dev/sda', + 'OS-EXT-SRV-ATTR:user_data': 'IyEvYmluL2Jhc2gKL2Jpbi9IHlvdSEiCg==', + 'OS-EXT-STS:power_state': 1, + 'OS-EXT-STS:task_state': None, + 'OS-EXT-STS:vm_state': 'active', + 'OS-SRV-USG:launched_at': '2017-02-14T19:23:59.895661', + 'OS-SRV-USG:terminated_at': '2015-03-09T12:15:57.233772', + 'OS-SCH-HNT:scheduler_hints': {'key': '30'}, + 'accessIPv4': '1.2.3.4', + 'accessIPv6': '80fe::', + 'adminPass': '27', + 'addresses': { + 'private': [ + { + 'OS-EXT-IPS-MAC:mac_addr': 'aa:bb:cc:dd:ee:ff', + 'OS-EXT-IPS:type': 'fixed', + 'addr': '192.168.0.3', + 'version': 4, + } + ] + }, + 'block_device_mapping_v2': {'key': '29'}, + 'config_drive': '', + 'created': '2017-02-14T19:23:58Z', + 'description': 'dummy', 'flavorRef': '5', - 'flavor': {'id': 'FLAVOR_ID', 'links': {}}, - 'hostId': '6', + 'flavor': { + 'disk': 1, + 'ephemeral': 0, + 'extra_specs': { + 'hw:cpu_policy': 'dedicated', + 'hw:mem_page_size': '2048', + }, + 'original_name': 'm1.tiny.specs', + 'ram': 512, + 'swap': 0, + }, + 'hostId': '2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6', + 
'host_status': 'UP', 'id': IDENTIFIER, 'imageRef': '8', - 'image': {'id': 'IMAGE_ID', 'links': {}}, - 'links': '9', - 'metadata': {'key': '10'}, + 'image': { + 'id': '70a599e0-31e7-49b7-b260-868f441e862b', + 'links': [ + { + 'href': 'http://openstack.example.com/images/70a599e0', + 'rel': 'bookmark', + } + ], + }, + 'key_name': 'dummy', + 'links': [ + { + 'href': 'http://openstack.example.com/v2.1/servers/9168b536', + 'rel': 'self', + }, + { + 'href': 'http://openstack.example.com/servers/9168b536', + 'rel': 'bookmark', + }, + ], + 'locked': True, + 'metadata': {'My Server Name': 'Apache1'}, + 'name': 'new-server-test', 'networks': 'auto', - 'name': '11', - 'progress': 12, - 'tenant_id': '13', - 'status': '14', - 'updated': '2015-03-09T12:15:57.233772', - 'user_id': '16', - 'key_name': '17', - 'OS-DCF:diskConfig': '18', - 'OS-EXT-AZ:availability_zone': '19', - 'OS-EXT-STS:power_state': '20', - 'OS-EXT-STS:task_state': '21', - 'OS-EXT-STS:vm_state': '22', - 'os-extended-volumes:volumes_attached': '23', - 'OS-SRV-USG:launched_at': '2015-03-09T12:15:57.233772', - 'OS-SRV-USG:terminated_at': '2015-03-09T12:15:57.233772', - 'security_groups': '26', - 'adminPass': '27', - 'personality': '28', - 'block_device_mapping_v2': {'key': '29'}, - 'os:scheduler_hints': {'key': '30'}, - 'user_data': '31' + 'os-extended-volumes:volumes_attached': [], + 'progress': 0, + 'security_groups': [{'name': 'default'}], + 'server_groups': ['3caf4187-8010-491f-b6f5-a4a68a40371e'], + 'status': 'ACTIVE', + 'tags': [], + 'tenant_id': '6f70656e737461636b20342065766572', + 'trusted_image_certificates': [ + '0b5d2c72-12cc-4ba6-a8d7-3ff5cc1d8cb8', + '674736e3-f25c-405c-8362-bbf991e0ce0a', + ], + 'updated': '2017-02-14T19:24:00Z', + 'user_id': 'fake', } -class TestServer(testtools.TestCase): - +class TestServer(base.TestCase): def setUp(self): - super(TestServer, self).setUp() + super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) + 
self.resp.status_code = 200 self.sess = mock.Mock() self.sess.post = mock.Mock(return_value=self.resp) + # totally arbitrary + self.sess.default_microversion = '2.88' def test_basic(self): sot = server.Server() self.assertEqual('server', sot.resource_key) self.assertEqual('servers', sot.resources_key) self.assertEqual('/servers', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertDictEqual({"image": "image", - "flavor": "flavor", - "name": "name", - "status": "status", - "host": "host", - "all_tenants": "all_tenants", - "changes_since": "changes-since", - "limit": "limit", - "marker": "marker", - "sort_key": "sort_key", - "sort_dir": "sort_dir", - "reservation_id": "reservation_id", - "tags": "tags", - "tags_any": "tags-any", - "not_tags": "not-tags", - "not_tags_any": "not-tags-any", - "is_deleted": "deleted", - "ipv4_address": "ip", - "ipv6_address": "ip6", - }, - sot._query_mapping._mapping) + self.assertDictEqual( + { + "access_ipv4": "access_ip_v4", + "access_ipv6": "access_ip_v6", + "auto_disk_config": "auto_disk_config", + "availability_zone": "availability_zone", + "changes_before": "changes-before", + "changes_since": "changes-since", + "compute_host": "host", + "has_config_drive": "config_drive", + "created_at": "created_at", + "description": "description", + "flavor": "flavor", + "hostname": "hostname", + "image": "image", + "ipv4_address": "ip", + "ipv6_address": "ip6", + "id": "uuid", + "deleted_only": "deleted", + "is_soft_deleted": "soft_deleted", + "kernel_id": "kernel_id", + "key_name": "key_name", + "launch_index": "launch_index", + "launched_at": "launched_at", + "limit": "limit", + "locked": "locked", + "locked_by": "locked_by", + "marker": "marker", + "name": "name", + "node": 
"node", + "power_state": "power_state", + "progress": "progress", + "project_id": "project_id", + "ramdisk_id": "ramdisk_id", + "pinned_availability_zone": "pinned_availability_zone", + "reservation_id": "reservation_id", + "root_device_name": "root_device_name", + "sort_dir": "sort_dir", + "sort_key": "sort_key", + "status": "status", + "task_state": "task_state", + "terminated_at": "terminated_at", + "user_id": "user_id", + "vm_state": "vm_state", + "all_projects": "all_tenants", + "tags": "tags", + "any_tags": "tags-any", + "not_tags": "not-tags", + "not_any_tags": "not-tags-any", + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = server.Server(**EXAMPLE) @@ -107,51 +196,121 @@ def test_make_it(self): self.assertEqual(EXAMPLE['created'], sot.created_at) self.assertEqual(EXAMPLE['config_drive'], sot.has_config_drive) self.assertEqual(EXAMPLE['flavorRef'], sot.flavor_id) - self.assertEqual(EXAMPLE['flavor'], sot.flavor) + self.assertEqual(flavor.Flavor(**EXAMPLE['flavor']), sot.flavor) self.assertEqual(EXAMPLE['hostId'], sot.host_id) + self.assertEqual(EXAMPLE['host_status'], sot.host_status) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['imageRef'], sot.image_id) - self.assertEqual(EXAMPLE['image'], sot.image) + self.assertEqual(image.Image(**EXAMPLE['image']), sot.image) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['metadata'], sot.metadata) self.assertEqual(EXAMPLE['networks'], sot.networks) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['progress'], sot.progress) self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['server_groups'], sot.server_groups) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['updated'], sot.updated_at) self.assertEqual(EXAMPLE['user_id'], sot.user_id) self.assertEqual(EXAMPLE['key_name'], sot.key_name) self.assertEqual(EXAMPLE['OS-DCF:diskConfig'], sot.disk_config) - 
self.assertEqual(EXAMPLE['OS-EXT-AZ:availability_zone'], - sot.availability_zone) + self.assertEqual( + EXAMPLE['OS-EXT-AZ:availability_zone'], sot.availability_zone + ) self.assertEqual(EXAMPLE['OS-EXT-STS:power_state'], sot.power_state) self.assertEqual(EXAMPLE['OS-EXT-STS:task_state'], sot.task_state) self.assertEqual(EXAMPLE['OS-EXT-STS:vm_state'], sot.vm_state) - self.assertEqual(EXAMPLE['os-extended-volumes:volumes_attached'], - sot.attached_volumes) + self.assertEqual( + EXAMPLE['os-extended-volumes:volumes_attached'], + sot.attached_volumes, + ) self.assertEqual(EXAMPLE['OS-SRV-USG:launched_at'], sot.launched_at) - self.assertEqual(EXAMPLE['OS-SRV-USG:terminated_at'], - sot.terminated_at) + self.assertEqual( + EXAMPLE['OS-SRV-USG:terminated_at'], sot.terminated_at + ) self.assertEqual(EXAMPLE['security_groups'], sot.security_groups) self.assertEqual(EXAMPLE['adminPass'], sot.admin_password) - self.assertEqual(EXAMPLE['personality'], sot.personality) - self.assertEqual(EXAMPLE['block_device_mapping_v2'], - sot.block_device_mapping) - self.assertEqual(EXAMPLE['os:scheduler_hints'], sot.scheduler_hints) - self.assertEqual(EXAMPLE['user_data'], sot.user_data) - - def test_detail(self): - sot = server.ServerDetail() - self.assertEqual('server', sot.resource_key) - self.assertEqual('servers', sot.resources_key) - self.assertEqual('/servers/detail', sot.base_path) - self.assertEqual('compute', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) + self.assertEqual( + EXAMPLE['block_device_mapping_v2'], sot.block_device_mapping + ) + self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:host'], sot.compute_host) + self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:hostname'], sot.hostname) + self.assertEqual( + EXAMPLE['OS-EXT-SRV-ATTR:hypervisor_hostname'], + sot.hypervisor_hostname, + ) + self.assertEqual( + 
EXAMPLE['OS-EXT-SRV-ATTR:instance_name'], sot.instance_name + ) + self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:kernel_id'], sot.kernel_id) + self.assertEqual( + EXAMPLE['OS-EXT-SRV-ATTR:launch_index'], sot.launch_index + ) + self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:ramdisk_id'], sot.ramdisk_id) + self.assertEqual( + EXAMPLE['OS-EXT-SRV-ATTR:reservation_id'], sot.reservation_id + ) + self.assertEqual( + EXAMPLE['OS-EXT-SRV-ATTR:root_device_name'], sot.root_device_name + ) + self.assertEqual( + EXAMPLE['OS-SCH-HNT:scheduler_hints'], sot.scheduler_hints + ) + self.assertEqual(EXAMPLE['OS-EXT-SRV-ATTR:user_data'], sot.user_data) + self.assertEqual(EXAMPLE['locked'], sot.is_locked) + self.assertEqual( + EXAMPLE['trusted_image_certificates'], + sot.trusted_image_certificates, + ) + + def test_to_dict_flavor(self): + # Ensure to_dict properly resolves flavor and uses defaults for not + # specified flavor proerties. + sot = server.Server(**EXAMPLE) + dct = sot.to_dict() + self.assertEqual(0, dct['flavor']['vcpus']) + + def test__prepare_server(self): + zone = 1 + data = 2 + hints = {"hint": 3} + hostname = 'foo' + + sot = server.Server( + id=1, + availability_zone=zone, + user_data=data, + scheduler_hints=hints, + min_count=2, + max_count=3, + hostname=hostname, + ) + request = sot._prepare_request() + + self.assertNotIn( + "OS-EXT-AZ:availability_zone", request.body[sot.resource_key] + ) + self.assertEqual( + request.body[sot.resource_key]["availability_zone"], zone + ) + + self.assertNotIn( + "OS-EXT-SRV-ATTR:user_data", request.body[sot.resource_key] + ) + self.assertEqual(request.body[sot.resource_key]["user_data"], data) + + self.assertNotIn( + "OS-SCH-HNT:scheduler_hints", request.body[sot.resource_key] + ) + self.assertEqual(request.body["OS-SCH-HNT:scheduler_hints"], hints) + + self.assertNotIn( + "OS-EXT-SRV-ATTR:hostname", request.body[sot.resource_key] + ) + self.assertEqual(request.body[sot.resource_key]["hostname"], hostname) + + self.assertEqual(2, 
request.body[sot.resource_key]['min_count']) + self.assertEqual(3, request.body[sot.resource_key]['max_count']) def test_change_password(self): sot = server.Server(**EXAMPLE) @@ -162,7 +321,38 @@ def test_change_password(self): body = {"changePassword": {"adminPass": "a"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_get_password(self): + sot = server.Server(**EXAMPLE) + self.sess.get.return_value = fakes.FakeResponse( + data={'password': 'foo'} + ) + + result = sot.get_password(self.sess) + self.assertEqual('foo', result) + + url = 'servers/IDENTIFIER/os-server-password' + self.sess.get.assert_called_with( + url, microversion=self.sess.default_microversion + ) + + def test_clear_password(self): + sot = server.Server(**EXAMPLE) + self.sess.delete.return_value = fakes.FakeResponse( + status_code=http.HTTPStatus.NO_CONTENT, + ) + + self.assertIsNone(sot.clear_password(self.sess)) + + url = 'servers/IDENTIFIER/os-server-password' + self.sess.delete.assert_called_with( + url, microversion=self.sess.default_microversion + ) def test_reboot(self): sot = server.Server(**EXAMPLE) @@ -173,7 +363,11 @@ def test_reboot(self): body = {"reboot": {"type": "HARD"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_force_delete(self): sot = server.Server(**EXAMPLE) @@ -184,19 +378,32 @@ def test_force_delete(self): body = {'forceDelete': None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_rebuild(self): sot = server.Server(**EXAMPLE) # Let the translate pass 
through, that portion is tested elsewhere sot._translate_response = lambda arg: arg - result = sot.rebuild(self.sess, name='noo', admin_password='seekr3t', - image='http://image/1', access_ipv4="12.34.56.78", - access_ipv6="fe80::100", - metadata={"meta var": "meta val"}, - personality=[{"path": "/etc/motd", - "contents": "foo"}]) + result = sot.rebuild( + self.sess, + '123', + name='noo', + admin_password='seekr3t', + preserve_ephemeral=False, + access_ipv4="12.34.56.78", + access_ipv6="fe80::100", + metadata={"meta var": "meta val"}, + user_data="ZWNobyAiaGVsbG8gd29ybGQi", + key_name='my-ecdsa-key', + description='an updated description', + trusted_image_certificates=['foo'], + hostname='new-hostname', + ) self.assertIsInstance(result, server.Server) @@ -204,27 +411,38 @@ def test_rebuild(self): body = { "rebuild": { "name": "noo", - "imageRef": "http://image/1", + "imageRef": "123", "adminPass": "seekr3t", "accessIPv4": "12.34.56.78", "accessIPv6": "fe80::100", "metadata": {"meta var": "meta val"}, - "personality": [{"path": "/etc/motd", "contents": "foo"}], - "preserve_ephemeral": False + "user_data": "ZWNobyAiaGVsbG8gd29ybGQi", + "preserve_ephemeral": False, + "key_name": 'my-ecdsa-key', + "description": 'an updated description', + "trusted_image_certificates": ['foo'], + "hostname": "new-hostname", } } headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_rebuild_minimal(self): sot = server.Server(**EXAMPLE) # Let the translate pass through, that portion is tested elsewhere sot._translate_response = lambda arg: arg - result = sot.rebuild(self.sess, name='nootoo', - admin_password='seekr3two', - image='http://image/2') + result = sot.rebuild( + self.sess, + '123', + name='nootoo', + admin_password='seekr3two', + ) self.assertIsInstance(result, server.Server) @@ -232,14 +450,55 @@ def 
test_rebuild_minimal(self): body = { "rebuild": { "name": "nootoo", - "imageRef": "http://image/2", + "imageRef": "123", "adminPass": "seekr3two", - "preserve_ephemeral": False } } headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_rebuild_none_values(self): + sot = server.Server(**EXAMPLE) + # Let the translate pass through, that portion is tested elsewhere + sot._translate_response = lambda arg: arg + + result = sot.rebuild( + self.sess, + '123', + admin_password=None, + access_ipv4=None, + access_ipv6=None, + metadata=None, + user_data=None, + description=None, + ) + + self.assertIsInstance(result, server.Server) + + url = 'servers/IDENTIFIER/action' + body = { + "rebuild": { + "imageRef": "123", + "adminPass": None, + "accessIPv4": None, + "accessIPv6": None, + "metadata": None, + "user_data": None, + "description": None, + } + } + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_resize(self): sot = server.Server(**EXAMPLE) @@ -250,7 +509,11 @@ def test_resize(self): body = {"resize": {"flavorRef": "2"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_confirm_resize(self): sot = server.Server(**EXAMPLE) @@ -261,7 +524,11 @@ def test_confirm_resize(self): body = {"confirmResize": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_revert_resize(self): sot = server.Server(**EXAMPLE) @@ -272,32 +539,120 @@ def test_revert_resize(self): 
body = {"revertResize": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) - def test_create_image(self): + def test_shelve_offload(self): + sot = server.Server(**EXAMPLE) + + self.assertIsNone(sot.shelve_offload(self.sess)) + + url = 'servers/IDENTIFIER/action' + body = {"shelveOffload": None} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_create_image_header(self): sot = server.Server(**EXAMPLE) name = 'noo' metadata = {'nu': 'image', 'created': 'today'} - self.assertIsNone(sot.create_image(self.sess, name, metadata)) - url = 'servers/IDENTIFIER/action' body = {"createImage": {'name': name, 'metadata': metadata}} headers = {'Accept': ''} + + rsp = mock.Mock() + rsp.json.return_value = None + rsp.headers = {'Location': 'dummy/dummy2'} + rsp.status_code = 200 + + self.sess.post.return_value = rsp + + self.endpoint_data = mock.Mock( + spec=['min_microversion', 'max_microversion'], + min_microversion=None, + max_microversion='2.44', + ) + self.sess.get_endpoint_data.return_value = self.endpoint_data + + image_id = sot.create_image(self.sess, name, metadata) + self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) - def test_create_image_minimal(self): + self.assertEqual('dummy2', image_id) + + def test_create_image_microver(self): sot = server.Server(**EXAMPLE) name = 'noo' + metadata = {'nu': 'image', 'created': 'today'} - self.assertIsNone(self.resp.body, sot.create_image(self.sess, name)) + url = 'servers/IDENTIFIER/action' + body = {"createImage": {'name': name, 'metadata': metadata}} + headers = {'Accept': ''} + + rsp = mock.Mock() + 
rsp.json.return_value = {'image_id': 'dummy3'} + rsp.headers = {'Location': 'dummy/dummy2'} + rsp.status_code = 200 + + self.sess.post.return_value = rsp + + self.endpoint_data = mock.Mock( + spec=['min_microversion', 'max_microversion'], + min_microversion='2.1', + max_microversion='2.56', + ) + self.sess.get_endpoint_data.return_value = self.endpoint_data + self.sess.default_microversion = None + + image_id = sot.create_image(self.sess, name, metadata) + + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion='2.45' + ) + self.assertEqual('dummy3', image_id) + + def test_create_image_minimal(self): + sot = server.Server(**EXAMPLE) + name = 'noo' url = 'servers/IDENTIFIER/action' body = {"createImage": {'name': name}} headers = {'Accept': ''} + + rsp = mock.Mock() + rsp.json.return_value = None + rsp.headers = {'Location': 'dummy/dummy2'} + rsp.status_code = 200 + + self.sess.post.return_value = rsp + + self.endpoint_data = mock.Mock( + spec=['min_microversion', 'max_microversion'], + min_microversion='2.1', + max_microversion='2.56', + ) + self.sess.get_endpoint_data.return_value = self.endpoint_data + self.sess.default_microversion = None + + self.assertIsNone(self.resp.body, sot.create_image(self.sess, name)) + self.sess.post.assert_called_with( - url, endpoint_filter=dict(sot.service), json=body, headers=headers) + url, json=body, headers=headers, microversion='2.45' + ) def test_add_security_group(self): sot = server.Server(**EXAMPLE) @@ -308,7 +663,11 @@ def test_add_security_group(self): body = {"addSecurityGroup": {"name": "group"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_remove_security_group(self): sot = server.Server(**EXAMPLE) @@ -319,7 +678,11 @@ def test_remove_security_group(self): body = {"removeSecurityGroup": {"name": "group"}} headers = 
{'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_reset_state(self): sot = server.Server(**EXAMPLE) @@ -330,7 +693,11 @@ def test_reset_state(self): body = {"os-resetState": {"state": 'active'}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_add_fixed_ip(self): sot = server.Server(**EXAMPLE) @@ -342,7 +709,11 @@ def test_add_fixed_ip(self): body = {"addFixedIp": {"networkId": "NETWORK-ID"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_remove_fixed_ip(self): sot = server.Server(**EXAMPLE) @@ -354,7 +725,11 @@ def test_remove_fixed_ip(self): body = {"removeFixedIp": {"address": "ADDRESS"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_add_floating_ip(self): sot = server.Server(**EXAMPLE) @@ -366,7 +741,11 @@ def test_add_floating_ip(self): body = {"addFloatingIp": {"address": "FLOATING-IP"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_add_floating_ip_with_fixed_addr(self): sot = server.Server(**EXAMPLE) @@ -375,11 +754,19 @@ def test_add_floating_ip_with_fixed_addr(self): self.assertIsNone(res) url = 'servers/IDENTIFIER/action' - body = {"addFloatingIp": {"address": "FLOATING-IP", - "fixed_address": 
"FIXED-ADDR"}} + body = { + "addFloatingIp": { + "address": "FLOATING-IP", + "fixed_address": "FIXED-ADDR", + } + } headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_remove_floating_ip(self): sot = server.Server(**EXAMPLE) @@ -391,7 +778,33 @@ def test_remove_floating_ip(self): body = {"removeFloatingIp": {"address": "I-AM-FLOATING"}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_backup(self): + sot = server.Server(**EXAMPLE) + + res = sot.backup(self.sess, "name", "daily", 1) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = { + "createBackup": { + "name": "name", + "backup_type": "daily", + "rotation": 1, + } + } + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_pause(self): sot = server.Server(**EXAMPLE) @@ -403,7 +816,11 @@ def test_pause(self): body = {"pause": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_unpause(self): sot = server.Server(**EXAMPLE) @@ -415,7 +832,11 @@ def test_unpause(self): body = {"unpause": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_suspend(self): sot = server.Server(**EXAMPLE) @@ -427,7 +848,11 @@ def test_suspend(self): body = {"suspend": None} headers = {'Accept': ''} 
self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_resume(self): sot = server.Server(**EXAMPLE) @@ -439,7 +864,11 @@ def test_resume(self): body = {"resume": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_lock(self): sot = server.Server(**EXAMPLE) @@ -451,7 +880,27 @@ def test_lock(self): body = {"lock": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_lock_with_options(self): + sot = server.Server(**EXAMPLE) + + res = sot.lock(self.sess, locked_reason='Because why not') + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {'lock': {'locked_reason': 'Because why not'}} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_unlock(self): sot = server.Server(**EXAMPLE) @@ -463,7 +912,11 @@ def test_unlock(self): body = {"unlock": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_rescue(self): sot = server.Server(**EXAMPLE) @@ -475,7 +928,11 @@ def test_rescue(self): body = {"rescue": {}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_rescue_with_options(self): sot = server.Server(**EXAMPLE) 
@@ -484,11 +941,16 @@ def test_rescue_with_options(self): self.assertIsNone(res) url = 'servers/IDENTIFIER/action' - body = {"rescue": {'adminPass': 'SECRET', - 'rescue_image_ref': 'IMG-ID'}} + body = { + "rescue": {'adminPass': 'SECRET', 'rescue_image_ref': 'IMG-ID'} + } headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_unrescue(self): sot = server.Server(**EXAMPLE) @@ -500,7 +962,11 @@ def test_unrescue(self): body = {"unrescue": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_evacuate(self): sot = server.Server(**EXAMPLE) @@ -512,21 +978,40 @@ def test_evacuate(self): body = {"evacuate": {}} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_evacuate_with_options(self): sot = server.Server(**EXAMPLE) - res = sot.evacuate(self.sess, host='HOST2', admin_pass='NEW_PASS', - force=True) + res = sot.evacuate( + self.sess, + host='HOST2', + admin_pass='NEW_PASS', + force=True, + on_shared_storage=False, + ) self.assertIsNone(res) url = 'servers/IDENTIFIER/action' - body = {"evacuate": {'host': 'HOST2', 'adminPass': 'NEW_PASS', - 'force': True}} + body = { + "evacuate": { + 'host': 'HOST2', + 'adminPass': 'NEW_PASS', + 'force': True, + 'onSharedStorage': False, + } + } headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_start(self): sot = server.Server(**EXAMPLE) @@ -538,7 +1023,11 @@ 
def test_start(self): body = {"os-start": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_stop(self): sot = server.Server(**EXAMPLE) @@ -550,7 +1039,27 @@ def test_stop(self): body = {"os-stop": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_restore(self): + sot = server.Server(**EXAMPLE) + + res = sot.restore(self.sess) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {'restore': None} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_shelve(self): sot = server.Server(**EXAMPLE) @@ -562,7 +1071,11 @@ def test_shelve(self): body = {"shelve": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) def test_unshelve(self): sot = server.Server(**EXAMPLE) @@ -574,4 +1087,457 @@ def test_unshelve(self): body = {"unshelve": None} headers = {'Accept': ''} self.sess.post.assert_called_with( - url, endpoint_filter=sot.service, json=body, headers=headers) + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_unshelve_availability_zone(self): + sot = server.Server(**EXAMPLE) + + res = sot.unshelve(self.sess, sot.availability_zone) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {"unshelve": {"availability_zone": sot.availability_zone}} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + 
microversion=self.sess.default_microversion, + ) + + def test_unshelve_unpin_az(self): + sot = server.Server(**EXAMPLE) + + res = sot.unshelve(self.sess, availability_zone=None) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {"unshelve": {"availability_zone": None}} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_unshelve_host(self): + sot = server.Server(**EXAMPLE) + + res = sot.unshelve(self.sess, host=sot.hypervisor_hostname) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {"unshelve": {"host": sot.hypervisor_hostname}} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_unshelve_host_and_availability_zone(self): + sot = server.Server(**EXAMPLE) + + res = sot.unshelve( + self.sess, + availability_zone=sot.availability_zone, + host=sot.hypervisor_hostname, + ) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = { + "unshelve": { + "availability_zone": sot.availability_zone, + "host": sot.hypervisor_hostname, + } + } + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_migrate(self): + sot = server.Server(**EXAMPLE) + + res = sot.migrate(self.sess) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {"migrate": None} + + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_trigger_crash_dump(self): + sot = server.Server(**EXAMPLE) + + res = sot.trigger_crash_dump(self.sess) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {'trigger_crash_dump': None} + headers = {'Accept': ''} + 
self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_get_console_output(self): + sot = server.Server(**EXAMPLE) + + res = sot.get_console_output(self.sess) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {'os-getConsoleOutput': {}} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + res = sot.get_console_output(self.sess, length=1) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {'os-getConsoleOutput': {'length': 1}} + + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_get_console_url(self): + sot = server.Server(**EXAMPLE) + + resp = mock.Mock() + resp.body = {'console': {'a': 'b'}} + resp.json = mock.Mock(return_value=resp.body) + resp.status_code = 200 + self.sess.post.return_value = resp + + res = sot.get_console_url(self.sess, 'novnc') + self.sess.post.assert_called_with( + 'servers/IDENTIFIER/action', + json={'os-getVNCConsole': {'type': 'novnc'}}, + headers={'Accept': ''}, + microversion=self.sess.default_microversion, + ) + self.assertDictEqual(resp.body['console'], res) + + sot.get_console_url(self.sess, 'xvpvnc') + self.sess.post.assert_called_with( + 'servers/IDENTIFIER/action', + json={'os-getVNCConsole': {'type': 'xvpvnc'}}, + headers={'Accept': ''}, + microversion=self.sess.default_microversion, + ) + + sot.get_console_url(self.sess, 'spice-html5') + self.sess.post.assert_called_with( + 'servers/IDENTIFIER/action', + json={'os-getSPICEConsole': {'type': 'spice-html5'}}, + headers={'Accept': ''}, + microversion=self.sess.default_microversion, + ) + + sot.get_console_url(self.sess, 'spice-direct') + self.sess.post.assert_called_with( + 'servers/IDENTIFIER/action', + json={'os-getSPICEConsole': {'type': 
'spice-direct'}}, + headers={'Accept': ''}, + microversion=self.sess.default_microversion, + ) + + sot.get_console_url(self.sess, 'rdp-html5') + self.sess.post.assert_called_with( + 'servers/IDENTIFIER/action', + json={'os-getRDPConsole': {'type': 'rdp-html5'}}, + headers={'Accept': ''}, + microversion=self.sess.default_microversion, + ) + + sot.get_console_url(self.sess, 'serial') + self.sess.post.assert_called_with( + 'servers/IDENTIFIER/action', + json={'os-getSerialConsole': {'type': 'serial'}}, + headers={'Accept': ''}, + microversion=self.sess.default_microversion, + ) + + self.assertRaises( + ValueError, sot.get_console_url, self.sess, 'fake_type' + ) + + def test_live_migrate_no_force(self): + sot = server.Server(**EXAMPLE) + + class FakeEndpointData: + min_microversion = None + max_microversion = None + + self.sess.get_endpoint_data.return_value = FakeEndpointData() + + ex = self.assertRaises( + ValueError, + sot.live_migrate, + self.sess, + host='HOST2', + force=False, + block_migration=False, + ) + self.assertIn("Live migration on this cloud implies 'force'", str(ex)) + + def test_live_migrate_no_microversion_force_true(self): + sot = server.Server(**EXAMPLE) + + class FakeEndpointData: + min_microversion = None + max_microversion = None + + self.sess.get_endpoint_data.return_value = FakeEndpointData() + + res = sot.live_migrate( + self.sess, + host='HOST2', + force=True, + block_migration=True, + disk_over_commit=True, + ) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = { + 'os-migrateLive': { + 'host': 'HOST2', + 'disk_over_commit': True, + 'block_migration': True, + } + } + + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_live_migrate_25(self): + sot = server.Server(**EXAMPLE) + + class FakeEndpointData: + min_microversion = '2.1' + max_microversion = '2.25' + + self.sess.get_endpoint_data.return_value = 
FakeEndpointData() + self.sess.default_microversion = None + + res = sot.live_migrate( + self.sess, host='HOST2', force=True, block_migration=False + ) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = { + "os-migrateLive": { + 'block_migration': False, + 'host': 'HOST2', + } + } + + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion='2.25' + ) + + def test_live_migrate_25_default_block(self): + sot = server.Server(**EXAMPLE) + + class FakeEndpointData: + min_microversion = '2.1' + max_microversion = '2.25' + + self.sess.get_endpoint_data.return_value = FakeEndpointData() + self.sess.default_microversion = None + + res = sot.live_migrate( + self.sess, host='HOST2', force=True, block_migration=None + ) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = { + "os-migrateLive": { + 'block_migration': 'auto', + 'host': 'HOST2', + } + } + + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion='2.25' + ) + + def test_live_migrate_30(self): + sot = server.Server(**EXAMPLE) + + class FakeEndpointData: + min_microversion = '2.1' + max_microversion = '2.30' + + self.sess.get_endpoint_data.return_value = FakeEndpointData() + self.sess.default_microversion = None + + res = sot.live_migrate( + self.sess, host='HOST2', force=False, block_migration=False + ) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = {'os-migrateLive': {'block_migration': False, 'host': 'HOST2'}} + + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion='2.30' + ) + + def test_live_migrate_30_force(self): + sot = server.Server(**EXAMPLE) + + class FakeEndpointData: + min_microversion = '2.1' + max_microversion = '2.30' + + self.sess.get_endpoint_data.return_value = FakeEndpointData() + self.sess.default_microversion = None + + res = sot.live_migrate( + self.sess, 
host='HOST2', force=True, block_migration=None + ) + + self.assertIsNone(res) + url = 'servers/IDENTIFIER/action' + body = { + 'os-migrateLive': { + 'block_migration': 'auto', + 'host': 'HOST2', + 'force': True, + } + } + + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion='2.30' + ) + + def test_get_topology(self): + sot = server.Server(**EXAMPLE) + + class FakeEndpointData: + min_microversion = '2.1' + max_microversion = '2.78' + + self.sess.get_endpoint_data.return_value = FakeEndpointData() + self.sess.default_microversion = None + + response = mock.Mock() + + topology = { + "nodes": [ + { + "cpu_pinning": {"0": 0, "1": 5}, + "host_node": 0, + "memory_mb": 1024, + "siblings": [[0, 1]], + "vcpu_set": [0, 1], + }, + { + "cpu_pinning": {"2": 1, "3": 8}, + "host_node": 1, + "memory_mb": 2048, + "siblings": [[2, 3]], + "vcpu_set": [2, 3], + }, + ], + "pagesize_kb": 4, + } + + response.status_code = 200 + response.json.return_value = topology + + self.sess.get.return_value = response + + fetched_topology = sot.fetch_topology(self.sess) + + url = 'servers/IDENTIFIER/topology' + self.sess.get.assert_called_with(url) + + self.assertEqual(fetched_topology, topology) + + def test_get_security_groups(self): + sot = server.Server(**EXAMPLE) + + response = mock.Mock() + + sgs = [ + { + 'description': 'default', + 'id': 1, + 'name': 'default', + 'rules': [ + { + 'direction': 'egress', + 'ethertype': 'IPv6', + 'id': '3c0e45ff-adaf-4124-b083-bf390e5482ff', + 'port_range_max': None, + 'port_range_min': None, + 'protocol': None, + 'remote_group_id': None, + 'remote_ip_prefix': None, + 'security_group_id': '1', + 'project_id': 'e4f50856753b4dc6afee5fa6b9b6c550', + 'revision_number': 1, + 'tags': ['tag1,tag2'], + 'tenant_id': 'e4f50856753b4dc6afee5fa6b9b6c550', + 'created_at': '2018-03-19T19:16:56Z', + 'updated_at': '2018-03-19T19:16:56Z', + 'description': '', + } + ], + 'tenant_id': 'e4f50856753b4dc6afee5fa6b9b6c550', + } 
+ ] + + response.status_code = 200 + response.json.return_value = {'security_groups': sgs} + self.sess.get.return_value = response + + sot.fetch_security_groups(self.sess) + + url = 'servers/IDENTIFIER/os-security-groups' + self.sess.get.assert_called_with(url) + + self.assertEqual(sot.security_groups, sgs) diff --git a/openstack/tests/unit/compute/v2/test_server_actions.py b/openstack/tests/unit/compute/v2/test_server_actions.py new file mode 100644 index 0000000000..14d94b0ecf --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_server_actions.py @@ -0,0 +1,90 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack.compute.v2 import server_action +from openstack.tests.unit import base + +EXAMPLE = { + 'action': 'stop', + 'events': [ + { + 'event': 'compute_stop_instance', + 'finish_time': '2018-04-25T01:26:36.790544', + 'host': 'compute', + 'hostId': '2091634baaccdc4c5a1d57069c833e402921df696b7f970791b12ec6', # noqa: E501 + 'result': 'Success', + 'start_time': '2018-04-25T01:26:36.539271', + 'traceback': None, + 'details': None, + } + ], + 'instance_uuid': '4bf3473b-d550-4b65-9409-292d44ab14a2', + 'message': None, + 'project_id': '6f70656e737461636b20342065766572', + 'request_id': 'req-0d819d5c-1527-4669-bdf0-ffad31b5105b', + 'start_time': '2018-04-25T01:26:36.341290', + 'updated_at': '2018-04-25T01:26:36.790544', + 'user_id': 'admin', +} + + +class TestServerAction(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.sess = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + + def test_basic(self): + sot = server_action.ServerAction() + self.assertEqual('instanceAction', sot.resource_key) + self.assertEqual('instanceActions', sot.resources_key) + self.assertEqual( + '/servers/%(server_id)s/os-instance-actions', + sot.base_path, + ) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + + self.assertDictEqual( + { + 'changes_before': 'changes-before', + 'changes_since': 'changes-since', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = server_action.ServerAction(**EXAMPLE) + self.assertEqual(EXAMPLE['action'], sot.action) + # FIXME: This isn't populated since it conflicts with the server_id URI + # argument + # self.assertEqual(EXAMPLE['instance_uuid'], sot.server_id) + 
self.assertEqual(EXAMPLE['message'], sot.message) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['request_id'], sot.request_id) + self.assertEqual(EXAMPLE['start_time'], sot.start_time) + self.assertEqual(EXAMPLE['user_id'], sot.user_id) + self.assertEqual( + [server_action.ServerActionEvent(**e) for e in EXAMPLE['events']], + sot.events, + ) diff --git a/openstack/tests/unit/compute/v2/test_server_diagnostics.py b/openstack/tests/unit/compute/v2/test_server_diagnostics.py new file mode 100644 index 0000000000..f71939df32 --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_server_diagnostics.py @@ -0,0 +1,79 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.compute.v2 import server_diagnostics +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "config_drive": True, + "cpu_details": [{"id": 0, "time": 17300000000, "utilisation": 15}], + "disk_details": [ + { + "errors_count": 1, + "read_bytes": 262144, + "read_requests": 112, + "write_bytes": 5778432, + "write_requests": 488, + } + ], + "driver": "libvirt", + "hypervisor": "kvm", + "hypervisor_os": "ubuntu", + "memory_details": {"maximum": 524288, "used": 0}, + "nic_details": [ + { + "mac_address": "01:23:45:67:89:ab", + "rx_drop": 200, + "rx_errors": 100, + "rx_octets": 2070139, + "rx_packets": 26701, + "rx_rate": 300, + "tx_drop": 500, + "tx_errors": 400, + "tx_octets": 140208, + "tx_packets": 662, + "tx_rate": 600, + } + ], + "num_cpus": 1, + "num_disks": 1, + "num_nics": 1, + "state": "running", + "uptime": 46664, +} + + +class TestServerInterface(base.TestCase): + def test_basic(self): + sot = server_diagnostics.ServerDiagnostics() + self.assertEqual('diagnostics', sot.resource_key) + self.assertEqual('/servers/%(server_id)s/diagnostics', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.requires_id) + + def test_make_it(self): + sot = server_diagnostics.ServerDiagnostics(**EXAMPLE) + self.assertEqual(EXAMPLE['config_drive'], sot.has_config_drive) + self.assertEqual(EXAMPLE['cpu_details'], sot.cpu_details) + self.assertEqual(EXAMPLE['disk_details'], sot.disk_details) + self.assertEqual(EXAMPLE['driver'], sot.driver) + self.assertEqual(EXAMPLE['hypervisor'], sot.hypervisor) + self.assertEqual(EXAMPLE['hypervisor_os'], sot.hypervisor_os) + self.assertEqual(EXAMPLE['memory_details'], sot.memory_details) + self.assertEqual(EXAMPLE['nic_details'], sot.nic_details) + self.assertEqual(EXAMPLE['num_cpus'], sot.num_cpus) + self.assertEqual(EXAMPLE['num_disks'], sot.num_disks) + self.assertEqual(EXAMPLE['num_nics'], sot.num_nics) + self.assertEqual(EXAMPLE['state'], sot.state) + 
self.assertEqual(EXAMPLE['uptime'], sot.uptime) diff --git a/openstack/tests/unit/compute/v2/test_server_group.py b/openstack/tests/unit/compute/v2/test_server_group.py index 3fabbfc12b..b1eb6737e7 100644 --- a/openstack/tests/unit/compute/v2/test_server_group.py +++ b/openstack/tests/unit/compute/v2/test_server_group.py @@ -10,36 +10,42 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.compute.v2 import server_group +from openstack.tests.unit import base + EXAMPLE = { 'id': 'IDENTIFIER', 'name': 'test', 'members': ['server1', 'server2'], - 'metadata': {'k': 'v'}, + 'metadata': {}, 'policies': ['anti-affinity'], + 'rules': { + 'max_server_per_host': 5, + }, } -class TestServerGroup(testtools.TestCase): - +class TestServerGroup(base.TestCase): def test_basic(self): sot = server_group.ServerGroup() self.assertEqual('server_group', sot.resource_key) self.assertEqual('server_groups', sot.resources_key) self.assertEqual('/os-server-groups', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertDictEqual({"all_projects": "all_projects", - "limit": "limit", "marker": "marker"}, - sot._query_mapping._mapping) + self.assertDictEqual( + { + "all_projects": "all_projects", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = server_group.ServerGroup(**EXAMPLE) @@ -48,3 +54,4 @@ def test_make_it(self): self.assertEqual(EXAMPLE['members'], sot.member_ids) self.assertEqual(EXAMPLE['metadata'], sot.metadata) self.assertEqual(EXAMPLE['policies'], sot.policies) + self.assertEqual(EXAMPLE['rules'], sot.rules) diff --git 
a/openstack/tests/unit/compute/v2/test_server_interface.py b/openstack/tests/unit/compute/v2/test_server_interface.py index 64467541a5..74194a8129 100644 --- a/openstack/tests/unit/compute/v2/test_server_interface.py +++ b/openstack/tests/unit/compute/v2/test_server_interface.py @@ -10,16 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.compute.v2 import server_interface +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'fixed_ips': [ { 'ip_address': '192.168.1.1', - 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef' + 'subnet_id': 'f8a6e8f8-c2ec-497c-9f23-da9616de54ef', } ], 'mac_addr': '2', @@ -27,20 +27,19 @@ 'port_id': '4', 'port_state': '5', 'server_id': '6', + 'tag': '7', } -class TestServerInterface(testtools.TestCase): - +class TestServerInterface(base.TestCase): def test_basic(self): sot = server_interface.ServerInterface() self.assertEqual('interfaceAttachment', sot.resource_key) self.assertEqual('interfaceAttachments', sot.resources_key) self.assertEqual('/servers/%(server_id)s/os-interface', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -52,3 +51,4 @@ def test_make_it(self): self.assertEqual(EXAMPLE['port_id'], sot.port_id) self.assertEqual(EXAMPLE['port_state'], sot.port_state) self.assertEqual(EXAMPLE['server_id'], sot.server_id) + self.assertEqual(EXAMPLE['tag'], sot.tag) diff --git a/openstack/tests/unit/compute/v2/test_server_ip.py b/openstack/tests/unit/compute/v2/test_server_ip.py index 770b67a217..833fae7696 100644 --- a/openstack/tests/unit/compute/v2/test_server_ip.py +++ b/openstack/tests/unit/compute/v2/test_server_ip.py @@ -10,10 +10,10 @@ # License for the 
specific language governing permissions and limitations # under the License. -import mock -import testtools +from unittest import mock from openstack.compute.v2 import server_ip +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,16 +23,14 @@ } -class TestServerIP(testtools.TestCase): - +class TestServerIP(base.TestCase): def test_basic(self): sot = server_ip.ServerIP() self.assertEqual('addresses', sot.resources_key) self.assertEqual('/servers/%(server_id)s/ips', sot.base_path) - self.assertEqual('compute', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -47,29 +45,36 @@ def test_list(self): resp = mock.Mock() sess.get.return_value = resp resp.json.return_value = { - "addresses": {"label1": [{"version": 1, "addr": "a1"}, - {"version": 2, "addr": "a2"}], - "label2": [{"version": 3, "addr": "a3"}, - {"version": 4, "addr": "a4"}]}} + "addresses": { + "label1": [ + {"version": 1, "addr": "a1"}, + {"version": 2, "addr": "a2"}, + ], + "label2": [ + {"version": 3, "addr": "a3"}, + {"version": 4, "addr": "a4"}, + ], + } + } ips = list(server_ip.ServerIP.list(sess, server_id=IDENTIFIER)) self.assertEqual(4, len(ips)) ips = sorted(ips, key=lambda ip: ip.version) - self.assertEqual(type(ips[0]), server_ip.ServerIP) + self.assertIsInstance(ips[0], server_ip.ServerIP) self.assertEqual(ips[0].network_label, "label1") self.assertEqual(ips[0].address, "a1") self.assertEqual(ips[0].version, 1) - self.assertEqual(type(ips[1]), server_ip.ServerIP) + self.assertIsInstance(ips[1], server_ip.ServerIP) self.assertEqual(ips[1].network_label, "label1") self.assertEqual(ips[1].address, "a2") self.assertEqual(ips[1].version, 2) - self.assertEqual(type(ips[2]), server_ip.ServerIP) + self.assertIsInstance(ips[2], server_ip.ServerIP) 
self.assertEqual(ips[2].network_label, "label2") self.assertEqual(ips[2].address, "a3") self.assertEqual(ips[2].version, 3) - self.assertEqual(type(ips[3]), server_ip.ServerIP) + self.assertIsInstance(ips[3], server_ip.ServerIP) self.assertEqual(ips[3].network_label, "label2") self.assertEqual(ips[3].address, "a4") self.assertEqual(ips[3].version, 4) @@ -79,22 +84,24 @@ def test_list_network_label(self): sess = mock.Mock() resp = mock.Mock() sess.get.return_value = resp - resp.json.return_value = {label: [{"version": 1, - "addr": "a1"}, - {"version": 2, - "addr": "a2"}]} + resp.json.return_value = { + label: [{"version": 1, "addr": "a1"}, {"version": 2, "addr": "a2"}] + } - ips = list(server_ip.ServerIP.list(sess, server_id=IDENTIFIER, - network_label=label)) + ips = list( + server_ip.ServerIP.list( + sess, server_id=IDENTIFIER, network_label=label + ) + ) self.assertEqual(2, len(ips)) ips = sorted(ips, key=lambda ip: ip.version) - self.assertEqual(type(ips[0]), server_ip.ServerIP) + self.assertIsInstance(ips[0], server_ip.ServerIP) self.assertEqual(ips[0].network_label, label) self.assertEqual(ips[0].address, "a1") self.assertEqual(ips[0].version, 1) - self.assertEqual(type(ips[1]), server_ip.ServerIP) + self.assertIsInstance(ips[1], server_ip.ServerIP) self.assertEqual(ips[1].network_label, label) self.assertEqual(ips[1].address, "a2") self.assertEqual(ips[1].version, 2) diff --git a/openstack/tests/unit/compute/v2/test_server_migration.py b/openstack/tests/unit/compute/v2/test_server_migration.py new file mode 100644 index 0000000000..7fa486ffa5 --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_server_migration.py @@ -0,0 +1,117 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.compute.v2 import server_migration +from openstack.tests.unit import base + +EXAMPLE = { + 'id': 4, + 'server_id': '4cfba335-03d8-49b2-8c52-e69043d1e8fe', + 'server_uuid': '4cfba335-03d8-49b2-8c52-e69043d1e8fe', + 'user_id': '8dbaa0f0-ab95-4ffe-8cb4-9c89d2ac9d24', + 'project_id': '5f705771-3aa9-4f4c-8660-0d9522ffdbea', + 'created_at': '2016-01-29T13:42:02.000000', + 'updated_at': '2016-01-29T13:42:02.000000', + 'status': 'migrating', + 'source_compute': 'compute1', + 'source_node': 'node1', + 'dest_host': '1.2.3.4', + 'dest_compute': 'compute2', + 'dest_node': 'node2', + 'memory_processed_bytes': 12345, + 'memory_remaining_bytes': 111111, + 'memory_total_bytes': 123456, + 'disk_processed_bytes': 23456, + 'disk_remaining_bytes': 211111, + 'disk_total_bytes': 234567, +} + + +class TestServerMigration(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.sess = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + + def test_basic(self): + sot = server_migration.ServerMigration() + self.assertEqual('migration', sot.resource_key) + self.assertEqual('migrations', sot.resources_key) + self.assertEqual('/servers/%(server_id)s/migrations', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + def test_make_it(self): + sot = 
server_migration.ServerMigration(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + # FIXME(stephenfin): This conflicts since there is a server ID in the + # URI *and* in the body. We need a field that handles both or we need + # to use different names. + # self.assertEqual(EXAMPLE['server_uuid'], sot.server_id) + self.assertEqual(EXAMPLE['user_id'], sot.user_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['source_compute'], sot.source_compute) + self.assertEqual(EXAMPLE['source_node'], sot.source_node) + self.assertEqual(EXAMPLE['dest_host'], sot.dest_host) + self.assertEqual(EXAMPLE['dest_compute'], sot.dest_compute) + self.assertEqual(EXAMPLE['dest_node'], sot.dest_node) + self.assertEqual( + EXAMPLE['memory_processed_bytes'], + sot.memory_processed_bytes, + ) + self.assertEqual( + EXAMPLE['memory_remaining_bytes'], + sot.memory_remaining_bytes, + ) + self.assertEqual(EXAMPLE['memory_total_bytes'], sot.memory_total_bytes) + self.assertEqual( + EXAMPLE['disk_processed_bytes'], + sot.disk_processed_bytes, + ) + self.assertEqual( + EXAMPLE['disk_remaining_bytes'], + sot.disk_remaining_bytes, + ) + self.assertEqual(EXAMPLE['disk_total_bytes'], sot.disk_total_bytes) + + @mock.patch.object( + server_migration.ServerMigration, + '_get_session', + lambda self, x: x, + ) + def test_force_complete(self): + sot = server_migration.ServerMigration(**EXAMPLE) + + self.assertIsNone(sot.force_complete(self.sess)) + + url = 'servers/{}/migrations/{}/action'.format( + EXAMPLE['server_id'], + EXAMPLE['id'], + ) + body = {'force_complete': None} + self.sess.post.assert_called_with( + url, + microversion=mock.ANY, + json=body, + ) diff --git a/openstack/tests/unit/compute/v2/test_server_remote_console.py b/openstack/tests/unit/compute/v2/test_server_remote_console.py new file 
mode 100644 index 0000000000..eac32b556c --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_server_remote_console.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.compute.v2 import server_remote_console +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = {'protocol': 'rdp', 'type': 'rdp', 'url': 'fake'} + + +class TestServerRemoteConsole(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = '2.9' + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.sess = mock.Mock() + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_basic(self): + sot = server_remote_console.ServerRemoteConsole() + self.assertEqual('remote_console', sot.resource_key) + self.assertEqual( + '/servers/%(server_id)s/remote-consoles', sot.base_path + ) + self.assertTrue(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = server_remote_console.ServerRemoteConsole(**EXAMPLE) + self.assertEqual(EXAMPLE['url'], sot.url) + + def test_create_type_mks_old(self): + sot = 
server_remote_console.ServerRemoteConsole( + server_id='fake_server', type='webmks' + ) + + class FakeEndpointData: + min_microversion = '2' + max_microversion = '2.5' + + self.sess.get_endpoint_data.return_value = FakeEndpointData() + + self.assertRaises(ValueError, sot.create, self.sess) diff --git a/openstack/tests/unit/compute/v2/test_service.py b/openstack/tests/unit/compute/v2/test_service.py index b993b5ac60..02c62895f9 100644 --- a/openstack/tests/unit/compute/v2/test_service.py +++ b/openstack/tests/unit/compute/v2/test_service.py @@ -10,10 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools +from unittest import mock from openstack.compute.v2 import service +from openstack import exceptions +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -22,59 +23,198 @@ 'host': 'host1', 'status': 'enabled', 'state': 'up', - 'zone': 'nova' + 'zone': 'nova', } -class TestService(testtools.TestCase): - +class TestService(base.TestCase): def setUp(self): - super(TestService, self).setUp() + super().setUp() self.resp = mock.Mock() - self.resp.body = None + self.resp.body = {'service': {}} self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.resp.headers = {} self.sess = mock.Mock() self.sess.put = mock.Mock(return_value=self.resp) + self.sess.default_microversion = '2.1' def test_basic(self): sot = service.Service() self.assertEqual('service', sot.resource_key) self.assertEqual('services', sot.resources_key) self.assertEqual('/os-services', sot.base_path) - self.assertEqual('compute', sot.service.service_type) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_fetch) + + self.assertDictEqual( + { + 'binary': 'binary', + 'host': 'host', + 'limit': 'limit', + 'marker': 'marker', + 'name': 'binary', + }, + 
sot._query_mapping._mapping, + ) def test_make_it(self): sot = service.Service(**EXAMPLE) self.assertEqual(EXAMPLE['host'], sot.host) self.assertEqual(EXAMPLE['binary'], sot.binary) + self.assertEqual(EXAMPLE['binary'], sot.name) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['state'], sot.state) - self.assertEqual(EXAMPLE['zone'], sot.zone) + self.assertEqual(EXAMPLE['zone'], sot.availability_zone) self.assertEqual(EXAMPLE['id'], sot.id) - def test_force_down(self): + def test_find_single_match(self): + data = [ + service.Service(name='bin1', host='host', id=1), + service.Service(name='bin2', host='host', id=2), + ] + with mock.patch.object(service.Service, 'list') as list_mock: + list_mock.return_value = data + + sot = service.Service.find( + self.sess, 'bin1', ignore_missing=True, host='host' + ) + + self.assertEqual(data[0], sot) + + def test_find_with_id_single_match(self): + data = [ + service.Service(name='bin1', host='host', id=1), + service.Service(name='bin2', host='host', id='2'), + ] + with mock.patch.object(service.Service, 'list') as list_mock: + list_mock.return_value = data + + sot = service.Service.find( + self.sess, '2', ignore_missing=True, binary='bin1', host='host' + ) + + self.assertEqual(data[1], sot) + + # Verify find when ID is int + sot = service.Service.find( + self.sess, 1, ignore_missing=True, binary='bin1', host='host' + ) + + self.assertEqual(data[0], sot) + + def test_find_no_match(self): + data = [ + service.Service(name='bin1', host='host', id=1), + service.Service(name='bin2', host='host', id=2), + ] + with mock.patch.object(service.Service, 'list') as list_mock: + list_mock.return_value = data + + self.assertIsNone( + service.Service.find( + self.sess, 'fake', ignore_missing=True, host='host' + ) + ) + + def test_find_no_match_exception(self): + data = [ + service.Service(name='bin1', host='host', id=1), + service.Service(name='bin2', host='host', id=2), + ] + with mock.patch.object(service.Service, 
'list') as list_mock: + list_mock.return_value = data + + self.assertRaises( + exceptions.NotFoundException, + service.Service.find, + self.sess, + 'fake', + ignore_missing=False, + host='host', + ) + + def test_find_multiple_match(self): + data = [ + service.Service(name='bin1', host='host', id=1), + service.Service(name='bin1', host='host', id=2), + ] + with mock.patch.object(service.Service, 'list') as list_mock: + list_mock.return_value = data + + self.assertRaises( + exceptions.DuplicateResource, + service.Service.find, + self.sess, + 'bin1', + ignore_missing=False, + host='host', + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=False, + ) + def test_set_forced_down_before_211(self, mv_mock): sot = service.Service(**EXAMPLE) - res = sot.force_down(self.sess, 'host1', 'nova-compute') - self.assertIsNone(res.body) + res = sot.set_forced_down(self.sess, 'host1', 'nova-compute', True) + self.assertIsNotNone(res) url = 'os-services/force-down' body = { 'binary': 'nova-compute', 'host': 'host1', - 'forced_down': True, } self.sess.put.assert_called_with( - url, endpoint_filter=sot.service, json=body) + url, json=body, microversion=self.sess.default_microversion + ) + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_set_forced_down_after_211(self, mv_mock): + sot = service.Service(**EXAMPLE) + + res = sot.set_forced_down(self.sess, 'host1', 'nova-compute', True) + self.assertIsNotNone(res) + + url = 'os-services/force-down' + body = { + 'binary': 'nova-compute', + 'host': 'host1', + 'forced_down': True, + } + self.sess.put.assert_called_with(url, json=body, microversion='2.11') + + @mock.patch( + 'openstack.utils.supports_microversion', + autospec=True, + return_value=True, + ) + def test_set_forced_down_after_253(self, mv_mock): + sot = service.Service(**EXAMPLE) + + res = sot.set_forced_down(self.sess, None, None, True) + self.assertIsNotNone(res) + + url = 
'os-services/force-down' + body = { + 'binary': sot.binary, + 'host': sot.host, + 'forced_down': True, + } + self.sess.put.assert_called_with(url, json=body, microversion='2.11') def test_enable(self): sot = service.Service(**EXAMPLE) res = sot.enable(self.sess, 'host1', 'nova-compute') - self.assertIsNone(res.body) + self.assertIsNotNone(res) url = 'os-services/enable' body = { @@ -82,13 +222,14 @@ def test_enable(self): 'host': 'host1', } self.sess.put.assert_called_with( - url, endpoint_filter=sot.service, json=body) + url, json=body, microversion=self.sess.default_microversion + ) def test_disable(self): sot = service.Service(**EXAMPLE) res = sot.disable(self.sess, 'host1', 'nova-compute') - self.assertIsNone(res.body) + self.assertIsNotNone(res) url = 'os-services/disable' body = { @@ -96,7 +237,8 @@ def test_disable(self): 'host': 'host1', } self.sess.put.assert_called_with( - url, endpoint_filter=sot.service, json=body) + url, json=body, microversion=self.sess.default_microversion + ) def test_disable_with_reason(self): sot = service.Service(**EXAMPLE) @@ -104,13 +246,14 @@ def test_disable_with_reason(self): res = sot.disable(self.sess, 'host1', 'nova-compute', reason=reason) - self.assertIsNone(res.body) + self.assertIsNotNone(res) url = 'os-services/disable-log-reason' body = { 'binary': 'nova-compute', 'host': 'host1', - 'disabled_reason': reason + 'disabled_reason': reason, } self.sess.put.assert_called_with( - url, endpoint_filter=sot.service, json=body) + url, json=body, microversion=self.sess.default_microversion + ) diff --git a/openstack/tests/unit/compute/v2/test_usage.py b/openstack/tests/unit/compute/v2/test_usage.py new file mode 100644 index 0000000000..512bd53f01 --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_usage.py @@ -0,0 +1,99 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.compute.v2 import usage +from openstack.tests.unit import base + + +EXAMPLE = { + "tenant_id": "781c9299e68d4b7c80ef52712889647f", + "server_usages": [ + { + "hours": 79.51840531333333, + "flavor": "m1.tiny", + "instance_id": "76638c30-d199-4c2e-8154-7dea963bfe2f", + "name": "test-server", + "tenant_id": "781c9299e68d4b7c80ef52712889647f", + "memory_mb": 512, + "local_gb": 1, + "vcpus": 1, + "started_at": "2022-05-16T10:35:31.000000", + "ended_at": None, + "state": "active", + "uptime": 286266, + } + ], + "total_local_gb_usage": 79.51840531333333, + "total_vcpus_usage": 79.51840531333333, + "total_memory_mb_usage": 40713.423520426666, + "total_hours": 79.51840531333333, + "start": "2022-04-21T18:06:47.064959", + "stop": "2022-05-19T18:06:37.259128", +} + + +class TestUsage(base.TestCase): + def test_basic(self): + sot = usage.Usage() + self.assertEqual('tenant_usage', sot.resource_key) + self.assertEqual('tenant_usages', sot.resources_key) + self.assertEqual('/os-simple-tenant-usage', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = usage.Usage(**EXAMPLE) + + self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual( + EXAMPLE['total_local_gb_usage'], + sot.total_local_gb_usage, + ) + self.assertEqual(EXAMPLE['total_vcpus_usage'], sot.total_vcpus_usage) + self.assertEqual( + EXAMPLE['total_memory_mb_usage'], + sot.total_memory_mb_usage, + ) + self.assertEqual(EXAMPLE['total_hours'], sot.total_hours) + self.assertEqual(EXAMPLE['start'], sot.start) + 
self.assertEqual(EXAMPLE['stop'], sot.stop) + + # now do the embedded objects + self.assertIsInstance(sot.server_usages, list) + self.assertEqual(1, len(sot.server_usages)) + + ssot = sot.server_usages[0] + self.assertIsInstance(ssot, usage.ServerUsage) + self.assertEqual(EXAMPLE['server_usages'][0]['hours'], ssot.hours) + self.assertEqual(EXAMPLE['server_usages'][0]['flavor'], ssot.flavor) + self.assertEqual( + EXAMPLE['server_usages'][0]['instance_id'], ssot.instance_id + ) + self.assertEqual(EXAMPLE['server_usages'][0]['name'], ssot.name) + self.assertEqual( + EXAMPLE['server_usages'][0]['tenant_id'], ssot.project_id + ) + self.assertEqual( + EXAMPLE['server_usages'][0]['memory_mb'], ssot.memory_mb + ) + self.assertEqual( + EXAMPLE['server_usages'][0]['local_gb'], ssot.local_gb + ) + self.assertEqual(EXAMPLE['server_usages'][0]['vcpus'], ssot.vcpus) + self.assertEqual( + EXAMPLE['server_usages'][0]['started_at'], ssot.started_at + ) + self.assertEqual( + EXAMPLE['server_usages'][0]['ended_at'], ssot.ended_at + ) + self.assertEqual(EXAMPLE['server_usages'][0]['state'], ssot.state) + self.assertEqual(EXAMPLE['server_usages'][0]['uptime'], ssot.uptime) diff --git a/openstack/tests/unit/compute/v2/test_volume_attachment.py b/openstack/tests/unit/compute/v2/test_volume_attachment.py new file mode 100644 index 0000000000..5091463ef9 --- /dev/null +++ b/openstack/tests/unit/compute/v2/test_volume_attachment.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.compute.v2 import volume_attachment +from openstack.tests.unit import base + + +EXAMPLE = { + 'attachment_id': '979ce4f8-033a-409d-85e6-6b5c0f6a6302', + 'delete_on_termination': False, + 'device': '/dev/sdc', + 'serverId': '7696780b-3f53-4688-ab25-019bfcbbd806', + 'tag': 'foo', + 'volumeId': 'a07f71dc-8151-4e7d-a0cc-cd24a3f11113', + 'bdm_uuid': 'c088db45-92b8-49e8-81e2-a1b77a144b3b', +} + + +class TestServerInterface(base.TestCase): + def test_basic(self): + sot = volume_attachment.VolumeAttachment() + self.assertEqual('volumeAttachment', sot.resource_key) + self.assertEqual('volumeAttachments', sot.resources_key) + self.assertEqual( + '/servers/%(server_id)s/os-volume_attachments', + sot.base_path, + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertDictEqual( + {"limit": "limit", "offset": "offset", "marker": "marker"}, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = volume_attachment.VolumeAttachment(**EXAMPLE) + self.assertEqual(EXAMPLE['volumeId'], sot.id) + self.assertEqual(EXAMPLE['attachment_id'], sot.attachment_id) + self.assertEqual( + EXAMPLE['delete_on_termination'], + sot.delete_on_termination, + ) + self.assertEqual(EXAMPLE['device'], sot.device) + # FIXME(stephenfin): This conflicts since there is a server ID in the + # URI *and* in the body. We need a field that handles both or we need + # to use different names. 
+ # self.assertEqual(EXAMPLE['serverId'], sot.server_id) + self.assertEqual(EXAMPLE['tag'], sot.tag) + self.assertEqual(EXAMPLE['volumeId'], sot.volume_id) + self.assertEqual(EXAMPLE['bdm_uuid'], sot.bdm_id) diff --git a/openstack/tests/unit/config/__init__.py b/openstack/tests/unit/config/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/config/base.py b/openstack/tests/unit/config/base.py new file mode 100644 index 0000000000..8f0f470f53 --- /dev/null +++ b/openstack/tests/unit/config/base.py @@ -0,0 +1,279 @@ +# Copyright 2010-2011 OpenStack Foundation +# Copyright (c) 2013 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import copy +import os +import tempfile + +import fixtures +import yaml + +from openstack.config import cloud_region +from openstack.tests.unit import base + +VENDOR_CONF = { + 'public-clouds': { + '_test_cloud_in_our_cloud': { + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testotheruser', + 'project_name': 'testproject', + }, + }, + } +} +USER_CONF = { + 'cache': { + 'max_age': '1', + 'expiration': { + 'server': 5, + 'image': '7', + }, + }, + 'client': { + 'force_ipv4': True, + }, + 'metrics': { + 'statsd': {'host': '127.0.0.1', 'port': '1234'}, + 'influxdb': { + 'host': '127.0.0.1', + 'port': '1234', + 'use_udp': True, + 'username': 'username', + 'password': 'password', + 'database': 'database', + 'measurement': 'measurement.name', + 'timeout': 10, + }, + }, + 'clouds': { + '_test-cloud_': { + 'profile': '_test_cloud_in_our_cloud', + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + }, + 'region_name': 'test-region', + }, + '_test_cloud_no_vendor': { + 'profile': '_test_non_existant_cloud', + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'project_name': 'testproject', + }, + 'region-name': 'test-region', + }, + '_test-cloud-int-project_': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'domain_id': 'awesome-domain', + 'project_id': 12345, + 'auth_url': 'http://example.com/v2', + }, + 'region_name': 'test-region', + }, + '_test-cloud-domain-id_': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project_id': 12345, + 'auth_url': 'http://example.com/v2', + 'domain_id': '6789', + 'project_domain_id': '123456789', + }, + 'region_name': 'test-region', + }, + '_test-cloud-networks_': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project_id': 12345, + 'auth_url': 'http://example.com/v2', + 'domain_id': '6789', + 'project_domain_id': '123456789', + }, + 'networks': [ + { + 'name': 'a-public', + 
'routes_externally': True, + 'nat_source': True, + }, + { + 'name': 'another-public', + 'routes_externally': True, + 'default_interface': True, + }, + { + 'name': 'a-private', + 'routes_externally': False, + }, + { + 'name': 'another-private', + 'routes_externally': False, + 'nat_destination': True, + }, + { + 'name': 'split-default', + 'routes_externally': True, + 'routes_ipv4_externally': False, + }, + { + 'name': 'split-no-default', + 'routes_ipv6_externally': False, + 'routes_ipv4_externally': True, + }, + ], + 'region_name': 'test-region', + }, + '_test_cloud_regions': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project-id': 'testproject', + 'auth_url': 'http://example.com/v2', + }, + 'regions': [ + { + 'name': 'region1', + 'values': { + 'external_network': 'region1-network', + }, + }, + { + 'name': 'region2', + 'values': { + 'external_network': 'my-network', + }, + }, + { + 'name': 'region-no-value', + }, + ], + }, + '_test_cloud_hyphenated': { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project-id': '12345', + 'auth_url': 'http://example.com/v2', + }, + 'region_name': 'test-region', + }, + '_test-cloud_no_region': { + 'profile': '_test_cloud_in_our_cloud', + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + }, + }, + '_test-cloud-domain-scoped_': { + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + 'domain-id': '12345', + }, + }, + '_test-cloud-override-metrics': { + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + }, + 'metrics': { + 'statsd': { + 'host': '127.0.0.1', + 'port': 4321, + 'prefix': 'statsd.override.prefix', + }, + 'influxdb': { + 'username': 'override-username', + 'password': 'override-password', + 'database': 'override-database', + }, + }, + }, + }, + 'ansible': { + 'expand-hostvars': False, + 'use_hostnames': True, + }, +} +SECURE_CONF 
= { + 'clouds': { + '_test_cloud_no_vendor': { + 'auth': { + 'password': 'testpass', + }, + } + } +} +NO_CONF = { + 'cache': {'max_age': 1}, +} + + +def _write_yaml(obj): + # Assume NestedTempfile so we don't have to cleanup + with tempfile.NamedTemporaryFile(delete=False, suffix='.yaml') as obj_yaml: + obj_yaml.write(yaml.safe_dump(obj).encode('utf-8')) + return obj_yaml.name + + +class TestCase(base.TestCase): + """Test case base class for all unit tests.""" + + def setUp(self): + super().setUp() + + conf = copy.deepcopy(USER_CONF) + tdir = self.useFixture(fixtures.TempDir()) + conf['cache']['path'] = tdir.path + self.cloud_yaml = _write_yaml(conf) + self.secure_yaml = _write_yaml(SECURE_CONF) + self.vendor_yaml = _write_yaml(VENDOR_CONF) + self.no_yaml = _write_yaml(NO_CONF) + + # Isolate the test runs from the environment + # Do this as two loops because you can't modify the dict in a loop + # over the dict in 3.4 + keys_to_isolate = [] + for env in os.environ.keys(): + if env.startswith('OS_'): + keys_to_isolate.append(env) + for env in keys_to_isolate: + self.useFixture(fixtures.EnvironmentVariable(env)) + + def _assert_cloud_details(self, cc): + self.assertIsInstance(cc, cloud_region.CloudRegion) + self.assertTrue(hasattr(cc, 'auth')) + self.assertIsInstance(cc.auth, dict) + self.assertIsNone(cc.cloud) + self.assertIn('username', cc.auth) + self.assertEqual('testuser', cc.auth['username']) + self.assertEqual('testpass', cc.auth['password']) + self.assertFalse(cc.config['image_api_use_tasks']) + self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth) + if 'project_name' in cc.auth: + self.assertEqual('testproject', cc.auth['project_name']) + elif 'project_id' in cc.auth: + self.assertEqual('testproject', cc.auth['project_id']) + self.assertEqual(cc.get_cache_expiration_time(), 1) + self.assertEqual(cc.get_cache_resource_expiration('server'), 5.0) + self.assertEqual(cc.get_cache_resource_expiration('image'), 7.0) diff --git 
a/openstack/tests/unit/config/test_cloud_config.py b/openstack/tests/unit/config/test_cloud_config.py new file mode 100644 index 0000000000..01fe597283 --- /dev/null +++ b/openstack/tests/unit/config/test_cloud_config.py @@ -0,0 +1,486 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy +from unittest import mock + +from keystoneauth1 import exceptions as ksa_exceptions +from keystoneauth1 import session as ksa_session + +from openstack.config import cloud_region +from openstack.config import defaults +from openstack import exceptions +from openstack.tests.unit.config import base +from openstack import version as openstack_version + +fake_config_dict = {'a': 1, 'os_b': 2, 'c': 3, 'os_c': 4} +fake_services_dict = { + 'compute_api_version': '2', + 'compute_endpoint_override': 'http://compute.example.com', + 'telemetry_endpoint': 'http://telemetry.example.com', + 'interface': 'public', + 'image_service_type': 'mage', + 'identity_interface': 'admin', + 'identity_service_name': 'locks', + 'volume_api_version': '1', + 'auth': {'password': 'hunter2', 'username': 'AzureDiamond'}, + 'connect_retries': 1, + 'connect_retry_delay': 0.5, + 'baremetal_status_code_retries': 5, + 'baremetal_connect_retries': 3, + 'baremetal_connect_retry_delay': 1.5, +} + + +class TestCloudRegion(base.TestCase): + def test_arbitrary_attributes(self): + cc = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) + self.assertEqual("test1", cc.name) + self.assertEqual("region-al", 
cc.region_name) + + # Look up straight value + self.assertEqual("1", cc.a) + + # Look up prefixed attribute, fail - returns None + self.assertIsNone(cc.os_b) + + # Look up straight value, then prefixed value + self.assertEqual("3", cc.c) + self.assertEqual("3", cc.os_c) + + # Lookup mystery attribute + self.assertIsNone(cc.x) + + # Test default ipv6 + self.assertFalse(cc.force_ipv4) + + def test_iteration(self): + cc = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) + self.assertIn('a', cc) + self.assertNotIn('x', cc) + + def test_equality(self): + cc1 = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) + cc2 = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) + self.assertEqual(cc1, cc2) + + def test_inequality(self): + cc1 = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) + + cc2 = cloud_region.CloudRegion("test2", "region-al", fake_config_dict) + self.assertNotEqual(cc1, cc2) + + cc2 = cloud_region.CloudRegion("test1", "region-xx", fake_config_dict) + self.assertNotEqual(cc1, cc2) + + cc2 = cloud_region.CloudRegion("test1", "region-al", {}) + self.assertNotEqual(cc1, cc2) + + def test_deepcopy(self): + """Test that CloudRegion can be deep copied. + + This is a regression test for a bug where copy.deepcopy() would cause + infinite recursion in __getattr__ because deepcopy creates instances + without calling __init__, so self.config doesn't exist. + """ + cc = cloud_region.CloudRegion("test1", "region-al", fake_config_dict) + cc_copy = copy.deepcopy(cc) + self.assertEqual(cc.name, cc_copy.name) + self.assertEqual(cc.region_name, cc_copy.region_name) + self.assertEqual(cc.config, cc_copy.config) + # Verify the copy is independent + self.assertIsNot(cc.config, cc_copy.config) + + def test_get_config(self): + cc = cloud_region.CloudRegion("test1", "region-al", fake_services_dict) + self.assertIsNone(cc._get_config('nothing', None)) + # This is what is happening behind the scenes in get_default_interface. 
+ self.assertEqual( + fake_services_dict['interface'], + cc._get_config('interface', None), + ) + # The same call as above, but from one step up the stack + self.assertEqual(fake_services_dict['interface'], cc.get_interface()) + # Which finally is what is called to populate the below + self.assertEqual('public', self.cloud.default_interface) + + def test_verify(self): + config_dict = copy.deepcopy(fake_config_dict) + config_dict['cacert'] = None + + config_dict['verify'] = False + cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) + verify, _ = cc.get_requests_verify_args() + self.assertFalse(verify) + + config_dict['verify'] = True + cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) + verify, _ = cc.get_requests_verify_args() + self.assertTrue(verify) + + config_dict['insecure'] = True + cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) + verify, _ = cc.get_requests_verify_args() + self.assertFalse(verify) + + def test_verify_cacert(self): + config_dict = copy.deepcopy(fake_config_dict) + config_dict['cacert'] = "certfile" + + config_dict['verify'] = False + cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) + verify, _ = cc.get_requests_verify_args() + self.assertFalse(verify) + + config_dict['verify'] = True + cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) + verify, _ = cc.get_requests_verify_args() + self.assertEqual("certfile", verify) + + config_dict['insecure'] = True + cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) + verify, _ = cc.get_requests_verify_args() + self.assertEqual(False, verify) + + def test_cert_with_key(self): + config_dict = copy.deepcopy(fake_config_dict) + config_dict['cacert'] = None + config_dict['verify'] = False + + config_dict['cert'] = 'cert' + config_dict['key'] = 'key' + + cc = cloud_region.CloudRegion("test1", "region-xx", config_dict) + _, cert = cc.get_requests_verify_args() + self.assertEqual(("cert", "key"), cert) + + def 
test_ipv6(self): + cc = cloud_region.CloudRegion( + "test1", "region-al", fake_config_dict, force_ipv4=True + ) + self.assertTrue(cc.force_ipv4) + + def test_getters(self): + cc = cloud_region.CloudRegion("test1", "region-al", fake_services_dict) + + self.assertEqual( + ['compute', 'identity', 'image', 'volume'], + sorted(cc.get_services()), + ) + self.assertEqual( + {'password': 'hunter2', 'username': 'AzureDiamond'}, + cc.get_auth_args(), + ) + self.assertEqual('public', cc.get_interface()) + self.assertEqual('public', cc.get_interface('compute')) + self.assertEqual('admin', cc.get_interface('identity')) + self.assertEqual('region-al', cc.region_name) + self.assertIsNone(cc.get_api_version('image')) + self.assertEqual('2', cc.get_api_version('compute')) + self.assertEqual('mage', cc.get_service_type('image')) + self.assertEqual('compute', cc.get_service_type('compute')) + self.assertEqual('1', cc.get_api_version('volume')) + self.assertEqual('block-storage', cc.get_service_type('volume')) + self.assertEqual( + 'http://compute.example.com', cc.get_endpoint('compute') + ) + self.assertIsNone(cc.get_endpoint('image')) + self.assertIsNone(cc.get_service_name('compute')) + self.assertEqual('locks', cc.get_service_name('identity')) + self.assertIsNone(cc.get_status_code_retries('compute')) + self.assertEqual(5, cc.get_status_code_retries('baremetal')) + self.assertEqual(1, cc.get_connect_retries('compute')) + self.assertEqual(3, cc.get_connect_retries('baremetal')) + self.assertEqual(0.5, cc.get_connect_retry_delay('compute')) + self.assertEqual(1.5, cc.get_connect_retry_delay('baremetal')) + + def test_rackspace_workaround(self): + # We're skipping loader here, so we have to expand relevant + # parts from the rackspace profile. The thing we're testing + # is that the project_id logic works. 
+ cc = cloud_region.CloudRegion( + "test1", + "DFW", + { + 'profile': 'rackspace', + 'region_name': 'DFW', + 'auth': {'project_id': '123456'}, + 'block_storage_endpoint_override': 'https://example.com/v2/', + }, + ) + self.assertEqual( + 'https://example.com/v2/123456', cc.get_endpoint('block-storage') + ) + + def test_rackspace_workaround_only_rax(self): + cc = cloud_region.CloudRegion( + "test1", + "DFW", + { + 'region_name': 'DFW', + 'auth': {'project_id': '123456'}, + 'block_storage_endpoint_override': 'https://example.com/v2/', + }, + ) + self.assertEqual( + 'https://example.com/v2/', cc.get_endpoint('block-storage') + ) + + def test_get_region_name(self): + def assert_region_name(default, compute): + self.assertEqual(default, cc.region_name) + self.assertEqual(default, cc.get_region_name()) + self.assertEqual(default, cc.get_region_name(service_type=None)) + self.assertEqual( + compute, cc.get_region_name(service_type='compute') + ) + self.assertEqual( + default, cc.get_region_name(service_type='placement') + ) + + # No region_name kwarg, no regions specified in services dict + # (including the default). + cc = cloud_region.CloudRegion(config=fake_services_dict) + assert_region_name(None, None) + + # Only region_name kwarg; it's returned for everything + cc = cloud_region.CloudRegion( + region_name='foo', config=fake_services_dict + ) + assert_region_name('foo', 'foo') + + # No region_name kwarg; values (including default) show through from + # config dict + services_dict = dict( + fake_services_dict, + region_name='the-default', + compute_region_name='compute-region', + ) + cc = cloud_region.CloudRegion(config=services_dict) + assert_region_name('the-default', 'compute-region') + + # region_name kwarg overrides config dict default (for backward + # compatibility), but service-specific region_name takes precedence. 
+ services_dict = dict( + fake_services_dict, + region_name='dict', + compute_region_name='compute-region', + ) + cc = cloud_region.CloudRegion( + region_name='kwarg', config=services_dict + ) + assert_region_name('kwarg', 'compute-region') + + def test_aliases(self): + services_dict = fake_services_dict.copy() + services_dict['volume_api_version'] = 12 + services_dict['alarming_service_name'] = 'aodh' + cc = cloud_region.CloudRegion("test1", "region-al", services_dict) + self.assertEqual('12', cc.get_api_version('volume')) + self.assertEqual('12', cc.get_api_version('block-storage')) + self.assertEqual('aodh', cc.get_service_name('alarm')) + self.assertEqual('aodh', cc.get_service_name('alarming')) + + def test_no_override(self): + """Test no override happens when defaults are not configured""" + cc = cloud_region.CloudRegion("test1", "region-al", fake_services_dict) + self.assertEqual('block-storage', cc.get_service_type('volume')) + self.assertEqual('workflow', cc.get_service_type('workflow')) + self.assertEqual('not-exist', cc.get_service_type('not-exist')) + + def test_get_session_no_auth(self): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_region.CloudRegion("test1", "region-al", config_dict) + self.assertRaises(exceptions.ConfigException, cc.get_session) + + @mock.patch.object(ksa_session, 'Session') + def test_get_session(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + fake_session = mock.Mock() + fake_session.additional_user_agent = [] + mock_session.return_value = fake_session + cc = cloud_region.CloudRegion( + "test1", "region-al", config_dict, auth_plugin=mock.Mock() + ) + cc.get_session() + mock_session.assert_called_with( + auth=mock.ANY, + verify=True, + cert=None, + timeout=None, + collect_timing=False, + discovery_cache=None, + ) + self.assertEqual( + fake_session.additional_user_agent, + [('openstacksdk', openstack_version.__version__)], + ) + + 
@mock.patch.object(ksa_session, 'Session') + def test_get_session_with_app_name(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + fake_session = mock.Mock() + fake_session.additional_user_agent = [] + fake_session.app_name = None + fake_session.app_version = None + mock_session.return_value = fake_session + cc = cloud_region.CloudRegion( + "test1", + "region-al", + config_dict, + auth_plugin=mock.Mock(), + app_name="test_app", + app_version="test_version", + ) + cc.get_session() + mock_session.assert_called_with( + auth=mock.ANY, + verify=True, + cert=None, + timeout=None, + collect_timing=False, + discovery_cache=None, + ) + self.assertEqual(fake_session.app_name, "test_app") + self.assertEqual(fake_session.app_version, "test_version") + self.assertEqual( + fake_session.additional_user_agent, + [('openstacksdk', openstack_version.__version__)], + ) + + @mock.patch.object(ksa_session, 'Session') + def test_get_session_with_timeout(self, mock_session): + fake_session = mock.Mock() + fake_session.additional_user_agent = [] + mock_session.return_value = fake_session + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + config_dict['api_timeout'] = 9 + cc = cloud_region.CloudRegion( + "test1", "region-al", config_dict, auth_plugin=mock.Mock() + ) + cc.get_session() + mock_session.assert_called_with( + auth=mock.ANY, + verify=True, + cert=None, + timeout=9, + collect_timing=False, + discovery_cache=None, + ) + self.assertEqual( + fake_session.additional_user_agent, + [('openstacksdk', openstack_version.__version__)], + ) + + @mock.patch.object(ksa_session, 'Session') + def test_get_session_with_timing(self, mock_session): + fake_session = mock.Mock() + fake_session.additional_user_agent = [] + mock_session.return_value = fake_session + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + config_dict['timing'] = True + cc = cloud_region.CloudRegion( + "test1", 
"region-al", config_dict, auth_plugin=mock.Mock() + ) + cc.get_session() + mock_session.assert_called_with( + auth=mock.ANY, + verify=True, + cert=None, + timeout=None, + collect_timing=True, + discovery_cache=None, + ) + self.assertEqual( + fake_session.additional_user_agent, + [('openstacksdk', openstack_version.__version__)], + ) + + @mock.patch.object(ksa_session, 'Session') + def test_override_session_endpoint_override(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_region.CloudRegion( + "test1", "region-al", config_dict, auth_plugin=mock.Mock() + ) + self.assertEqual( + cc.get_session_endpoint('compute'), + fake_services_dict['compute_endpoint_override'], + ) + + @mock.patch.object(ksa_session, 'Session') + def test_override_session_endpoint(self, mock_session): + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_region.CloudRegion( + "test1", "region-al", config_dict, auth_plugin=mock.Mock() + ) + self.assertEqual( + cc.get_session_endpoint('telemetry'), + fake_services_dict['telemetry_endpoint'], + ) + + @mock.patch.object(cloud_region.CloudRegion, 'get_session') + def test_session_endpoint(self, mock_get_session): + mock_session = mock.Mock() + mock_get_session.return_value = mock_session + config_dict = defaults.get_defaults() + config_dict.update(fake_services_dict) + cc = cloud_region.CloudRegion( + "test1", "region-al", config_dict, auth_plugin=mock.Mock() + ) + cc.get_session_endpoint('orchestration') + mock_session.get_endpoint.assert_called_with( + service_type='orchestration', + region_name='region-al', + interface='public', + service_name=None, + min_version=None, + max_version=None, + ) + + @mock.patch.object(cloud_region.CloudRegion, 'get_session') + def test_session_endpoint_not_found(self, mock_get_session): + exc_to_raise = ksa_exceptions.catalog.EndpointNotFound + mock_get_session.return_value.get_endpoint.side_effect = exc_to_raise + 
cc = cloud_region.CloudRegion( + "test1", "region-al", {}, auth_plugin=mock.Mock() + ) + self.assertIsNone(cc.get_session_endpoint('notfound')) + + def test_get_endpoint_from_catalog(self): + dns_override = 'https://override.dns.example.com' + self.cloud.config.config['dns_endpoint_override'] = dns_override + self.assertEqual( + 'https://compute.example.com/v2.1/', + self.cloud.config.get_endpoint_from_catalog('compute'), + ) + self.assertEqual( + 'https://internal.compute.example.com/v2.1/', + self.cloud.config.get_endpoint_from_catalog( + 'compute', interface='internal' + ), + ) + self.assertIsNone( + self.cloud.config.get_endpoint_from_catalog( + 'compute', region_name='unknown-region' + ) + ) + self.assertEqual( + 'https://dns.example.com', + self.cloud.config.get_endpoint_from_catalog('dns'), + ) diff --git a/openstack/tests/unit/config/test_config.py b/openstack/tests/unit/config/test_config.py new file mode 100644 index 0000000000..c734bf9190 --- /dev/null +++ b/openstack/tests/unit/config/test_config.py @@ -0,0 +1,1716 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import argparse +import copy +import os +from unittest import mock + +import fixtures +import testtools +import yaml + +from openstack import config +from openstack.config import cloud_region +from openstack.config import defaults +from openstack.config import loader +from openstack import exceptions +from openstack.tests.unit.config import base + + +def prompt_for_password(prompt=None): + """Fake prompt function that just returns a constant string""" + return 'promptpass' + + +class TestConfig(base.TestCase): + def test_get_all(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + clouds = c.get_all() + # We add two by hand because the regions cloud is going to exist + # thrice since it has three regions in it + user_clouds = [cloud for cloud in base.USER_CONF['clouds'].keys()] + [ + '_test_cloud_regions', + '_test_cloud_regions', + ] + configured_clouds = [cloud.name for cloud in clouds] + self.assertCountEqual(user_clouds, configured_clouds) + + def test_get_all_clouds(self): + # Ensure the alias is in place + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + clouds = c.get_all_clouds() + # We add two by hand because the regions cloud is going to exist + # thrice since it has three regions in it + user_clouds = [cloud for cloud in base.USER_CONF['clouds'].keys()] + [ + '_test_cloud_regions', + '_test_cloud_regions', + ] + configured_clouds = [cloud.name for cloud in clouds] + self.assertCountEqual(user_clouds, configured_clouds) + + def test_get_one(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cloud = c.get_one(validate=False) + self.assertIsInstance(cloud, cloud_region.CloudRegion) + self.assertEqual(cloud.name, '') + + def test_get_one_cloud(self): + # Ensure the alias is in place + c = config.OpenStackConfig( + 
config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cloud = c.get_one_cloud(validate=False) + self.assertIsInstance(cloud, cloud_region.CloudRegion) + self.assertEqual(cloud.name, '') + + def test_get_one_default_cloud_from_file(self): + single_conf = base._write_yaml( + { + 'clouds': { + 'single': { + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + 'project_name': 'testproject', + }, + 'region_name': 'test-region', + } + } + } + ) + c = config.OpenStackConfig( + config_files=[single_conf], + secure_files=[], + vendor_files=[self.vendor_yaml], + ) + cc = c.get_one() + self.assertEqual(cc.name, 'single') + + def test_remote_profile(self): + single_conf = base._write_yaml( + { + 'clouds': { + 'remote': { + 'profile': 'https://example.com', + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project_name': 'testproject', + }, + 'region_name': 'test-region', + } + } + } + ) + self.register_uris( + [ + dict( + method='GET', + uri='https://example.com/.well-known/openstack/api', + json={ + "name": "example", + "profile": { + "auth": { + "auth_url": "https://auth.example.com/v3", + } + }, + }, + ), + ] + ) + + c = config.OpenStackConfig(config_files=[single_conf]) + cc = c.get_one(cloud='remote') + self.assertEqual(cc.name, 'remote') + self.assertEqual(cc.auth['auth_url'], 'https://auth.example.com/v3') + self.assertEqual(cc.auth['username'], 'testuser') + + def test_get_one_auth_defaults(self): + c = config.OpenStackConfig(config_files=[self.cloud_yaml]) + cc = c.get_one(cloud='_test-cloud_', auth={'username': 'user'}) + self.assertEqual('user', cc.auth['username']) + self.assertEqual( + defaults._defaults['auth_type'], + cc.auth_type, + ) + + def test_get_one_with_config_files(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.secure_yaml], + ) + self.assertIsInstance(c.cloud_config, dict) + 
self.assertIn('cache', c.cloud_config) + self.assertIsInstance(c.cloud_config['cache'], dict) + self.assertIn('max_age', c.cloud_config['cache']) + self.assertIn('path', c.cloud_config['cache']) + cc = c.get_one('_test-cloud_') + self._assert_cloud_details(cc) + cc = c.get_one('_test_cloud_no_vendor') + self._assert_cloud_details(cc) + + def test_get_one_with_int_project_id(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('_test-cloud-int-project_') + self.assertEqual('12345', cc.auth['project_id']) + + def test_get_one_with_domain_id(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('_test-cloud-domain-id_') + self.assertEqual('6789', cc.auth['user_domain_id']) + self.assertEqual('123456789', cc.auth['project_domain_id']) + self.assertNotIn('domain_id', cc.auth) + self.assertNotIn('domain-id', cc.auth) + self.assertNotIn('domain_id', cc) + + def test_get_one_unscoped_identity(self): + single_conf = base._write_yaml( + { + 'clouds': { + 'unscoped': { + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + }, + } + } + } + ) + c = config.OpenStackConfig( + config_files=[single_conf], + secure_files=[], + vendor_files=[self.vendor_yaml], + ) + cc = c.get_one() + self.assertEqual('http://example.com/v2', cc.get_endpoint('identity')) + + def test_get_one_domain_scoped(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('_test-cloud-domain-scoped_') + self.assertEqual('12345', cc.auth['domain_id']) + self.assertNotIn('user_domain_id', cc.auth) + self.assertNotIn('project_domain_id', cc.auth) + self.assertIsNone(cc.get_endpoint('identity')) + + def test_get_one_infer_user_domain(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = 
c.get_one('_test-cloud-int-project_') + self.assertEqual('awesome-domain', cc.auth['user_domain_id']) + self.assertEqual('awesome-domain', cc.auth['project_domain_id']) + self.assertNotIn('domain_id', cc.auth) + self.assertNotIn('domain_id', cc) + + def test_get_one_infer_passcode(self): + single_conf = base._write_yaml( + { + 'clouds': { + 'mfa': { + 'auth_type': 'v3multifactor', + 'auth_methods': ['v3password', 'v3totp'], + 'auth': { + 'auth_url': 'fake_url', + 'username': 'testuser', + 'password': 'testpass', + 'project_name': 'testproject', + 'project_domain_name': 'projectdomain', + 'user_domain_name': 'udn', + }, + 'region_name': 'test-region', + } + } + } + ) + + c = config.OpenStackConfig(config_files=[single_conf]) + cc = c.get_one(cloud='mfa', passcode='123') + self.assertEqual('123', cc.auth['passcode']) + + def test_get_one_with_hyphenated_project_id(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('_test_cloud_hyphenated') + self.assertEqual('12345', cc.auth['project_id']) + + def test_get_one_with_hyphenated_kwargs(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + args = { + 'auth': { + 'username': 'testuser', + 'password': 'testpass', + 'project-id': '12345', + 'auth-url': 'http://example.com/v2', + }, + 'region_name': 'test-region', + } + cc = c.get_one(**args) + self.assertEqual('http://example.com/v2', cc.auth['auth_url']) + + def test_no_environ(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + self.assertRaises(exceptions.ConfigException, c.get_one, 'envvars') + + def test_fallthrough(self): + c = config.OpenStackConfig( + config_files=[self.no_yaml], + vendor_files=[self.no_yaml], + secure_files=[self.no_yaml], + ) + for k in os.environ.keys(): + if k.startswith('OS_'): + self.useFixture(fixtures.EnvironmentVariable(k)) + c.get_one(cloud='defaults', 
validate=False) + + def test_prefer_ipv6_true(self): + c = config.OpenStackConfig( + config_files=[self.no_yaml], + vendor_files=[self.no_yaml], + secure_files=[self.no_yaml], + ) + cc = c.get_one(cloud='defaults', validate=False) + self.assertTrue(cc.prefer_ipv6) + + def test_prefer_ipv6_false(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one(cloud='_test-cloud_') + self.assertFalse(cc.prefer_ipv6) + + def test_force_ipv4_true(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one(cloud='_test-cloud_') + self.assertTrue(cc.force_ipv4) + + def test_force_ipv4_false(self): + c = config.OpenStackConfig( + config_files=[self.no_yaml], + vendor_files=[self.no_yaml], + secure_files=[self.no_yaml], + ) + cc = c.get_one(cloud='defaults', validate=False) + self.assertFalse(cc.force_ipv4) + + def test_get_one_auth_merge(self): + c = config.OpenStackConfig(config_files=[self.cloud_yaml]) + cc = c.get_one(cloud='_test-cloud_', auth={'username': 'user'}) + self.assertEqual('user', cc.auth['username']) + self.assertEqual('testpass', cc.auth['password']) + + def test_get_one_networks(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('_test-cloud-networks_') + self.assertEqual( + ['a-public', 'another-public', 'split-default'], + cc.get_external_networks(), + ) + self.assertEqual( + ['a-private', 'another-private', 'split-no-default'], + cc.get_internal_networks(), + ) + self.assertEqual('a-public', cc.get_nat_source()) + self.assertEqual('another-private', cc.get_nat_destination()) + self.assertEqual('another-public', cc.get_default_network()) + self.assertEqual( + ['a-public', 'another-public', 'split-no-default'], + cc.get_external_ipv4_networks(), + ) + self.assertEqual( + ['a-public', 'another-public', 'split-default'], + cc.get_external_ipv6_networks(), + ) + + 
def test_get_one_no_networks(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('_test-cloud-domain-scoped_') + self.assertEqual([], cc.get_external_networks()) + self.assertEqual([], cc.get_internal_networks()) + self.assertIsNone(cc.get_nat_source()) + self.assertIsNone(cc.get_nat_destination()) + self.assertIsNone(cc.get_default_network()) + + def test_only_secure_yaml(self): + c = config.OpenStackConfig( + config_files=['nonexistent'], + vendor_files=['nonexistent'], + secure_files=[self.secure_yaml], + ) + cc = c.get_one(cloud='_test_cloud_no_vendor', validate=False) + self.assertEqual('testpass', cc.auth['password']) + + def test_get_cloud_names(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], secure_files=[self.no_yaml] + ) + self.assertCountEqual( + [ + '_test-cloud-domain-id_', + '_test-cloud-domain-scoped_', + '_test-cloud-int-project_', + '_test-cloud-networks_', + '_test-cloud_', + '_test-cloud_no_region', + '_test_cloud_hyphenated', + '_test_cloud_no_vendor', + '_test_cloud_regions', + '_test-cloud-override-metrics', + ], + c.get_cloud_names(), + ) + c = config.OpenStackConfig( + config_files=[self.no_yaml], + vendor_files=[self.no_yaml], + secure_files=[self.no_yaml], + ) + for k in os.environ.keys(): + if k.startswith('OS_'): + self.useFixture(fixtures.EnvironmentVariable(k)) + c.get_one(cloud='defaults', validate=False) + self.assertEqual(['defaults'], sorted(c.get_cloud_names())) + + def test_set_one_cloud_creates_file(self): + config_dir = fixtures.TempDir() + self.useFixture(config_dir) + config_path = os.path.join(config_dir.path, 'clouds.yaml') + config.OpenStackConfig.set_one_cloud(config_path, '_test_cloud_') + self.assertTrue(os.path.isfile(config_path)) + with open(config_path) as fh: + self.assertEqual( + {'clouds': {'_test_cloud_': {}}}, yaml.safe_load(fh) + ) + + def test_set_one_cloud_updates_cloud(self): + new_config = {'cloud': 'new_cloud', 
'auth': {'password': 'newpass'}} + + resulting_cloud_config = { + 'auth': { + 'password': 'newpass', + 'username': 'testuser', + 'auth_url': 'http://example.com/v2', + }, + 'cloud': 'new_cloud', + 'profile': '_test_cloud_in_our_cloud', + 'region_name': 'test-region', + } + resulting_config = copy.deepcopy(base.USER_CONF) + resulting_config['clouds']['_test-cloud_'] = resulting_cloud_config + config.OpenStackConfig.set_one_cloud( + self.cloud_yaml, '_test-cloud_', new_config + ) + with open(self.cloud_yaml) as fh: + written_config = yaml.safe_load(fh) + # We write a cache config for testing + written_config['cache'].pop('path', None) + self.assertEqual(written_config, resulting_config) + + def test_get_region_no_region_default(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + region = c._get_region(cloud='_test-cloud_no_region') + self.assertEqual(region, {'name': '', 'values': {}}) + + def test_get_region_no_region(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + region = c._get_region( + cloud='_test-cloud_no_region', region_name='override-region' + ) + self.assertEqual(region, {'name': 'override-region', 'values': {}}) + + def test_get_region_region_is_none(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + region = c._get_region(cloud='_test-cloud_no_region', region_name=None) + self.assertEqual(region, {'name': '', 'values': {}}) + + def test_get_region_region_set(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + region = c._get_region(cloud='_test-cloud_', region_name='test-region') + self.assertEqual(region, {'name': 'test-region', 'values': {}}) + + def 
test_get_region_many_regions_default(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + region = c._get_region(cloud='_test_cloud_regions', region_name='') + self.assertEqual( + region, + { + 'name': 'region1', + 'values': {'external_network': 'region1-network'}, + }, + ) + + def test_get_region_many_regions(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + region = c._get_region( + cloud='_test_cloud_regions', region_name='region2' + ) + self.assertEqual( + region, + {'name': 'region2', 'values': {'external_network': 'my-network'}}, + ) + + def test_get_region_by_name_no_value(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + region = c._get_region( + cloud='_test_cloud_regions', region_name='region-no-value' + ) + self.assertEqual(region, {'name': 'region-no-value', 'values': {}}) + + def test_get_region_invalid_region(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + self.assertRaises( + exceptions.ConfigException, + c._get_region, + cloud='_test_cloud_regions', + region_name='invalid-region', + ) + + def test_get_region_no_cloud(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.no_yaml], + ) + region = c._get_region(region_name='no-cloud-region') + self.assertEqual(region, {'name': 'no-cloud-region', 'values': {}}) + + def test_get_region_invalid_keys(self): + invalid_conf = base._write_yaml( + { + 'clouds': { + '_test_cloud': { + 'profile': '_test_cloud_in_our_cloud', + 'auth': { + 'auth_url': 'http://example.com/v2', + 'username': 'testuser', + 'password': 'testpass', + }, + 'regions': [ + {'name': 'region1', 'foo': 'bar'}, + ], + } + } + } + ) + c = 
config.OpenStackConfig( + config_files=[invalid_conf], vendor_files=[self.vendor_yaml] + ) + self.assertRaises( + exceptions.ConfigException, + c._get_region, + cloud='_test_cloud', + region_name='region1', + ) + + @mock.patch('openstack.config.cloud_region.keyring') + @mock.patch( + 'keystoneauth1.identity.base.BaseIdentityPlugin.set_auth_state' + ) + def test_load_auth_cache_not_found(self, ks_mock, kr_mock): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], secure_files=[] + ) + c._cache_auth = True + + kr_mock.get_password = mock.Mock(side_effect=[RuntimeError]) + + region = c.get_one('_test-cloud_') + kr_mock.get_password.assert_called_with( + 'openstacksdk', region._auth.get_cache_id() + ) + ks_mock.assert_not_called() + + @mock.patch('openstack.config.cloud_region.keyring') + @mock.patch( + 'keystoneauth1.identity.base.BaseIdentityPlugin.set_auth_state' + ) + def test_load_auth_cache_found(self, ks_mock, kr_mock): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], secure_files=[] + ) + c._cache_auth = True + fake_auth = {'a': 'b'} + + kr_mock.get_password = mock.Mock(return_value=fake_auth) + + region = c.get_one('_test-cloud_') + kr_mock.get_password.assert_called_with( + 'openstacksdk', region._auth.get_cache_id() + ) + ks_mock.assert_called_with(fake_auth) + + @mock.patch('openstack.config.cloud_region.keyring') + def test_set_auth_cache_empty_auth(self, kr_mock): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], secure_files=[] + ) + c._cache_auth = True + + kr_mock.get_password = mock.Mock(side_effect=[RuntimeError]) + kr_mock.set_password = mock.Mock() + + region = c.get_one('_test-cloud_') + + region.set_auth_cache() + kr_mock.set_password.assert_not_called() + + @mock.patch('openstack.config.cloud_region.keyring') + def test_set_auth_cache(self, kr_mock): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], secure_files=[] + ) + c._cache_auth = True + + kr_mock.get_password = 
mock.Mock(side_effect=[RuntimeError]) + kr_mock.set_password = mock.Mock() + + region = c.get_one('_test-cloud_') + region._auth.set_auth_state( + '{"auth_token":"foo", "body":{"token":"bar"}}' + ) + + region.set_auth_cache() + kr_mock.set_password.assert_called_with( + 'openstacksdk', + region._auth.get_cache_id(), + region._auth.get_auth_state(), + ) + + def test_metrics_global(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.secure_yaml], + ) + self.assertIsInstance(c.cloud_config, dict) + cc = c.get_one('_test-cloud_') + statsd = { + 'host': '127.0.0.1', + 'port': '1234', + } + # NOTE(ianw) we don't test/call get__client() because we + # don't want to instantiate the client, which tries to + # connect / do hostname lookups. + self.assertEqual(statsd['host'], cc._statsd_host) + self.assertEqual(statsd['port'], cc._statsd_port) + self.assertEqual('openstack.api', cc.get_statsd_prefix()) + influxdb = { + 'use_udp': True, + 'host': '127.0.0.1', + 'port': '1234', + 'username': 'username', + 'password': 'password', + 'database': 'database', + 'measurement': 'measurement.name', + 'timeout': 10, + } + self.assertEqual(influxdb, cc._influxdb_config) + + def test_metrics_override(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.secure_yaml], + ) + self.assertIsInstance(c.cloud_config, dict) + cc = c.get_one('_test-cloud-override-metrics') + statsd = { + 'host': '127.0.0.1', + 'port': '4321', + 'prefix': 'statsd.override.prefix', + } + self.assertEqual(statsd['host'], cc._statsd_host) + self.assertEqual(statsd['port'], cc._statsd_port) + self.assertEqual(statsd['prefix'], cc.get_statsd_prefix()) + influxdb = { + 'use_udp': True, + 'host': '127.0.0.1', + 'port': '1234', + 'username': 'override-username', + 'password': 'override-password', + 'database': 'override-database', + 'measurement': 'measurement.name', + 
'timeout': 10, + } + self.assertEqual(influxdb, cc._influxdb_config) + + +class TestExcludedFormattedConfigValue(base.TestCase): + # verify https://storyboard.openstack.org/#!/story/1635696 + # + # get_one_cloud() and get_one_cloud_osc() iterate over config + # values and try to expand any variables in those values by + # calling value.format(), however some config values + # (e.g. password) should never have format() applied to them, not + # only might that change the password but it will also cause the + # format() function to raise an exception if it can not parse the + # format string. Examples would be single brace (e.g. 'foo{') + # which raises an ValueError because it's looking for a matching + # end brace or a brace pair with a key value that cannot be found + # (e.g. 'foo{bar}') which raises a KeyError. + + def setUp(self): + super().setUp() + + self.args = dict( + auth_url='http://example.com/v2', + username='user', + project_name='project', + region_name='region2', + snack_type='cookie', + os_auth_token='no-good-things', + ) + + self.options = argparse.Namespace(**self.args) + + def test_get_one_cloud_password_brace(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + password = 'foo{' # Would raise ValueError, single brace + self.options.password = password + cc = c.get_one_cloud( + cloud='_test_cloud_regions', argparse=self.options, validate=False + ) + self.assertEqual(cc.password, password) + + password = 'foo{bar}' # Would raise KeyError, 'bar' not found + self.options.password = password + cc = c.get_one_cloud( + cloud='_test_cloud_regions', argparse=self.options, validate=False + ) + self.assertEqual(cc.password, password) + + def test_get_one_cloud_osc_password_brace(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + password = 'foo{' # Would raise ValueError, single brace + self.options.password = password + cc = 
c.get_one_cloud_osc( + cloud='_test_cloud_regions', argparse=self.options, validate=False + ) + self.assertEqual(cc.password, password) + + password = 'foo{bar}' # Would raise KeyError, 'bar' not found + self.options.password = password + cc = c.get_one_cloud_osc( + cloud='_test_cloud_regions', argparse=self.options, validate=False + ) + self.assertEqual(cc.password, password) + + +class TestConfigArgparse(base.TestCase): + def setUp(self): + super().setUp() + + self.args = dict( + auth_url='http://example.com/v2', + username='user', + password='password', + project_name='project', + region_name='region2', + snack_type='cookie', + os_auth_token='no-good-things', + ) + + self.options = argparse.Namespace(**self.args) + + def test_get_one_bad_region_argparse(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + self.assertRaises( + exceptions.ConfigException, + c.get_one, + cloud='_test-cloud_', + argparse=self.options, + ) + + def test_get_one_argparse(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one( + cloud='_test_cloud_regions', argparse=self.options, validate=False + ) + self.assertEqual(cc.region_name, 'region2') + self.assertEqual(cc.snack_type, 'cookie') + + def test_get_one_precedence(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + kwargs = { + 'auth': { + 'username': 'testuser', + 'password': 'authpass', + 'project-id': 'testproject', + 'auth_url': 'http://example.com/v2', + }, + 'region_name': 'kwarg_region', + 'password': 'ansible_password', + 'arbitrary': 'value', + } + + args = dict( + auth_url='http://example.com/v2', + username='user', + password='argpass', + project_name='project', + region_name='region2', + snack_type='cookie', + ) + + options = argparse.Namespace(**args) + cc = c.get_one(argparse=options, **kwargs) + self.assertEqual(cc.region_name, 
'region2') + self.assertEqual(cc.auth['password'], 'authpass') + self.assertEqual(cc.snack_type, 'cookie') + + def test_get_one_cloud_precedence_osc(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + ) + + kwargs = { + 'auth': { + 'username': 'testuser', + 'password': 'authpass', + 'project-id': 'testproject', + 'auth_url': 'http://example.com/v2', + }, + 'region_name': 'kwarg_region', + 'password': 'ansible_password', + 'arbitrary': 'value', + } + + args = dict( + auth_url='http://example.com/v2', + username='user', + password='argpass', + project_name='project', + region_name='region2', + snack_type='cookie', + ) + + options = argparse.Namespace(**args) + cc = c.get_one_cloud_osc(argparse=options, **kwargs) + self.assertEqual(cc.region_name, 'region2') + self.assertEqual(cc.auth['password'], 'argpass') + self.assertEqual(cc.snack_type, 'cookie') + + def test_get_one_precedence_no_argparse(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + kwargs = { + 'auth': { + 'username': 'testuser', + 'password': 'authpass', + 'project-id': 'testproject', + 'auth_url': 'http://example.com/v2', + }, + 'region_name': 'kwarg_region', + 'password': 'ansible_password', + 'arbitrary': 'value', + } + + cc = c.get_one(**kwargs) + self.assertEqual(cc.region_name, 'kwarg_region') + self.assertEqual(cc.auth['password'], 'authpass') + self.assertIsNone(cc.password) + + def test_get_one_just_argparse(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one(argparse=self.options, validate=False) + self.assertIsNone(cc.cloud) + self.assertEqual(cc.region_name, 'region2') + self.assertEqual(cc.snack_type, 'cookie') + + def test_get_one_just_kwargs(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one(validate=False, **self.args) + 
self.assertIsNone(cc.cloud) + self.assertEqual(cc.region_name, 'region2') + self.assertEqual(cc.snack_type, 'cookie') + + def test_get_one_dash_kwargs(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + args = { + 'auth-url': 'http://example.com/v2', + 'username': 'user', + 'password': 'password', + 'project_name': 'project', + 'region_name': 'other-test-region', + 'snack_type': 'cookie', + } + cc = c.get_one(**args) + self.assertIsNone(cc.cloud) + self.assertEqual(cc.region_name, 'other-test-region') + self.assertEqual(cc.snack_type, 'cookie') + + def test_get_one_no_argparse(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one(cloud='_test-cloud_', argparse=None) + self._assert_cloud_details(cc) + self.assertEqual(cc.region_name, 'test-region') + self.assertIsNone(cc.snack_type) + + def test_get_one_no_argparse_regions(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one(cloud='_test_cloud_regions', argparse=None) + self._assert_cloud_details(cc) + self.assertEqual(cc.region_name, 'region1') + self.assertIsNone(cc.snack_type) + + def test_get_one_bad_region(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + self.assertRaises( + exceptions.ConfigException, + c.get_one, + cloud='_test_cloud_regions', + region_name='bad', + ) + + def test_get_one_bad_region_no_regions(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + self.assertRaises( + exceptions.ConfigException, + c.get_one, + cloud='_test-cloud_', + region_name='bad_region', + ) + + def test_get_one_no_argparse_region2(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one( + cloud='_test_cloud_regions', 
region_name='region2', argparse=None + ) + self._assert_cloud_details(cc) + self.assertEqual(cc.region_name, 'region2') + self.assertIsNone(cc.snack_type) + + def test_get_one_network(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one( + cloud='_test_cloud_regions', region_name='region1', argparse=None + ) + self._assert_cloud_details(cc) + self.assertEqual(cc.region_name, 'region1') + self.assertEqual('region1-network', cc.config['external_network']) + + def test_get_one_per_region_network(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one( + cloud='_test_cloud_regions', region_name='region2', argparse=None + ) + self._assert_cloud_details(cc) + self.assertEqual(cc.region_name, 'region2') + self.assertEqual('my-network', cc.config['external_network']) + + def test_get_one_no_yaml_no_cloud(self): + c = config.OpenStackConfig(load_yaml_config=False) + + self.assertRaises( + exceptions.ConfigException, + c.get_one, + cloud='_test_cloud_regions', + region_name='region2', + argparse=None, + ) + + def test_get_one_no_yaml(self): + c = config.OpenStackConfig(load_yaml_config=False) + + cc = c.get_one( + region_name='region2', + argparse=None, + **base.USER_CONF['clouds']['_test_cloud_regions'], + ) + # Not using assert_cloud_details because of cache settings which + # are not present without the file + self.assertIsInstance(cc, cloud_region.CloudRegion) + self.assertTrue(hasattr(cc, 'auth')) + self.assertIsInstance(cc.auth, dict) + self.assertIsNone(cc.cloud) + self.assertIn('username', cc.auth) + self.assertEqual('testuser', cc.auth['username']) + self.assertEqual('testpass', cc.auth['password']) + self.assertFalse(cc.config['image_api_use_tasks']) + self.assertTrue('project_name' in cc.auth or 'project_id' in cc.auth) + if 'project_name' in cc.auth: + self.assertEqual('testproject', cc.auth['project_name']) + elif 
'project_id' in cc.auth: + self.assertEqual('testproject', cc.auth['project_id']) + self.assertEqual(cc.region_name, 'region2') + + def test_fix_env_args(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + env_args = {'os-compute-api-version': 1} + fixed_args = c._fix_args(env_args) + + self.assertDictEqual({'compute_api_version': 1}, fixed_args) + + def test_extra_config(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + defaults = {'use_hostnames': False, 'other-value': 'something'} + ansible_options = c.get_extra_config('ansible', defaults) + + # This should show that the default for use_hostnames above is + # overridden by the value in the config file defined in base.py + # It should also show that other-value key is normalized and passed + # through even though there is no corresponding value in the config + # file, and that expand-hostvars key is normalized and the value + # from the config comes through even though there is no default. + self.assertDictEqual( + { + 'expand_hostvars': False, + 'use_hostnames': True, + 'other_value': 'something', + }, + ansible_options, + ) + + def test_get_client_config(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one(cloud='_test_cloud_regions') + + defaults = { + 'use_hostnames': False, + 'other-value': 'something', + 'force_ipv4': False, + } + ansible_options = cc.get_client_config('ansible', defaults) + + # This should show that the default for use_hostnames and force_ipv4 + # above is overridden by the value in the config file defined in + # base.py + # It should also show that other-value key is normalized and passed + # through even though there is no corresponding value in the config + # file, and that expand-hostvars key is normalized and the value + # from the config comes through even though there is no default. 
+ self.assertDictEqual( + { + 'expand_hostvars': False, + 'use_hostnames': True, + 'other_value': 'something', + 'force_ipv4': True, + }, + ansible_options, + ) + + def test_register_argparse_cloud(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + parser = argparse.ArgumentParser() + c.register_argparse_arguments(parser, []) + opts, _remain = parser.parse_known_args(['--os-cloud', 'foo']) + self.assertEqual(opts.os_cloud, 'foo') + + def test_env_argparse_precedence(self): + self.useFixture( + fixtures.EnvironmentVariable('OS_TENANT_NAME', 'tenants-are-bad') + ) + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + cc = c.get_one(cloud='envvars', argparse=self.options, validate=False) + self.assertEqual(cc.auth['project_name'], 'project') + + def test_argparse_default_no_token(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + parser = argparse.ArgumentParser() + c.register_argparse_arguments(parser, []) + # novaclient will add this + parser.add_argument('--os-auth-token') + opts, _remain = parser.parse_known_args() + cc = c.get_one(cloud='_test_cloud_regions', argparse=opts) + self.assertEqual(cc.config['auth_type'], 'password') + self.assertNotIn('token', cc.config['auth']) + + def test_argparse_token(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + + parser = argparse.ArgumentParser() + c.register_argparse_arguments(parser, []) + # novaclient will add this + parser.add_argument('--os-auth-token') + opts, _remain = parser.parse_known_args( + ['--os-auth-token', 'very-bad-things', '--os-auth-type', 'token'] + ) + cc = c.get_one(argparse=opts, validate=False) + self.assertEqual(cc.config['auth_type'], 'token') + self.assertEqual(cc.config['auth']['token'], 'very-bad-things') + + def test_argparse_underscores(self): + c = 
config.OpenStackConfig( + config_files=[self.no_yaml], + vendor_files=[self.no_yaml], + secure_files=[self.no_yaml], + ) + parser = argparse.ArgumentParser() + parser.add_argument('--os_username') + argv = [ + '--os_username', + 'user', + '--os_password', + 'pass', + '--os-auth-url', + 'auth-url', + '--os-project-name', + 'project', + ] + c.register_argparse_arguments(parser, argv=argv) + opts, _remain = parser.parse_known_args(argv) + cc = c.get_one(argparse=opts) + self.assertEqual(cc.config['auth']['username'], 'user') + self.assertEqual(cc.config['auth']['password'], 'pass') + self.assertEqual(cc.config['auth']['auth_url'], 'auth-url') + + def test_argparse_action_append_no_underscore(self): + c = config.OpenStackConfig( + config_files=[self.no_yaml], + vendor_files=[self.no_yaml], + secure_files=[self.no_yaml], + ) + parser = argparse.ArgumentParser() + parser.add_argument('--foo', action='append') + argv = ['--foo', '1', '--foo', '2'] + c.register_argparse_arguments(parser, argv=argv) + opts, _remain = parser.parse_known_args(argv) + self.assertEqual(opts.foo, ['1', '2']) + + def test_argparse_underscores_duplicate(self): + c = config.OpenStackConfig( + config_files=[self.no_yaml], + vendor_files=[self.no_yaml], + secure_files=[self.no_yaml], + ) + parser = argparse.ArgumentParser() + parser.add_argument('--os_username') + argv = [ + '--os_username', + 'user', + '--os_password', + 'pass', + '--os-username', + 'user1', + '--os-password', + 'pass1', + '--os-auth-url', + 'auth-url', + '--os-project-name', + 'project', + ] + self.assertRaises( + exceptions.ConfigException, + c.register_argparse_arguments, + parser=parser, + argv=argv, + ) + + def test_register_argparse_bad_plugin(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + parser = argparse.ArgumentParser() + self.assertRaises( + exceptions.ConfigException, + c.register_argparse_arguments, + parser, + ['--os-auth-type', 'foo'], + ) + + def 
test_register_argparse_not_password(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + parser = argparse.ArgumentParser() + args = [ + '--os-auth-type', + 'v3token', + '--os-token', + 'some-secret', + ] + c.register_argparse_arguments(parser, args) + opts, _remain = parser.parse_known_args(args) + self.assertEqual(opts.os_token, 'some-secret') + + def test_register_argparse_password(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + parser = argparse.ArgumentParser() + args = [ + '--os-password', + 'some-secret', + ] + c.register_argparse_arguments(parser, args) + opts, _remain = parser.parse_known_args(args) + self.assertEqual(opts.os_password, 'some-secret') + with testtools.ExpectedException(AttributeError): + opts.os_token + + def test_register_argparse_service_type(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + parser = argparse.ArgumentParser() + args = [ + '--os-service-type', + 'network', + '--os-endpoint-type', + 'admin', + '--http-timeout', + '20', + ] + c.register_argparse_arguments(parser, args) + opts, _remain = parser.parse_known_args(args) + self.assertEqual(opts.os_service_type, 'network') + self.assertEqual(opts.os_endpoint_type, 'admin') + self.assertEqual(opts.http_timeout, '20') + with testtools.ExpectedException(AttributeError): + opts.os_network_service_type + cloud = c.get_one(argparse=opts, validate=False) + self.assertEqual(cloud.config['service_type'], 'network') + self.assertEqual(cloud.config['interface'], 'admin') + self.assertEqual(cloud.config['api_timeout'], '20') + self.assertNotIn('http_timeout', cloud.config) + + def test_register_argparse_network_service_type(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + parser = argparse.ArgumentParser() + args = [ + '--os-endpoint-type', + 'admin', + 
'--network-api-version', + '4', + ] + c.register_argparse_arguments(parser, args, ['network']) + opts, _remain = parser.parse_known_args(args) + self.assertEqual(opts.os_service_type, 'network') + self.assertEqual(opts.os_endpoint_type, 'admin') + self.assertIsNone(opts.os_network_service_type) + self.assertIsNone(opts.os_network_api_version) + self.assertEqual(opts.network_api_version, '4') + cloud = c.get_one(argparse=opts, validate=False) + self.assertEqual(cloud.config['service_type'], 'network') + self.assertEqual(cloud.config['interface'], 'admin') + self.assertEqual(cloud.config['network_api_version'], '4') + self.assertNotIn('http_timeout', cloud.config) + + def test_register_argparse_network_service_types(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + parser = argparse.ArgumentParser() + args = [ + '--os-compute-service-name', + 'cloudServers', + '--os-network-service-type', + 'badtype', + '--os-endpoint-type', + 'admin', + '--network-api-version', + '4', + ] + c.register_argparse_arguments( + parser, args, ['compute', 'network', 'volume'] + ) + opts, _remain = parser.parse_known_args(args) + self.assertEqual(opts.os_network_service_type, 'badtype') + self.assertIsNone(opts.os_compute_service_type) + self.assertIsNone(opts.os_volume_service_type) + self.assertEqual(opts.os_service_type, 'compute') + self.assertEqual(opts.os_compute_service_name, 'cloudServers') + self.assertEqual(opts.os_endpoint_type, 'admin') + self.assertIsNone(opts.os_network_api_version) + self.assertEqual(opts.network_api_version, '4') + cloud = c.get_one(argparse=opts, validate=False) + self.assertEqual(cloud.config['service_type'], 'compute') + self.assertEqual(cloud.config['network_service_type'], 'badtype') + self.assertEqual(cloud.config['interface'], 'admin') + self.assertEqual(cloud.config['network_api_version'], '4') + self.assertNotIn('volume_service_type', cloud.config) + self.assertNotIn('http_timeout', 
cloud.config) + + +class TestConfigPrompt(base.TestCase): + def setUp(self): + super().setUp() + + self.args = dict( + auth_url='http://example.com/v2', + username='user', + project_name='project', + # region_name='region2', + auth_type='password', + ) + + self.options = argparse.Namespace(**self.args) + + def test_get_one_prompt(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + pw_func=prompt_for_password, + ) + + # This needs a cloud definition without a password. + # If this starts failing unexpectedly check that the cloud_yaml + # and/or vendor_yaml do not have a password in the selected cloud. + cc = c.get_one( + cloud='_test_cloud_no_vendor', + argparse=self.options, + ) + self.assertEqual('promptpass', cc.auth['password']) + + +class TestConfigDefault(base.TestCase): + def setUp(self): + super().setUp() + + # Reset defaults after each test so that other tests are + # not affected by any changes. + self.addCleanup(self._reset_defaults) + + def _reset_defaults(self): + defaults._defaults = None + + def test_set_no_default(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one(cloud='_test-cloud_', argparse=None) + self._assert_cloud_details(cc) + self.assertEqual('password', cc.auth_type) + + +class TestMagicFixes(base.TestCase): + def _test_magic_fixes(self, cloud, expected): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + result = c.magic_fixes(cloud) + self.assertEqual(expected, result) + + def test_set_no_default(self): + cloud = { + 'auth': {}, + 'identity_endpoint_type': 'admin', + 'compute_endpoint_type': 'private', + 'endpoint_type': 'public', + 'auth_type': 'v3password', + } + expected = { + 'identity_interface': 'admin', + 'compute_interface': 'private', + 'interface': 'public', + 'auth_type': 'v3password', + 'auth': {}, + 'networks': [], + } + 
self._test_magic_fixes(cloud, expected) + + def test_project_v2password(self): + cloud = { + 'auth_type': 'v2password', + 'auth': { + 'project-name': 'my_project_name', + 'project-id': 'my_project_id', + }, + } + expected = { + 'auth_type': 'v2password', + 'auth': { + 'tenant_name': 'my_project_name', + 'tenant_id': 'my_project_id', + }, + 'networks': [], + } + self._test_magic_fixes(cloud, expected) + + def test_project_password(self): + cloud = { + 'auth_type': 'password', + 'auth': { + 'project-name': 'my_project_name', + 'project-id': 'my_project_id', + }, + } + expected = { + 'auth_type': 'password', + 'auth': { + 'project_name': 'my_project_name', + 'project_id': 'my_project_id', + }, + 'networks': [], + } + self._test_magic_fixes(cloud, expected) + + def test_project_conflict_priority(self): + """The order of priority should be + 1: env or cli settings + 2: setting from 'auth' section of clouds.yaml + + The ordering of #1 is important so that operators can use domain-wide + inherited credentials in clouds.yaml. 
+ """ + + cloud = { + 'auth_type': 'password', + 'auth': { + 'project_id': 'my_project_id', + }, + } + expected = { + 'auth_type': 'password', + 'auth': { + 'project_id': 'my_project_id', + }, + 'networks': [], + } + self._test_magic_fixes(cloud, expected) + + cloud = { + 'auth_type': 'password', + 'auth': { + 'project_id': 'my_project_id', + }, + 'project_id': 'different_project_id', + } + expected = { + 'auth_type': 'password', + 'auth': { + 'project_id': 'different_project_id', + }, + 'networks': [], + } + self._test_magic_fixes(cloud, expected) + + def test_backwards_network_fail(self): + cloud = { + 'auth': {}, + 'external_network': 'public', + 'networks': [ + {'name': 'private', 'routes_externally': False}, + ], + } + self.assertRaises( + exceptions.ConfigException, self._test_magic_fixes, cloud, {} + ) + + def test_backwards_network(self): + cloud = { + 'auth': {}, + 'external_network': 'public', + 'internal_network': 'private', + } + expected = { + 'auth': {}, + 'auth_type': None, + 'external_network': 'public', + 'internal_network': 'private', + 'networks': [ + { + 'name': 'public', + 'routes_externally': True, + 'nat_destination': False, + 'default_interface': True, + }, + { + 'name': 'private', + 'routes_externally': False, + 'nat_destination': True, + 'default_interface': False, + }, + ], + } + self._test_magic_fixes(cloud, expected) + + def test_normalize_network(self): + cloud = {'auth': {}, 'networks': [{'name': 'private'}]} + expected = { + 'auth': {}, + 'auth_type': None, + 'networks': [ + { + 'name': 'private', + 'routes_externally': False, + 'nat_destination': False, + 'default_interface': False, + 'nat_source': False, + 'routes_ipv4_externally': False, + 'routes_ipv6_externally': False, + }, + ], + } + self._test_magic_fixes(cloud, expected) + + def test_single_default_interface(self): + cloud = { + 'auth': {}, + 'networks': [ + {'name': 'blue', 'default_interface': True}, + {'name': 'purple', 'default_interface': True}, + ], + } + 
self.assertRaises( + exceptions.ConfigException, self._test_magic_fixes, cloud, {} + ) + + def test_token_auth(self): + expected = { + "auth_type": "v3token", + "auth": { + "token": "my_token", + }, + 'networks': [], + } + + cloud = { + "auth_type": "v3token", + "auth": { + "token": "my_token", + }, + } + self._test_magic_fixes(cloud, expected) + + cloud = { + "auth_type": "v3token", + "auth": { + "auth_token": "my_token", + }, + } + self._test_magic_fixes(cloud, expected) + + cloud = { + "auth_type": "v3token", + "auth": { + "auth-token": "my_token", + }, + } + self._test_magic_fixes(cloud, expected) + + cloud = { + "auth_type": "v3token", + "auth": {}, + "token": "my_token", + } + self._test_magic_fixes(cloud, expected) + + cloud = { + "auth_type": "v3token", + "auth": {}, + "auth_token": "my_token", + } + self._test_magic_fixes(cloud, expected) + + cloud = { + "auth_type": "v3token", + "auth": {}, + "auth-token": "my_token", + } + self._test_magic_fixes(cloud, expected) + + # test priority + cloud = { + "auth_type": "v3token", + "auth": { + "token": "I will be ignored", + }, + "token": "my_token", + } + self._test_magic_fixes(cloud, expected) + + def test_passcode(self): + cloud = { + "auth": {}, + "passcode": "totp", + } + expected = { + "auth": { + "passcode": "totp", + }, + 'auth_type': None, + 'networks': [], + } + self._test_magic_fixes(cloud, expected) + + def test_endpoint_type_to_interface(self): + cloud = { + 'auth': {}, + "endpoint_type": "public", + "foo_endpoint_type": "internal", + } + expected = { + "auth": {}, + 'auth_type': None, + 'networks': [], + "interface": "public", + "foo_interface": "internal", + } + self._test_magic_fixes(cloud, expected) + + def test_bool_keys(self): + cloud = {"auth": {}} + cloud.update({k: "True" for k in loader.BOOL_KEYS}) + expected = { + "auth_type": None, + "auth": {}, + "networks": [], + } + expected.update({k: True for k in loader.BOOL_KEYS}) + self._test_magic_fixes(cloud, expected) + + def test_csv_keys(self): 
+ cloud = {"auth": {}} + cloud.update({k: "spam,ham" for k in loader.CSV_KEYS}) + expected = { + "auth_type": None, + "auth": {}, + "networks": [], + } + expected.update({k: ["spam", "ham"] for k in loader.CSV_KEYS}) + self._test_magic_fixes(cloud, expected) + + def test_auth_url_formatting(self): + cloud = { + "auth": { + "auth_url": "https://my.cloud/{region_id}", + }, + "region_id": "RegionOne", + } + expected = { + "auth_type": None, + "auth": { + "auth_url": "https://my.cloud/RegionOne", + }, + "region_id": "RegionOne", + "networks": [], + } + self._test_magic_fixes(cloud, expected) diff --git a/openstack/tests/unit/config/test_environ.py b/openstack/tests/unit/config/test_environ.py new file mode 100644 index 0000000000..53e1bd7f23 --- /dev/null +++ b/openstack/tests/unit/config/test_environ.py @@ -0,0 +1,205 @@ +# Copyright (c) 2014 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import fixtures + +from openstack import config +from openstack.config import cloud_region +from openstack import exceptions +from openstack.tests.unit.config import base + + +class TestEnviron(base.TestCase): + def setUp(self): + super().setUp() + self.useFixture( + fixtures.EnvironmentVariable('OS_AUTH_URL', 'https://example.com') + ) + self.useFixture( + fixtures.EnvironmentVariable('OS_USERNAME', 'testuser') + ) + self.useFixture( + fixtures.EnvironmentVariable('OS_PASSWORD', 'testpass') + ) + self.useFixture( + fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'testproject') + ) + self.useFixture( + fixtures.EnvironmentVariable('NOVA_PROJECT_ID', 'testnova') + ) + + def test_get_one(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + self.assertIsInstance(c.get_one(), cloud_region.CloudRegion) + + def test_no_fallthrough(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + self.assertRaises(exceptions.ConfigException, c.get_one, 'openstack') + + def test_envvar_name_override(self): + self.useFixture( + fixtures.EnvironmentVariable('OS_CLOUD_NAME', 'override') + ) + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('override') + self._assert_cloud_details(cc) + + def test_envvar_prefer_ipv6_override(self): + self.useFixture( + fixtures.EnvironmentVariable('OS_PREFER_IPV6', 'false') + ) + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.secure_yaml], + ) + cc = c.get_one('_test-cloud_') + self.assertFalse(cc.prefer_ipv6) + + def test_environ_exists(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.secure_yaml], + ) + cc = c.get_one('envvars') + self._assert_cloud_details(cc) + self.assertNotIn('auth_url', cc.config) + self.assertIn('auth_url', 
cc.config['auth']) + self.assertNotIn('project_id', cc.config['auth']) + self.assertNotIn('auth_url', cc.config) + cc = c.get_one('_test-cloud_') + self._assert_cloud_details(cc) + cc = c.get_one('_test_cloud_no_vendor') + self._assert_cloud_details(cc) + + def test_environ_prefix(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + envvar_prefix='NOVA_', + secure_files=[self.secure_yaml], + ) + cc = c.get_one('envvars') + self._assert_cloud_details(cc) + self.assertNotIn('auth_url', cc.config) + self.assertIn('auth_url', cc.config['auth']) + self.assertIn('project_id', cc.config['auth']) + self.assertNotIn('auth_url', cc.config) + cc = c.get_one('_test-cloud_') + self._assert_cloud_details(cc) + cc = c.get_one('_test_cloud_no_vendor') + self._assert_cloud_details(cc) + + def test_get_one_with_config_files(self): + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + secure_files=[self.secure_yaml], + ) + self.assertIsInstance(c.cloud_config, dict) + self.assertIn('cache', c.cloud_config) + self.assertIsInstance(c.cloud_config['cache'], dict) + self.assertIn('max_age', c.cloud_config['cache']) + self.assertIn('path', c.cloud_config['cache']) + cc = c.get_one('_test-cloud_') + self._assert_cloud_details(cc) + cc = c.get_one('_test_cloud_no_vendor') + self._assert_cloud_details(cc) + + def test_config_file_override(self): + self.useFixture( + fixtures.EnvironmentVariable( + 'OS_CLIENT_CONFIG_FILE', self.cloud_yaml + ) + ) + c = config.OpenStackConfig( + config_files=[], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('_test-cloud_') + self._assert_cloud_details(cc) + + +class TestEnvvars(base.TestCase): + def test_no_envvars(self): + self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + self.assertRaises(exceptions.ConfigException, c.get_one, 
'envvars') + + def test_test_envvars(self): + self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) + self.useFixture( + fixtures.EnvironmentVariable('OS_STDERR_CAPTURE', 'True') + ) + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + self.assertRaises(exceptions.ConfigException, c.get_one, 'envvars') + + def test_incomplete_envvars(self): + self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) + self.useFixture(fixtures.EnvironmentVariable('OS_USERNAME', 'user')) + config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + # This is broken due to an issue that's fixed in a subsequent patch + # commenting it out in this patch to keep the patch size reasonable + # self.assertRaises( + # keystoneauth1.exceptions.auth_plugins.MissingRequiredOptions, + # c.get_one, 'envvars') + + def test_have_envvars(self): + self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) + self.useFixture( + fixtures.EnvironmentVariable('OS_AUTH_URL', 'http://example.com') + ) + self.useFixture(fixtures.EnvironmentVariable('OS_USERNAME', 'user')) + self.useFixture( + fixtures.EnvironmentVariable('OS_PASSWORD', 'password') + ) + self.useFixture( + fixtures.EnvironmentVariable('OS_PROJECT_NAME', 'project') + ) + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], vendor_files=[self.vendor_yaml] + ) + cc = c.get_one('envvars') + self.assertEqual(cc.config['auth']['username'], 'user') + + def test_old_envvars(self): + self.useFixture(fixtures.EnvironmentVariable('NOVA_USERNAME', 'nova')) + self.useFixture( + fixtures.EnvironmentVariable('NOVA_AUTH_URL', 'http://example.com') + ) + self.useFixture( + fixtures.EnvironmentVariable('NOVA_PASSWORD', 'password') + ) + self.useFixture( + fixtures.EnvironmentVariable('NOVA_PROJECT_NAME', 'project') + ) + c = config.OpenStackConfig( + config_files=[self.cloud_yaml], + vendor_files=[self.vendor_yaml], + 
envvar_prefix='NOVA_', + ) + cc = c.get_one('envvars') + self.assertEqual(cc.config['auth']['username'], 'nova') diff --git a/openstack/tests/unit/config/test_from_conf.py b/openstack/tests/unit/config/test_from_conf.py new file mode 100644 index 0000000000..032d72c426 --- /dev/null +++ b/openstack/tests/unit/config/test_from_conf.py @@ -0,0 +1,369 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from keystoneauth1 import exceptions as ks_exc +import requests.exceptions + +from openstack.config import cloud_region +from openstack import connection +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFromConf(base.TestCase): + def _get_conn(self, **from_conf_kwargs): + oslocfg = self._load_ks_cfg_opts() + # Throw name in here to prove **kwargs is working + config = cloud_region.from_conf( + oslocfg, + session=self.cloud.session, + name='from_conf.example.com', + **from_conf_kwargs, + ) + self.assertEqual('from_conf.example.com', config.name) + + return connection.Connection(config=config, strict_proxies=True) + + def test_adapter_opts_set(self): + """Adapter opts specified in the conf.""" + conn = self._get_conn() + + discovery = { + "versions": { + "values": [ + { + "status": "stable", + "updated": "2019-06-01T00:00:00Z", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.heat-v2+json", # noqa: E501 + } + ], + "id": "v2.0", + "links": [ + { + 
"href": "https://example.org:8888/heat/v2", + "rel": "self", + } + ], + } + ] + } + } + self.register_uris( + [ + dict( + method='GET', + uri='https://example.org:8888/heat/v2', + json=discovery, + ), + dict( + method='GET', + uri='https://example.org:8888/heat/v2/foo', + json={'foo': {}}, + ), + ] + ) + + adap = conn.orchestration + self.assertEqual('SpecialRegion', adap.region_name) + self.assertEqual('orchestration', adap.service_type) + self.assertEqual('internal', adap.interface) + self.assertEqual( + 'https://example.org:8888/heat/v2', adap.endpoint_override + ) + + adap.get('/foo') + self.assert_calls() + + def test_default_adapter_opts(self): + """Adapter opts are registered, but all defaulting in conf.""" + conn = self._get_conn() + + server_id = str(uuid.uuid4()) + server_name = self.getUniqueString('name') + fake_server = fakes.make_fake_server(server_id, server_name) + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': [fake_server]}, + ), + ] + ) + + # Nova has empty adapter config, so these default + adap = conn.compute + self.assertIsNone(adap.region_name) + self.assertEqual('compute', adap.service_type) + self.assertEqual('public', adap.interface) + self.assertIsNone(adap.endpoint_override) + + s = next(adap.servers()) + self.assertEqual(s.id, server_id) + self.assertEqual(s.name, server_name) + self.assert_calls() + + def test_service_not_ready_catalog(self): + """Adapter opts are registered, but all defaulting in conf.""" + conn = self._get_conn() + + server_id = str(uuid.uuid4()) + server_name = self.getUniqueString('name') + fake_server = fakes.make_fake_server(server_id, server_name) + + self.register_uris( + [ + dict( + method='GET', + uri='https://compute.example.com/v2.1/', + exc=requests.exceptions.ConnectionError, + ), + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + 
uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': [fake_server]}, + ), + ] + ) + + self.assertRaises( + exceptions.ServiceDiscoveryException, getattr, conn, 'compute' + ) + + # Nova has empty adapter config, so these default + adap = conn.compute + self.assertIsNone(adap.region_name) + self.assertEqual('compute', adap.service_type) + self.assertEqual('public', adap.interface) + self.assertIsNone(adap.endpoint_override) + + s = next(adap.servers()) + self.assertEqual(s.id, server_id) + self.assertEqual(s.name, server_name) + self.assert_calls() + + def test_name_with_dashes(self): + conn = self._get_conn() + + discovery = { + "versions": { + "values": [ + { + "status": "stable", + "id": "v1", + "links": [ + { + "href": "https://example.org:5050/v1", + "rel": "self", + } + ], + } + ] + } + } + status = {'finished': True, 'error': None} + self.register_uris( + [ + dict( + method='GET', + uri='https://example.org:5050', + json=discovery, + ), + # strict-proxies means we're going to fetch the discovery + # doc from the versioned endpoint to verify it works. 
+ dict( + method='GET', + uri='https://example.org:5050/v1', + json=discovery, + ), + dict( + method='GET', + uri='https://example.org:5050/v1/introspection/abcd', + json=status, + ), + ] + ) + + adap = conn.baremetal_introspection + self.assertEqual('baremetal-introspection', adap.service_type) + self.assertEqual('public', adap.interface) + self.assertEqual('https://example.org:5050/v1', adap.endpoint_override) + + self.assertTrue(adap.get_introspection('abcd').is_finished) + + def test_service_not_ready_endpoint_override(self): + conn = self._get_conn() + + discovery = { + "versions": { + "values": [ + { + "status": "stable", + "id": "v1", + "links": [ + { + "href": "https://example.org:5050/v1", + "rel": "self", + } + ], + } + ] + } + } + status = {'finished': True, 'error': None} + self.register_uris( + [ + dict( + method='GET', + uri='https://example.org:5050', + exc=requests.exceptions.ConnectTimeout, + ), + dict( + method='GET', + uri='https://example.org:5050', + json=discovery, + ), + # strict-proxies means we're going to fetch the discovery + # doc from the versioned endpoint to verify it works. + dict( + method='GET', + uri='https://example.org:5050/v1', + json=discovery, + ), + dict( + method='GET', + uri='https://example.org:5050/v1/introspection/abcd', + json=status, + ), + ] + ) + + self.assertRaises( + exceptions.ServiceDiscoveryException, + getattr, + conn, + 'baremetal_introspection', + ) + + adap = conn.baremetal_introspection + self.assertEqual('baremetal-introspection', adap.service_type) + self.assertEqual('public', adap.interface) + self.assertEqual('https://example.org:5050/v1', adap.endpoint_override) + + self.assertTrue(adap.get_introspection('abcd').is_finished) + + def assert_service_disabled( + self, service_type, expected_reason, **from_conf_kwargs + ): + conn = self._get_conn(**from_conf_kwargs) + # The _ServiceDisabledProxyShim loads up okay... + adap = getattr(conn, service_type) + # ...but freaks out if you try to use it. 
+ ex = self.assertRaises( + exceptions.ServiceDisabledException, getattr, adap, 'get' + ) + self.assertIn( + f"Service '{service_type}' is disabled because its configuration " + "could not be loaded.", + ex.message, + ) + self.assertIn(expected_reason, ex.message) + + def test_no_such_conf_section(self): + """No conf section (therefore no adapter opts) for service type.""" + del self.oslo_config_dict['heat'] + self.assert_service_disabled( + 'orchestration', + "No section for project 'heat' (service type 'orchestration') was " + "present in the config.", + ) + + def test_no_such_conf_section_ignore_service_type(self): + """Ignore absent conf section if service type not requested.""" + del self.oslo_config_dict['heat'] + self.assert_service_disabled( + 'orchestration', + "Not in the list of requested service_types.", + # 'orchestration' absent from this list + service_types=['compute'], + ) + + def test_no_adapter_opts(self): + """Conf section present, but opts for service type not registered.""" + self.oslo_config_dict['heat'] = None + self.assert_service_disabled( + 'orchestration', + "Encountered an exception attempting to process config for " + "project 'heat' (service type 'orchestration'): no such option", + ) + + def test_no_adapter_opts_ignore_service_type(self): + """Ignore unregistered conf section if service type not requested.""" + self.oslo_config_dict['heat'] = None + self.assert_service_disabled( + 'orchestration', + "Not in the list of requested service_types.", + # 'orchestration' absent from this list + service_types=['compute'], + ) + + def test_invalid_adapter_opts(self): + """Adapter opts are bogus, in exception-raising ways.""" + self.oslo_config_dict['heat'] = { + 'interface': 'public', + 'valid_interfaces': 'private', + } + self.assert_service_disabled( + 'orchestration', + "Encountered an exception attempting to process config for " + "project 'heat' (service type 'orchestration'): interface and " + "valid_interfaces are mutually 
exclusive.", + ) + + def test_no_session(self): + # TODO(efried): Currently calling without a Session is not implemented. + self.assertRaises( + exceptions.ConfigException, + cloud_region.from_conf, + self._load_ks_cfg_opts(), + ) + + def test_no_endpoint(self): + """Conf contains adapter opts, but service type not in catalog.""" + self.os_fixture.v3_token.remove_service('monitoring') + conn = self._get_conn() + # Monasca is not in the service catalog + self.assertRaises( + ks_exc.catalog.EndpointNotFound, getattr, conn, 'monitoring' + ) + + def test_no_endpoint_ignore_service_type(self): + """Bogus service type disabled if not in requested service_types.""" + self.assert_service_disabled( + 'monitoring', + "Not in the list of requested service_types.", + # 'monitoring' absent from this list + service_types={'compute', 'orchestration', 'bogus'}, + ) diff --git a/openstack/tests/unit/config/test_from_session.py b/openstack/tests/unit/config/test_from_session.py new file mode 100644 index 0000000000..b96692c0bc --- /dev/null +++ b/openstack/tests/unit/config/test_from_session.py @@ -0,0 +1,60 @@ +# Copyright 2018 Red Hat, Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from testscenarios import load_tests_apply_scenarios as load_tests # noqa + +from openstack.config import cloud_region +from openstack import connection +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestFromSession(base.TestCase): + scenarios = [ + ('no_region', dict(test_region=None)), + ('with_region', dict(test_region='RegionOne')), + ] + + def test_from_session(self): + config = cloud_region.from_session( + self.cloud.session, region_name=self.test_region + ) + self.assertEqual(config.name, 'identity.example.com') + if not self.test_region: + self.assertIsNone(config.region_name) + else: + self.assertEqual(config.region_name, self.test_region) + + server_id = str(uuid.uuid4()) + server_name = self.getUniqueString('name') + fake_server = fakes.make_fake_server(server_id, server_name) + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'detail'] + ), + json={'servers': [fake_server]}, + ), + ] + ) + + conn = connection.Connection(config=config) + s = next(conn.compute.servers()) + self.assertEqual(s.id, server_id) + self.assertEqual(s.name, server_name) + self.assert_calls() diff --git a/openstack/tests/unit/config/test_init.py b/openstack/tests/unit/config/test_init.py new file mode 100644 index 0000000000..c1813feb04 --- /dev/null +++ b/openstack/tests/unit/config/test_init.py @@ -0,0 +1,34 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import argparse + +import openstack.config +from openstack.tests.unit.config import base + + +class TestInit(base.TestCase): + def test_get_cloud_region_without_arg_parser(self): + cloud_region = openstack.config.get_cloud_region( + options=None, validate=False + ) + self.assertIsInstance( + cloud_region, openstack.config.cloud_region.CloudRegion + ) + + def test_get_cloud_region_with_arg_parser(self): + cloud_region = openstack.config.get_cloud_region( + options=argparse.ArgumentParser(), validate=False + ) + self.assertIsInstance( + cloud_region, openstack.config.cloud_region.CloudRegion + ) diff --git a/openstack/tests/unit/config/test_json.py b/openstack/tests/unit/config/test_json.py new file mode 100644 index 0000000000..da3b9d2fbe --- /dev/null +++ b/openstack/tests/unit/config/test_json.py @@ -0,0 +1,68 @@ +# Copyright (c) 2015 Hewlett-Packard Development Company, L.P. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import glob +import json +import os + +import jsonschema +from testtools import content + +from openstack.config import defaults +from openstack.tests.unit.config import base + + +class TestConfig(base.TestCase): + def json_diagnostics(self, exc_info): + self.addDetail('filename', content.text_content(self.filename)) + for error in sorted(self.validator.iter_errors(self.json_data)): + self.addDetail('jsonschema', content.text_content(str(error))) + + def test_defaults_valid_json(self): + _schema_path = os.path.join( + os.path.dirname(os.path.realpath(defaults.__file__)), 'schema.json' + ) + with open(_schema_path) as f: + schema = json.load(f) + self.validator = jsonschema.Draft4Validator(schema) + self.addOnException(self.json_diagnostics) + + self.filename = os.path.join( + os.path.dirname(os.path.realpath(defaults.__file__)), + 'defaults.json', + ) + with open(self.filename) as f: + self.json_data = json.load(f) + + self.assertTrue(self.validator.is_valid(self.json_data)) + + def test_vendors_valid_json(self): + _schema_path = os.path.join( + os.path.dirname(os.path.realpath(defaults.__file__)), + 'vendor-schema.json', + ) + with open(_schema_path) as f: + schema = json.load(f) + self.validator = jsonschema.Draft4Validator(schema) + + self.addOnException(self.json_diagnostics) + + _vendors_path = os.path.join( + os.path.dirname(os.path.realpath(defaults.__file__)), 'vendors' + ) + for self.filename in glob.glob(os.path.join(_vendors_path, '*.json')): + with open(self.filename) as f: + self.json_data = json.load(f) + + self.assertTrue(self.validator.is_valid(self.json_data)) diff --git a/openstack/tests/unit/config/test_loader.py b/openstack/tests/unit/config/test_loader.py new file mode 100644 index 0000000000..710921cfcc --- /dev/null +++ b/openstack/tests/unit/config/test_loader.py @@ -0,0 +1,176 @@ +# Copyright 2020 Red Hat, Inc. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import os +import tempfile +import textwrap + +from openstack.config import loader +from openstack import exceptions +from openstack.tests.unit.config import base + +FILES = { + 'yaml': textwrap.dedent( + ''' + foo: bar + baz: + - 1 + - 2 + - 3 + ''' + ), + 'json': textwrap.dedent( + ''' + { + "foo": "bar", + "baz": [ + 1, + 2, + 3 + ] + } + ''' + ), + 'txt': textwrap.dedent( + ''' + foo + bar baz + test + one two + ''' + ), +} + + +class TestLoader(base.TestCase): + def test_base_load_yaml_json_file(self): + with tempfile.TemporaryDirectory() as tmpdir: + tested_files = [] + for key, value in FILES.items(): + fn = os.path.join(tmpdir, f'file.{key}') + with open(fn, 'w+') as fp: + fp.write(value) + tested_files.append(fn) + + path, _ = loader.OpenStackConfig()._load_yaml_json_file( + tested_files + ) + # NOTE(hberaud): Prefer to test path rather than file because + # our FILES var is a dict so results are appened + # without keeping the initial order (python 3.5) + self.assertEqual(tmpdir, os.path.dirname(path)) + + def test__load_yaml_json_file_without_json(self): + with tempfile.TemporaryDirectory() as tmpdir: + tested_files = [] + for key, value in FILES.items(): + if key == 'json': + continue + fn = os.path.join(tmpdir, f'file.{key}') + with open(fn, 'w+') as fp: + fp.write(value) + tested_files.append(fn) + + path, _ = loader.OpenStackConfig()._load_yaml_json_file( + tested_files + ) + # NOTE(hberaud): Prefer to 
test path rather than file because + # our FILES var is a dict so results are appened + # without keeping the initial order (python 3.5) + self.assertEqual(tmpdir, os.path.dirname(path)) + + def test__load_yaml_json_file_without_json_yaml(self): + with tempfile.TemporaryDirectory() as tmpdir: + tested_files = [] + fn = os.path.join(tmpdir, 'file.txt') + with open(fn, 'w+') as fp: + fp.write(FILES['txt']) + tested_files.append(fn) + + path, _ = loader.OpenStackConfig()._load_yaml_json_file( + tested_files + ) + self.assertEqual(fn, path) + + def test__load_yaml_json_file_without_perm(self): + with tempfile.TemporaryDirectory() as tmpdir: + tested_files = [] + fn = os.path.join(tmpdir, 'file.txt') + with open(fn, 'w+') as fp: + fp.write(FILES['txt']) + os.chmod(fn, 0o222) + tested_files.append(fn) + + path, _ = loader.OpenStackConfig()._load_yaml_json_file( + tested_files + ) + self.assertEqual(None, path) + + def test__load_yaml_json_file_nonexisting(self): + tested_files = [] + fn = os.path.join('/fake', 'file.txt') + tested_files.append(fn) + + path, _ = loader.OpenStackConfig()._load_yaml_json_file(tested_files) + self.assertEqual(None, path) + + +class TestFixArgv(base.TestCase): + def test_no_changes(self): + argv = [ + '-a', + '-b', + '--long-arg', + '--multi-value', + 'key1=value1', + '--multi-value', + 'key2=value2', + ] + expected = argv[:] + loader._fix_argv(argv) + self.assertEqual(expected, argv) + + def test_replace(self): + argv = [ + '-a', + '-b', + '--long-arg', + '--multi_value', + 'key1=value1', + '--multi_value', + 'key2=value2', + ] + expected = [ + '-a', + '-b', + '--long-arg', + '--multi-value', + 'key1=value1', + '--multi-value', + 'key2=value2', + ] + loader._fix_argv(argv) + self.assertEqual(expected, argv) + + def test_mix(self): + argv = [ + '-a', + '-b', + '--long-arg', + '--multi_value', + 'key1=value1', + '--multi-value', + 'key2=value2', + ] + self.assertRaises(exceptions.ConfigException, loader._fix_argv, argv) diff --git 
a/openstack/tests/unit/container_infrastructure_management/__init__.py b/openstack/tests/unit/container_infrastructure_management/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/container_infrastructure_management/v1/__init__.py b/openstack/tests/unit/container_infrastructure_management/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/container_infrastructure_management/v1/test_cluster.py b/openstack/tests/unit/container_infrastructure_management/v1/test_cluster.py new file mode 100644 index 0000000000..809eeb099b --- /dev/null +++ b/openstack/tests/unit/container_infrastructure_management/v1/test_cluster.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.container_infrastructure_management.v1 import cluster +from openstack.tests.unit import base + +EXAMPLE = { + "cluster_template_id": "0562d357-8641-4759-8fed-8173f02c9633", + "create_timeout": 60, + "discovery_url": None, + "flavor_id": None, + "keypair": "my_keypair", + "labels": {}, + "master_count": 2, + "master_flavor_id": None, + "name": "k8s", + "node_count": 2, +} + + +class TestCluster(base.TestCase): + def test_basic(self): + sot = cluster.Cluster() + self.assertIsNone(sot.resource_key) + self.assertEqual('clusters', sot.resources_key) + self.assertEqual('/clusters', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = cluster.Cluster(**EXAMPLE) + self.assertEqual( + EXAMPLE['cluster_template_id'], + sot.cluster_template_id, + ) + self.assertEqual(EXAMPLE['create_timeout'], sot.create_timeout) + self.assertEqual(EXAMPLE['discovery_url'], sot.discovery_url) + self.assertEqual(EXAMPLE['flavor_id'], sot.flavor_id) + self.assertEqual(EXAMPLE['keypair'], sot.keypair) + self.assertEqual(EXAMPLE['labels'], sot.labels) + self.assertEqual(EXAMPLE['master_count'], sot.master_count) + self.assertEqual(EXAMPLE['master_flavor_id'], sot.master_flavor_id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['node_count'], sot.node_count) diff --git a/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_certificate.py b/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_certificate.py new file mode 100644 index 0000000000..6435b9bbc4 --- /dev/null +++ b/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_certificate.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.container_infrastructure_management.v1 import ( + cluster_certificate, +) +from openstack.tests.unit import base + +EXAMPLE = { + "cluster_uuid": "0b4b766f-1500-44b3-9804-5a6e12fe6df4", + "pem": "-----BEGIN CERTIFICATE-----\nMIICzDCCAbSgAwIBAgIQOOkVcEN7TNa9E80G", + "bay_uuid": "0b4b766f-1500-44b3-9804-5a6e12fe6df4", + "csr": "-----BEGIN CERTIFICATE REQUEST-----\nMIIEfzCCAmcCAQAwFDESMBAGA1UE", +} + + +class TestClusterCertificate(base.TestCase): + def test_basic(self): + sot = cluster_certificate.ClusterCertificate() + self.assertIsNone(sot.resource_key) + self.assertEqual('/certificates', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = cluster_certificate.ClusterCertificate(**EXAMPLE) + + self.assertEqual(EXAMPLE['cluster_uuid'], sot.cluster_uuid) + self.assertEqual(EXAMPLE['bay_uuid'], sot.bay_uuid) + self.assertEqual(EXAMPLE['csr'], sot.csr) + self.assertEqual(EXAMPLE['pem'], sot.pem) diff --git a/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_template.py b/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_template.py new file mode 100644 index 0000000000..d291a85688 --- /dev/null +++ b/openstack/tests/unit/container_infrastructure_management/v1/test_cluster_template.py @@ -0,0 +1,102 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with 
the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.container_infrastructure_management.v1 import cluster_template +from openstack.tests.unit import base + +EXAMPLE = { + "insecure_registry": None, + "http_proxy": "http://10.164.177.169:8080", + "updated_at": None, + "floating_ip_enabled": True, + "fixed_subnet": None, + "master_flavor_id": None, + "uuid": "085e1c4d-4f68-4bfd-8462-74b9e14e4f39", + "no_proxy": "10.0.0.0/8,172.0.0.0/8,192.0.0.0/8,localhost", + "https_proxy": "http://10.164.177.169:8080", + "tls_disabled": False, + "keypair_id": "kp", + "public": False, + "labels": {}, + "docker_volume_size": 3, + "server_type": "vm", + "external_network_id": "public", + "cluster_distro": "fedora-atomic", + "image_id": "fedora-atomic-latest", + "volume_driver": "cinder", + "registry_enabled": False, + "docker_storage_driver": "devicemapper", + "apiserver_port": None, + "name": "k8s-bm2", + "created_at": "2016-08-29T02:08:08+00:00", + "network_driver": "flannel", + "fixed_network": None, + "coe": "kubernetes", + "flavor_id": "m1.small", + "master_lb_enabled": True, + "dns_nameserver": "8.8.8.8", + "hidden": True, +} + + +class TestClusterTemplate(base.TestCase): + def test_basic(self): + sot = cluster_template.ClusterTemplate() + self.assertIsNone(sot.resource_key) + self.assertEqual('clustertemplates', sot.resources_key) + self.assertEqual('/clustertemplates', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + 
sot = cluster_template.ClusterTemplate(**EXAMPLE) + + self.assertEqual(EXAMPLE['apiserver_port'], sot.apiserver_port) + self.assertEqual(EXAMPLE['cluster_distro'], sot.cluster_distro) + self.assertEqual(EXAMPLE['coe'], sot.coe) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual( + EXAMPLE['docker_storage_driver'], sot.docker_storage_driver + ) + self.assertEqual(EXAMPLE['docker_volume_size'], sot.docker_volume_size) + self.assertEqual(EXAMPLE['dns_nameserver'], sot.dns_nameserver) + self.assertEqual( + EXAMPLE['external_network_id'], sot.external_network_id + ) + self.assertEqual(EXAMPLE['fixed_network'], sot.fixed_network) + self.assertEqual(EXAMPLE['fixed_subnet'], sot.fixed_subnet) + self.assertEqual(EXAMPLE['flavor_id'], sot.flavor_id) + self.assertEqual(EXAMPLE['http_proxy'], sot.http_proxy) + self.assertEqual(EXAMPLE['https_proxy'], sot.https_proxy) + self.assertEqual(EXAMPLE['image_id'], sot.image_id) + self.assertEqual(EXAMPLE['insecure_registry'], sot.insecure_registry) + self.assertEqual( + EXAMPLE['floating_ip_enabled'], sot.is_floating_ip_enabled + ) + self.assertEqual(EXAMPLE['hidden'], sot.is_hidden) + self.assertEqual( + EXAMPLE['master_lb_enabled'], sot.is_master_lb_enabled + ) + self.assertEqual(EXAMPLE['tls_disabled'], sot.is_tls_disabled) + self.assertEqual(EXAMPLE['public'], sot.is_public) + self.assertEqual(EXAMPLE['registry_enabled'], sot.is_registry_enabled) + self.assertEqual(EXAMPLE['keypair_id'], sot.keypair_id) + self.assertEqual(EXAMPLE['master_flavor_id'], sot.master_flavor_id) + self.assertEqual(EXAMPLE['network_driver'], sot.network_driver) + self.assertEqual(EXAMPLE['no_proxy'], sot.no_proxy) + self.assertEqual(EXAMPLE['server_type'], sot.server_type) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['uuid'], sot.uuid) + self.assertEqual(EXAMPLE['volume_driver'], sot.volume_driver) diff --git a/openstack/tests/unit/container_infrastructure_management/v1/test_proxy.py 
b/openstack/tests/unit/container_infrastructure_management/v1/test_proxy.py new file mode 100644 index 0000000000..7724f3313c --- /dev/null +++ b/openstack/tests/unit/container_infrastructure_management/v1/test_proxy.py @@ -0,0 +1,123 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.container_infrastructure_management.v1 import ( + cluster_certificate, +) +from openstack.container_infrastructure_management.v1 import _proxy +from openstack.container_infrastructure_management.v1 import cluster +from openstack.container_infrastructure_management.v1 import cluster_template +from openstack.container_infrastructure_management.v1 import service +from openstack.tests.unit import test_proxy_base + + +class TestMagnumProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestCluster(TestMagnumProxy): + def test_cluster_get(self): + self.verify_get(self.proxy.get_cluster, cluster.Cluster) + + def test_cluster_find(self): + self.verify_find( + self.proxy.find_cluster, + cluster.Cluster, + method_kwargs={}, + expected_kwargs={}, + ) + + def test_clusters(self): + self.verify_list( + self.proxy.clusters, + cluster.Cluster, + method_kwargs={"query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_cluster_create_attrs(self): + self.verify_create(self.proxy.create_cluster, cluster.Cluster) + + def test_cluster_delete(self): + self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, 
False) + + def test_cluster_delete_ignore(self): + self.verify_delete(self.proxy.delete_cluster, cluster.Cluster, True) + + +class TestClusterCertificate(TestMagnumProxy): + def test_cluster_certificate_get(self): + self.verify_get( + self.proxy.get_cluster_certificate, + cluster_certificate.ClusterCertificate, + ) + + def test_cluster_certificate_create_attrs(self): + self.verify_create( + self.proxy.create_cluster_certificate, + cluster_certificate.ClusterCertificate, + ) + + +class TestClusterTemplate(TestMagnumProxy): + def test_cluster_template_get(self): + self.verify_get( + self.proxy.get_cluster_template, cluster_template.ClusterTemplate + ) + + def test_cluster_template_find(self): + self.verify_find( + self.proxy.find_cluster_template, + cluster_template.ClusterTemplate, + method_kwargs={}, + expected_kwargs={}, + ) + + def test_cluster_templates(self): + self.verify_list( + self.proxy.cluster_templates, + cluster_template.ClusterTemplate, + method_kwargs={"query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_cluster_template_create_attrs(self): + self.verify_create( + self.proxy.create_cluster_template, + cluster_template.ClusterTemplate, + ) + + def test_cluster_template_delete(self): + self.verify_delete( + self.proxy.delete_cluster_template, + cluster_template.ClusterTemplate, + False, + ) + + def test_cluster_template_delete_ignore(self): + self.verify_delete( + self.proxy.delete_cluster_template, + cluster_template.ClusterTemplate, + True, + ) + + +class TestService(TestMagnumProxy): + def test_services(self): + self.verify_list( + self.proxy.services, + service.Service, + method_kwargs={}, + expected_kwargs={}, + ) diff --git a/openstack/tests/unit/container_infrastructure_management/v1/test_service.py b/openstack/tests/unit/container_infrastructure_management/v1/test_service.py new file mode 100644 index 0000000000..4ede15e09a --- /dev/null +++ b/openstack/tests/unit/container_infrastructure_management/v1/test_service.py @@ -0,0 +1,49 @@ 
+# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.container_infrastructure_management.v1 import service +from openstack.tests.unit import base + +EXAMPLE = { + "binary": "magnum-conductor", + "created_at": "2016-08-23T10:52:13+00:00", + "state": "up", + "report_count": 2179, + "updated_at": "2016-08-25T01:13:16+00:00", + "host": "magnum-manager", + "disabled_reason": None, + "id": 1, +} + + +class TestService(base.TestCase): + def test_basic(self): + sot = service.Service() + self.assertIsNone(sot.resource_key) + self.assertEqual('mservices', sot.resources_key) + self.assertEqual('/mservices', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = service.Service(**EXAMPLE) + + self.assertEqual(EXAMPLE['binary'], sot.binary) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['disabled_reason'], sot.disabled_reason) + self.assertEqual(EXAMPLE['host'], sot.host) + self.assertEqual(EXAMPLE['report_count'], sot.report_count) + self.assertEqual(EXAMPLE['state'], sot.state) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/database/test_database_service.py b/openstack/tests/unit/database/test_database_service.py deleted file mode 100644 index 6793acd435..0000000000 --- 
a/openstack/tests/unit/database/test_database_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.database import database_service - - -class TestDatabaseService(testtools.TestCase): - - def test_service(self): - sot = database_service.DatabaseService() - self.assertEqual('database', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/database/v1/test_database.py b/openstack/tests/unit/database/v1/test_database.py index 345780d071..95781838c1 100644 --- a/openstack/tests/unit/database/v1/test_database.py +++ b/openstack/tests/unit/database/v1/test_database.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.database.v1 import database +from openstack.tests.unit import base IDENTIFIER = 'NAME' @@ -25,25 +24,24 @@ } -class TestDatabase(testtools.TestCase): - +class TestDatabase(base.TestCase): def test_basic(self): sot = database.Database() self.assertEqual('database', sot.resource_key) self.assertEqual('databases', sot.resources_key) path = '/instances/%(instance_id)s/databases' self.assertEqual(path, sot.base_path) - self.assertEqual('database', sot.service.service_type) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) def test_make_it(self): - sot = database.Database(EXAMPLE) + sot = database.Database(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['character_set'], sot.character_set) self.assertEqual(EXAMPLE['collate'], sot.collate) self.assertEqual(EXAMPLE['instance_id'], sot.instance_id) self.assertEqual(IDENTIFIER, sot.name) + self.assertEqual(IDENTIFIER, sot.id) diff --git a/openstack/tests/unit/database/v1/test_flavor.py b/openstack/tests/unit/database/v1/test_flavor.py index 180ffb0dde..a6dacb4bf4 100644 --- a/openstack/tests/unit/database/v1/test_flavor.py +++ b/openstack/tests/unit/database/v1/test_flavor.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.database.v1 import flavor +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,22 +23,20 @@ } -class TestFlavor(testtools.TestCase): - +class TestFlavor(base.TestCase): def test_basic(self): sot = flavor.Flavor() self.assertEqual('flavor', sot.resource_key) self.assertEqual('flavors', sot.resources_key) self.assertEqual('/flavors', sot.base_path) - self.assertEqual('database', sot.service.service_type) self.assertTrue(sot.allow_list) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) def test_make_it(self): - sot = flavor.Flavor(EXAMPLE) + sot = flavor.Flavor(**EXAMPLE) self.assertEqual(IDENTIFIER, sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) diff --git a/openstack/tests/unit/database/v1/test_instance.py b/openstack/tests/unit/database/v1/test_instance.py index 13687d17e7..6a0d6d8798 100644 --- a/openstack/tests/unit/database/v1/test_instance.py +++ b/openstack/tests/unit/database/v1/test_instance.py @@ -10,10 +10,10 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +from unittest import mock from openstack.database.v1 import instance +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,34 +23,42 @@ 'name': '4', 'status': '5', 'volume': '6', + 'datastore': {'7': 'seven'}, + 'region': '8', + 'hostname': '9', + 'created': '10', + 'updated': '11', } -class TestInstance(testtools.TestCase): - +class TestInstance(base.TestCase): def test_basic(self): sot = instance.Instance() self.assertEqual('instance', sot.resource_key) self.assertEqual('instances', sot.resources_key) self.assertEqual('/instances', sot.base_path) - self.assertEqual('database', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = instance.Instance(EXAMPLE) + sot = instance.Instance(**EXAMPLE) self.assertEqual(EXAMPLE['flavor'], sot.flavor) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['volume'], sot.volume) + self.assertEqual(EXAMPLE['datastore'], sot.datastore) + self.assertEqual(EXAMPLE['region'], sot.region) + self.assertEqual(EXAMPLE['hostname'], sot.hostname) + self.assertEqual(EXAMPLE['created'], sot.created_at) + self.assertEqual(EXAMPLE['updated'], sot.updated_at) def test_enable_root_user(self): - sot = instance.Instance(EXAMPLE) + sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.body = {'user': {'name': 'root', 'password': 'foo'}} response.json = mock.Mock(return_value=response.body) @@ -59,11 +67,13 @@ def test_enable_root_user(self): self.assertEqual(response.body['user'], sot.enable_root_user(sess)) - url = ("instances/%s/root" % IDENTIFIER) - 
sess.post.assert_called_with(url, endpoint_filter=sot.service) + url = f"instances/{IDENTIFIER}/root" + sess.post.assert_called_with( + url, + ) def test_is_root_enabled(self): - sot = instance.Instance(EXAMPLE) + sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.body = {'rootEnabled': True} response.json = mock.Mock(return_value=response.body) @@ -72,11 +82,13 @@ def test_is_root_enabled(self): self.assertTrue(sot.is_root_enabled(sess)) - url = ("instances/%s/root" % IDENTIFIER) - sess.get.assert_called_with(url, endpoint_filter=sot.service) + url = f"instances/{IDENTIFIER}/root" + sess.get.assert_called_with( + url, + ) def test_action_restart(self): - sot = instance.Instance(EXAMPLE) + sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.json = mock.Mock(return_value='') sess = mock.Mock() @@ -84,13 +96,12 @@ def test_action_restart(self): self.assertIsNone(sot.restart(sess)) - url = ("instances/%s/action" % IDENTIFIER) - body = {'restart': {}} - sess.post.assert_called_with(url, endpoint_filter=sot.service, - json=body) + url = f"instances/{IDENTIFIER}/action" + body = {'restart': None} + sess.post.assert_called_with(url, json=body) def test_action_resize(self): - sot = instance.Instance(EXAMPLE) + sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.json = mock.Mock(return_value='') sess = mock.Mock() @@ -99,13 +110,12 @@ def test_action_resize(self): self.assertIsNone(sot.resize(sess, flavor)) - url = ("instances/%s/action" % IDENTIFIER) + url = f"instances/{IDENTIFIER}/action" body = {'resize': {'flavorRef': flavor}} - sess.post.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.post.assert_called_with(url, json=body) def test_action_resize_volume(self): - sot = instance.Instance(EXAMPLE) + sot = instance.Instance(**EXAMPLE) response = mock.Mock() response.json = mock.Mock(return_value='') sess = mock.Mock() @@ -114,7 +124,6 @@ def test_action_resize_volume(self): 
self.assertIsNone(sot.resize_volume(sess, size)) - url = ("instances/%s/action" % IDENTIFIER) + url = f"instances/{IDENTIFIER}/action" body = {'resize': {'volume': size}} - sess.post.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.post.assert_called_with(url, json=body) diff --git a/openstack/tests/unit/database/v1/test_proxy.py b/openstack/tests/unit/database/v1/test_proxy.py index 41d2ce13a4..e624edfc75 100644 --- a/openstack/tests/unit/database/v1/test_proxy.py +++ b/openstack/tests/unit/database/v1/test_proxy.py @@ -20,26 +20,55 @@ class TestDatabaseProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestDatabaseProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) def test_database_create_attrs(self): - self.verify_create(self.proxy.create_database, database.Database) + self.verify_create( + self.proxy.create_database, + database.Database, + method_kwargs={"instance": "id"}, + expected_kwargs={"instance_id": "id"}, + ) def test_database_delete(self): - self.verify_delete(self.proxy.delete_database, - database.Database, False) + self.verify_delete( + self.proxy.delete_database, + database.Database, + ignore_missing=False, + method_kwargs={"instance": "test_id"}, + expected_kwargs={"instance_id": "test_id"}, + ) def test_database_delete_ignore(self): - self.verify_delete(self.proxy.delete_database, - database.Database, True) + self.verify_delete( + self.proxy.delete_database, + database.Database, + ignore_missing=True, + method_kwargs={"instance": "test_id"}, + expected_kwargs={"instance_id": "test_id"}, + ) def test_database_find(self): - self.verify_find(self.proxy.find_database, database.Database) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_database, + method_args=["db", "instance"], + expected_args=[database.Database, "db"], + expected_kwargs={ + "instance_id": "instance", + "ignore_missing": True, + }, + ) def test_databases(self): - self.verify_list(self.proxy.databases, 
database.Database, - paginated=False) + self.verify_list( + self.proxy.databases, + database.Database, + method_args=["id"], + expected_args=[], + expected_kwargs={"instance_id": "id"}, + ) def test_database_get(self): self.verify_get(self.proxy.get_database, database.Database) @@ -51,19 +80,18 @@ def test_flavor_get(self): self.verify_get(self.proxy.get_flavor, flavor.Flavor) def test_flavors(self): - self.verify_list(self.proxy.flavors, flavor.Flavor, - paginated=False) + self.verify_list(self.proxy.flavors, flavor.Flavor) def test_instance_create_attrs(self): self.verify_create(self.proxy.create_instance, instance.Instance) def test_instance_delete(self): - self.verify_delete(self.proxy.delete_instance, - instance.Instance, False) + self.verify_delete( + self.proxy.delete_instance, instance.Instance, False + ) def test_instance_delete_ignore(self): - self.verify_delete(self.proxy.delete_instance, - instance.Instance, True) + self.verify_delete(self.proxy.delete_instance, instance.Instance, True) def test_instance_find(self): self.verify_find(self.proxy.find_instance, instance.Instance) @@ -72,26 +100,57 @@ def test_instance_get(self): self.verify_get(self.proxy.get_instance, instance.Instance) def test_instances(self): - self.verify_list(self.proxy.instances, instance.Instance, - paginated=False) + self.verify_list(self.proxy.instances, instance.Instance) def test_instance_update(self): self.verify_update(self.proxy.update_instance, instance.Instance) def test_user_create_attrs(self): - self.verify_create(self.proxy.create_user, user.User) + self.verify_create( + self.proxy.create_user, + user.User, + method_kwargs={"instance": "id"}, + expected_kwargs={"instance_id": "id"}, + ) def test_user_delete(self): - self.verify_delete(self.proxy.delete_user, user.User, False) + self.verify_delete( + self.proxy.delete_user, + user.User, + False, + method_kwargs={"instance": "id"}, + expected_kwargs={"instance_id": "id"}, + ) def test_user_delete_ignore(self): - 
self.verify_delete(self.proxy.delete_user, user.User, True) + self.verify_delete( + self.proxy.delete_user, + user.User, + True, + method_kwargs={"instance": "id"}, + expected_kwargs={"instance_id": "id"}, + ) def test_user_find(self): - self.verify_find(self.proxy.find_user, user.User) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_user, + method_args=["user", "instance"], + expected_args=[user.User, "user"], + expected_kwargs={ + "instance_id": "instance", + "ignore_missing": True, + }, + ) def test_users(self): - self.verify_list(self.proxy.users, user.User, paginated=False) + self.verify_list( + self.proxy.users, + user.User, + method_args=["test_instance"], + expected_args=[], + expected_kwargs={"instance_id": "test_instance"}, + ) def test_user_get(self): self.verify_get(self.proxy.get_user, user.User) diff --git a/openstack/tests/unit/database/v1/test_user.py b/openstack/tests/unit/database/v1/test_user.py index 85e05bf23e..7d5ce73ae4 100644 --- a/openstack/tests/unit/database/v1/test_user.py +++ b/openstack/tests/unit/database/v1/test_user.py @@ -10,10 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools - from openstack.database.v1 import user +from openstack.tests.unit import base + INSTANCE_ID = 'INSTANCE_ID' @@ -22,48 +21,29 @@ 'name': '2', 'password': '3', } -EXISTING = { - 'databases': '1', - 'name': '2', -} -class TestUser(testtools.TestCase): - +class TestUser(base.TestCase): def test_basic(self): sot = user.User() self.assertEqual('user', sot.resource_key) self.assertEqual('users', sot.resources_key) self.assertEqual('/instances/%(instance_id)s/users', sot.base_path) - self.assertEqual('database', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make(self): - sot = user.User(CREATING) + sot = user.User(**CREATING) self.assertEqual(CREATING['name'], sot.id) self.assertEqual(CREATING['databases'], sot.databases) self.assertEqual(CREATING['name'], sot.name) + self.assertEqual(CREATING['name'], sot.id) self.assertEqual(CREATING['password'], sot.password) - def test_existing(self): - sot = user.User(EXISTING) - self.assertEqual(EXISTING['name'], sot.id) - self.assertEqual(EXISTING['databases'], sot.databases) - self.assertEqual(EXISTING['name'], sot.name) - self.assertIsNone(sot.password) - def test_create(self): - sess = mock.Mock() - resp = mock.Mock() - sess.post = mock.Mock(return_value=resp) - path_args = {'instance_id': INSTANCE_ID} - url = '/instances/%(instance_id)s/users' % path_args - payload = {'users': [CREATING]} - - user.User.create_by_id(sess, CREATING, path_args=path_args) - sess.post.assert_called_with(url, endpoint_filter=user.User.service, - json=payload) + sot = user.User(instance_id=INSTANCE_ID, **CREATING) + result = sot._prepare_request() + self.assertEqual(result.body, {sot.resources_key: CREATING}) diff --git a/openstack/tests/unit/dns/__init__.py 
b/openstack/tests/unit/dns/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/dns/test_version.py b/openstack/tests/unit/dns/test_version.py new file mode 100644 index 0000000000..9d24243498 --- /dev/null +++ b/openstack/tests/unit/dns/test_version.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns import version +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': '2', + 'status': '3', +} + + +class TestVersion(base.TestCase): + def test_basic(self): + sot = version.Version() + self.assertEqual('version', sot.resource_key) + self.assertEqual('versions', sot.resources_key) + self.assertEqual('/', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = version.Version(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/dns/v2/__init__.py b/openstack/tests/unit/dns/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/dns/v2/test_blacklist.py b/openstack/tests/unit/dns/v2/test_blacklist.py new file mode 100644 index 0000000000..e7a166d3fd --- /dev/null +++ 
b/openstack/tests/unit/dns/v2/test_blacklist.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import blacklist +from openstack.tests.unit import base + +IDENTIFIER = '373cb85e-0f4a-487a-846e-dce7a65cca4d' +EXAMPLE = { + 'id': IDENTIFIER, + 'description': 'blacklist test description', + 'pattern': '.*example.com.', +} + + +class TestBlackList(base.TestCase): + def test_basic(self): + sot = blacklist.Blacklist() + self.assertEqual(None, sot.resource_key) + self.assertEqual('blacklists', sot.resources_key) + self.assertEqual('/blacklists', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertEqual('PATCH', sot.commit_method) + + def test_make_it(self): + sot = blacklist.Blacklist(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['pattern'], sot.pattern) diff --git a/openstack/tests/unit/dns/v2/test_floating_ip.py b/openstack/tests/unit/dns/v2/test_floating_ip.py new file mode 100644 index 0000000000..18312dbddd --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_floating_ip.py @@ -0,0 +1,52 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import floating_ip as fip +from openstack.tests.unit import base + + +IDENTIFIER = 'RegionOne:id' +EXAMPLE = { + 'status': 'PENDING', + 'ptrdname': 'smtp.example.com.', + 'description': 'This is a floating ip for 127.0.0.1', + 'links': {'self': 'dummylink/reverse/floatingips/RegionOne:id'}, + 'ttl': 600, + 'address': '172.24.4.10', + 'action': 'CREATE', + 'id': IDENTIFIER, +} + + +class TestFloatingIP(base.TestCase): + def test_basic(self): + sot = fip.FloatingIP() + self.assertEqual(None, sot.resource_key) + self.assertEqual('floatingips', sot.resources_key) + self.assertEqual('/reverse/floatingips', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertFalse(sot.allow_delete) + + self.assertEqual('PATCH', sot.commit_method) + + def test_make_it(self): + sot = fip.FloatingIP(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['ptrdname'], sot.ptrdname) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['ttl'], sot.ttl) + self.assertEqual(EXAMPLE['address'], sot.address) + self.assertEqual(EXAMPLE['action'], sot.action) + self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/dns/v2/test_limit.py b/openstack/tests/unit/dns/v2/test_limit.py new file mode 100644 index 0000000000..e8ace272b0 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_limit.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); 
you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import limit as _limit +from openstack.tests.unit import base + +IDENTIFIER = 'limit' +EXAMPLE = { + "max_page_limit": 1000, + "max_recordset_name_length": 255, + "max_recordset_records": 20, + "max_zone_name_length": 255, + "max_zone_records": 500, + "max_zone_recordsets": 500, + "max_zones": 10, + "min_ttl": 100, +} + + +class TestLimit(base.TestCase): + def test_basic(self): + sot = _limit.Limit() + self.assertEqual('limit', sot.resource_key) + self.assertEqual(None, sot.resources_key) + self.assertEqual('/limits', sot.base_path) + self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/dns/v2/test_proxy.py b/openstack/tests/unit/dns/v2/test_proxy.py new file mode 100644 index 0000000000..90cedb79d0 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_proxy.py @@ -0,0 +1,454 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.dns.v2 import _proxy +from openstack.dns.v2 import blacklist +from openstack.dns.v2 import floating_ip +from openstack.dns.v2 import quota +from openstack.dns.v2 import recordset +from openstack.dns.v2 import service_status +from openstack.dns.v2 import tld +from openstack.dns.v2 import tsigkey +from openstack.dns.v2 import zone +from openstack.dns.v2 import zone_export +from openstack.dns.v2 import zone_import +from openstack.dns.v2 import zone_nameserver +from openstack.dns.v2 import zone_share +from openstack.dns.v2 import zone_transfer +from openstack.tests.unit import test_proxy_base + + +class TestDnsProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestDnsZone(TestDnsProxy): + def test_zone_create(self): + self.verify_create( + self.proxy.create_zone, + zone.Zone, + method_kwargs={'name': 'id'}, + expected_kwargs={'name': 'id', 'prepend_key': False}, + ) + + def test_zone_delete(self): + self.verify_delete( + self.proxy.delete_zone, + zone.Zone, + True, + expected_kwargs={'ignore_missing': True, 'delete_shares': False}, + ) + + def test_zone_find(self): + self.verify_find(self.proxy.find_zone, zone.Zone) + + def test_zone_get(self): + self.verify_get(self.proxy.get_zone, zone.Zone) + + def test_zones(self): + self.verify_list(self.proxy.zones, zone.Zone) + + def test_zone_update(self): + self.verify_update(self.proxy.update_zone, zone.Zone) + + def test_zone_abandon(self): + self._verify( + "openstack.dns.v2.zone.Zone.abandon", + self.proxy.abandon_zone, + method_args=[{'zone': 'id'}], + expected_args=[self.proxy], + ) + + def test_zone_xfr(self): + self._verify( + "openstack.dns.v2.zone.Zone.xfr", + self.proxy.xfr_zone, + method_args=[{'zone': 'id'}], + expected_args=[self.proxy], + ) + + +class TestDnsZoneNameserver(TestDnsProxy): + def test_get_zone_nameservers(self): + self.verify_list( + self.proxy.zone_nameservers, + zone_nameserver.ZoneNameserver, + 
method_kwargs={'zone': 'id'}, + expected_kwargs={'zone_id': 'id'}, + ) + + +class TestDnsRecordset(TestDnsProxy): + def test_recordset_create(self): + self.verify_create( + self.proxy.create_recordset, + recordset.Recordset, + method_kwargs={'zone': 'id'}, + expected_kwargs={'zone_id': 'id', 'prepend_key': False}, + ) + + def test_recordset_delete(self): + self.verify_delete( + self.proxy.delete_recordset, recordset.Recordset, True + ) + + def test_recordset_update(self): + self.verify_update(self.proxy.update_recordset, recordset.Recordset) + + def test_recordset_get(self): + self.verify_get( + self.proxy.get_recordset, + recordset.Recordset, + method_kwargs={'zone': 'zid'}, + expected_kwargs={'zone_id': 'zid'}, + ) + + def test_recordsets(self): + self.verify_list( + self.proxy.recordsets, + recordset.Recordset, + expected_kwargs={'base_path': '/recordsets'}, + ) + + def test_recordsets_zone(self): + self.verify_list( + self.proxy.recordsets, + recordset.Recordset, + method_kwargs={'zone': 'zid'}, + expected_kwargs={'zone_id': 'zid'}, + ) + + def test_recordset_find(self): + self._verify( + "openstack.proxy.Proxy._find", + self.proxy.find_recordset, + method_args=['zone', 'rs'], + method_kwargs={}, + expected_args=[recordset.Recordset, 'rs'], + expected_kwargs={'ignore_missing': True, 'zone_id': 'zone'}, + ) + + +class TestDnsFloatIP(TestDnsProxy): + def test_floating_ips(self): + self.verify_list(self.proxy.floating_ips, floating_ip.FloatingIP) + + def test_floating_ip_get(self): + self.verify_get(self.proxy.get_floating_ip, floating_ip.FloatingIP) + + def test_floating_ip_update(self): + self.verify_update( + self.proxy.update_floating_ip, floating_ip.FloatingIP + ) + + def test_floating_ip_unset(self): + self._verify( + 'openstack.proxy.Proxy._update', + self.proxy.unset_floating_ip, + method_args=['value'], + method_kwargs={}, + expected_args=[floating_ip.FloatingIP, 'value'], + expected_kwargs={'ptrdname': None}, + ) + + +class 
TestDnsZoneImport(TestDnsProxy): + def test_zone_import_delete(self): + self.verify_delete( + self.proxy.delete_zone_import, zone_import.ZoneImport, True + ) + + def test_zone_import_get(self): + self.verify_get(self.proxy.get_zone_import, zone_import.ZoneImport) + + def test_zone_imports(self): + self.verify_list(self.proxy.zone_imports, zone_import.ZoneImport) + + def test_zone_import_create(self): + self.verify_create( + self.proxy.create_zone_import, + zone_import.ZoneImport, + method_kwargs={'name': 'id'}, + expected_kwargs={'name': 'id', 'prepend_key': False}, + ) + + +class TestDnsZoneExport(TestDnsProxy): + def test_zone_export_delete(self): + self.verify_delete( + self.proxy.delete_zone_export, zone_export.ZoneExport, True + ) + + def test_zone_export_get(self): + self.verify_get(self.proxy.get_zone_export, zone_export.ZoneExport) + + def test_zone_export_get_text(self): + self.verify_get( + self.proxy.get_zone_export_text, + zone_export.ZoneExport, + method_args=[{'id': 'zone_export_id_value'}], + expected_kwargs={'base_path': '/zones/tasks/export/%(id)s/export'}, + ) + + def test_zone_exports(self): + self.verify_list(self.proxy.zone_exports, zone_export.ZoneExport) + + def test_zone_export_create(self): + self.verify_create( + self.proxy.create_zone_export, + zone_export.ZoneExport, + method_args=[{'id': 'zone_id_value'}], + method_kwargs={'name': 'id'}, + expected_args=[], + expected_kwargs={ + 'name': 'id', + 'zone_id': 'zone_id_value', + 'prepend_key': False, + }, + ) + + +class TestDnsZoneTransferRequest(TestDnsProxy): + def test_zone_transfer_request_delete(self): + self.verify_delete( + self.proxy.delete_zone_transfer_request, + zone_transfer.ZoneTransferRequest, + True, + ) + + def test_zone_transfer_request_get(self): + self.verify_get( + self.proxy.get_zone_transfer_request, + zone_transfer.ZoneTransferRequest, + ) + + def test_zone_transfer_requests(self): + self.verify_list( + self.proxy.zone_transfer_requests, + 
zone_transfer.ZoneTransferRequest, + ) + + def test_zone_transfer_request_create(self): + self.verify_create( + self.proxy.create_zone_transfer_request, + zone_transfer.ZoneTransferRequest, + method_args=[{'id': 'zone_id_value'}], + method_kwargs={'name': 'id'}, + expected_args=[], + expected_kwargs={ + 'name': 'id', + 'zone_id': 'zone_id_value', + 'prepend_key': False, + }, + ) + + def test_zone_transfer_request_update(self): + self.verify_update( + self.proxy.update_zone_transfer_request, + zone_transfer.ZoneTransferRequest, + ) + + +class TestDnsZoneTransferAccept(TestDnsProxy): + def test_zone_transfer_accept_get(self): + self.verify_get( + self.proxy.get_zone_transfer_accept, + zone_transfer.ZoneTransferAccept, + ) + + def test_zone_transfer_accepts(self): + self.verify_list( + self.proxy.zone_transfer_accepts, zone_transfer.ZoneTransferAccept + ) + + def test_zone_transfer_accept_create(self): + self.verify_create( + self.proxy.create_zone_transfer_accept, + zone_transfer.ZoneTransferAccept, + ) + + +class TestDnsZoneShare(TestDnsProxy): + def test_zone_share_create(self): + self.verify_create( + self.proxy.create_zone_share, + zone_share.ZoneShare, + method_kwargs={'zone': 'bogus_id'}, + expected_kwargs={'zone_id': 'bogus_id'}, + ) + + def test_zone_share_delete(self): + self.verify_delete( + self.proxy.delete_zone_share, + zone_share.ZoneShare, + ignore_missing=True, + method_args={'zone': 'bogus_id', 'zone_share': 'bogus_id'}, + expected_args=['zone_share'], + expected_kwargs={'zone_id': 'zone', 'ignore_missing': True}, + ) + + def test_zone_share_find(self): + self.verify_find( + self.proxy.find_zone_share, + zone_share.ZoneShare, + method_args=['zone'], + expected_args=['zone'], + expected_kwargs={ + 'zone_id': 'resource_name', + 'ignore_missing': True, + }, + ) + + def test_zone_share_get(self): + self.verify_get( + self.proxy.get_zone_share, + zone_share.ZoneShare, + method_args=['zone', 'zone_share'], + expected_args=['zone_share'], + 
expected_kwargs={'zone_id': 'zone'}, + ) + + def test_zone_shares(self): + self.verify_list( + self.proxy.zone_shares, + zone_share.ZoneShare, + method_args=['zone'], + expected_args=[], + expected_kwargs={'zone_id': 'zone'}, + ) + + +class TestDnsServiceStatus(TestDnsProxy): + def test_service_statuses(self): + self.verify_list( + self.proxy.service_statuses, service_status.ServiceStatus + ) + + def test_service_status_get(self): + self.verify_get( + self.proxy.get_service_status, service_status.ServiceStatus + ) + + +class TestDnsTsigKey(TestDnsProxy): + def test_tsigkey_create(self): + self.verify_create( + self.proxy.create_tsigkey, + tsigkey.TSIGKey, + method_kwargs={'name': 'id'}, + expected_kwargs={'name': 'id', 'prepend_key': False}, + ) + + def test_tsigkey_delete(self): + self.verify_delete( + self.proxy.delete_tsigkey, + tsigkey.TSIGKey, + True, + expected_kwargs={'ignore_missing': True, 'delete_shares': False}, + ) + + def test_tsigkey_find(self): + self.verify_find(self.proxy.find_tsigkey, tsigkey.TSIGKey) + + def test_tsigkey_get(self): + self.verify_get(self.proxy.get_tsigkey, tsigkey.TSIGKey) + + def test_tesigkeys(self): + self.verify_list(self.proxy.tsigkeys, tsigkey.TSIGKey) + + +class TestDnsBlacklist(TestDnsProxy): + def test_blacklist_create(self): + self.verify_create( + self.proxy.create_blacklist, + blacklist.Blacklist, + method_kwargs={'pattern': r'.*\.example\.com'}, + expected_kwargs={ + 'pattern': r'.*\.example\.com', + 'prepend_key': False, + }, + ) + + def test_blacklist_delete(self): + self.verify_delete( + self.proxy.delete_blacklist, + blacklist.Blacklist, + ignore_missing=True, + ) + + def test_blacklist_update(self): + self.verify_update(self.proxy.update_blacklist, blacklist.Blacklist) + + def test_blacklist_get(self): + self.verify_get(self.proxy.get_blacklist, blacklist.Blacklist) + + def test_blacklists(self): + self.verify_list(self.proxy.blacklists, blacklist.Blacklist) + + +class TestDnsTLD(TestDnsProxy): + def 
test_tld_create(self): + self.verify_create( + self.proxy.create_tld, + tld.TLD, + method_kwargs={"name": "id"}, + expected_kwargs={"name": "id", "prepend_key": False}, + ) + + def test_tld_delete(self): + self.verify_delete( + self.proxy.delete_tld, + tld.TLD, + True, + expected_kwargs={"ignore_missing": True}, + ) + + def test_tld_find(self): + self.verify_find(self.proxy.find_tld, tld.TLD) + + def test_tld_get(self): + self.verify_get(self.proxy.get_tld, tld.TLD) + + def test_tlds(self): + self.verify_list(self.proxy.tlds, tld.TLD) + + def test_tld_update(self): + self.verify_update(self.proxy.update_tld, tld.TLD) + + +class TestDnsQuota(TestDnsProxy): + def test_quotas(self): + self.verify_list(self.proxy.quotas, quota.Quota) + + def test_quota_get(self): + self.verify_get(self.proxy.get_quota, quota.Quota) + + def test_quota_update(self): + self.verify_update(self.proxy.update_quota, quota.Quota) + + def test_quota_delete(self): + self.verify_delete( + self.proxy.delete_quota, + quota.Quota, + False, + expected_kwargs={'ignore_missing': False}, + ) + + def test_quota_delete_ignore(self): + self.verify_delete( + self.proxy.delete_quota, + quota.Quota, + True, + expected_kwargs={'ignore_missing': True}, + ) diff --git a/openstack/tests/unit/dns/v2/test_quota.py b/openstack/tests/unit/dns/v2/test_quota.py new file mode 100644 index 0000000000..e788b02cff --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_quota.py @@ -0,0 +1,51 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import quota +from openstack.tests.unit import base + +IDENTIFIER = "IDENTIFIER" +EXAMPLE = { + "zones": 10, + "zone_recordsets": 500, + "zone_records": 500, + "recordset_records": 20, + "api_export_size": 1000, +} + + +class TestQuota(base.TestCase): + def test_basic(self): + sot = quota.Quota() + self.assertIsNone(sot.resources_key) + self.assertIsNone(sot.resource_key) + self.assertEqual("/quotas", sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.commit_method, "PATCH") + + def test_make_it(self): + sot = quota.Quota(project='FAKE_PROJECT', **EXAMPLE) + self.assertEqual(EXAMPLE['zones'], sot.zones) + self.assertEqual(EXAMPLE['zone_recordsets'], sot.zone_recordsets) + self.assertEqual(EXAMPLE['zone_records'], sot.zone_records) + self.assertEqual(EXAMPLE['recordset_records'], sot.recordset_records) + self.assertEqual(EXAMPLE['api_export_size'], sot.api_export_size) + self.assertEqual('FAKE_PROJECT', sot.project) + + def test_prepare_request(self): + body = {'id': 'ABCDEFGH', 'zones': 20} + quota_obj = quota.Quota(**body) + response = quota_obj._prepare_request() + self.assertNotIn('id', response) diff --git a/openstack/tests/unit/dns/v2/test_recordset.py b/openstack/tests/unit/dns/v2/test_recordset.py new file mode 100644 index 0000000000..6254524a4d --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_recordset.py @@ -0,0 +1,69 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import recordset +from openstack.tests.unit import base + + +IDENTIFIER = 'NAME' +EXAMPLE = { + 'description': 'This is an example record set.', + 'updated_at': None, + 'records': ['10.1.0.2'], + 'ttl': 3600, + 'id': IDENTIFIER, + 'name': 'example.org.', + 'project_id': '4335d1f0-f793-11e2-b778-0800200c9a66', + 'zone_id': '2150b1bf-dee2-4221-9d85-11f7886fb15f', + 'zone_name': 'example.com.', + 'created_at': '2014-10-24T19:59:44.000000', + 'version': 1, + 'type': 'A', + 'status': 'ACTIVE', + 'action': 'NONE', +} + + +class TestRecordset(base.TestCase): + def test_basic(self): + sot = recordset.Recordset() + self.assertIsNone(sot.resource_key) + self.assertEqual('recordsets', sot.resources_key) + self.assertEqual('/zones/%(zone_id)s/recordsets', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertDictEqual( + { + 'data': 'data', + 'description': 'description', + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'status': 'status', + 'ttl': 'ttl', + 'type': 'type', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = recordset.Recordset(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['ttl'], sot.ttl) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['status'], sot.status) diff --git 
a/openstack/tests/unit/dns/v2/test_service_status.py b/openstack/tests/unit/dns/v2/test_service_status.py new file mode 100644 index 0000000000..d2bcc7fea7 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_service_status.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import service_status as svc_status +from openstack.tests.unit import base + + +class TestServiceStatus(base.TestCase): + def test_basic(self): + sot = svc_status.ServiceStatus() + self.assertEqual(None, sot.resource_key) + self.assertEqual('service_statuses', sot.resources_key) + self.assertEqual('/service_statuses', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) diff --git a/openstack/tests/unit/dns/v2/test_tld.py b/openstack/tests/unit/dns/v2/test_tld.py new file mode 100644 index 0000000000..c8d714ebaf --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_tld.py @@ -0,0 +1,66 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.dns.v2 import tld +from openstack.tests.unit import base + +IDENTIFIER = "NAME" +EXAMPLE = { + "id": IDENTIFIER, + "name": "com", + "description": "tld description", +} + + +class TestTLD(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = None + + def test_basic(self): + sot = tld.TLD() + self.assertEqual(None, sot.resource_key) + self.assertEqual("tlds", sot.resources_key) + self.assertEqual("/tlds", sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertEqual("PATCH", sot.commit_method) + + self.assertDictEqual( + { + "description": "description", + "name": "name", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = tld.TLD(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE["description"], sot.description) + self.assertEqual(EXAMPLE["name"], sot.name) diff --git a/openstack/tests/unit/dns/v2/test_tsigkey.py b/openstack/tests/unit/dns/v2/test_tsigkey.py new file mode 100644 index 0000000000..bf4d4ba91a --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_tsigkey.py 
@@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import tsigkey +from openstack.tests.unit import base + +IDENTIFIER = '4c72c7d3-6cfa-4fe1-9984-7705119f0228' +EXAMPLE = { + "id": IDENTIFIER, + "name": 'test-key', + "algorithm": 'hmac-sha512', + "secret": 'test-secret', + "scope": 'POOL', + "resource_id": IDENTIFIER, +} + + +class TestTsigKey(base.TestCase): + def test_basic(self): + sot = tsigkey.TSIGKey() + self.assertEqual(None, sot.resource_key) + self.assertEqual('tsigkeys', sot.resources_key) + self.assertEqual('/tsigkeys', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'name': 'name', + 'algorithm': 'algorithm', + 'scope': 'scope', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = tsigkey.TSIGKey(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['algorithm'], sot.algorithm) + self.assertEqual(EXAMPLE['scope'], sot.scope) + self.assertEqual(EXAMPLE['resource_id'], sot.resource_id) + self.assertEqual(EXAMPLE['secret'], sot.secret) diff --git a/openstack/tests/unit/dns/v2/test_zone.py b/openstack/tests/unit/dns/v2/test_zone.py new file mode 100644 index 
0000000000..673509efe0 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_zone.py @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.dns.v2 import zone +from openstack.tests.unit import base + +IDENTIFIER = 'NAME' +EXAMPLE = { + 'attributes': {'tier': 'gold', 'ha': 'true'}, + 'id': IDENTIFIER, + 'name': 'test.org', + 'email': 'joe@example.org', + 'type': 'PRIMARY', + 'ttl': 7200, + 'description': 'This is an example zone.', + 'status': 'ACTIVE', + 'shared': False, +} + + +class TestZone(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = None + + def test_basic(self): + sot = zone.Zone() + self.assertEqual(None, sot.resource_key) + self.assertEqual('zones', sot.resources_key) + self.assertEqual('/zones', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'description': 'description', + 'email': 'email', + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'status': 'status', + 
'ttl': 'ttl', + 'type': 'type', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = zone.Zone(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['email'], sot.email) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['ttl'], sot.ttl) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['shared'], sot.is_shared) + + def test_abandon(self): + sot = zone.Zone(**EXAMPLE) + self.assertIsNone(sot.abandon(self.sess)) + self.sess.post.assert_called_with( + 'zones/NAME/tasks/abandon', json=None + ) diff --git a/openstack/tests/unit/dns/v2/test_zone_export.py b/openstack/tests/unit/dns/v2/test_zone_export.py new file mode 100644 index 0000000000..c058bd10f0 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_zone_export.py @@ -0,0 +1,86 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from unittest import mock + +from keystoneauth1 import adapter + +from openstack.dns.v2 import zone_export +from openstack.tests.unit import base + + +IDENTIFIER = '074e805e-fe87-4cbb-b10b-21a06e215d41' +EXAMPLE = { + 'status': 'COMPLETE', + 'zone_id': '6625198b-d67d-47dc-8d29-f90bd60f3ac4', + 'links': { + 'self': 'http://127.0.0.1:9001/v2/zones/tasks/exports/074e805e-f', + 'href': 'http://127.0.0.1:9001/v2/zones/6625198b-d67d-', + }, + 'created_at': '2015-05-08T15:43:42.000000', + 'updated_at': '2015-05-08T15:43:43.000000', + 'version': 2, + 'location': 'designate://v2/zones/tasks/exports/8ec17fe1/export', + 'message': 'example.com. exported', + 'project_id': 'noauth-project', + 'id': IDENTIFIER, +} + + +@mock.patch.object(zone_export.ZoneExport, '_translate_response', mock.Mock()) +class TestZoneExport(base.TestCase): + def test_basic(self): + sot = zone_export.ZoneExport() + self.assertEqual('', sot.resource_key) + self.assertEqual('exports', sot.resources_key) + self.assertEqual('/zones/tasks/export', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'message': 'message', + 'status': 'status', + 'zone_id': 'zone_id', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = zone_export.ZoneExport(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['version'], sot.version) + self.assertEqual(EXAMPLE['message'], sot.message) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['zone_id'], sot.zone_id) + + def test_create(self): + sot = zone_export.ZoneExport() + response = mock.Mock() + response.json = 
mock.Mock(return_value='') + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.1' + + sot.create(self.session) + self.session.post.assert_called_once_with( + mock.ANY, + json=None, + headers=None, + microversion=self.session.default_microversion, + ) diff --git a/openstack/tests/unit/dns/v2/test_zone_import.py b/openstack/tests/unit/dns/v2/test_zone_import.py new file mode 100644 index 0000000000..ac0e621903 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_zone_import.py @@ -0,0 +1,84 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.dns.v2 import zone_import +from openstack.tests.unit import base + +IDENTIFIER = '074e805e-fe87-4cbb-b10b-21a06e215d41' +EXAMPLE = { + 'status': 'COMPLETE', + 'zone_id': '6625198b-d67d-47dc-8d29-f90bd60f3ac4', + 'links': { + 'self': 'http://127.0.0.1:9001/v2/zones/tasks/imports/074e805e-f', + 'href': 'http://127.0.0.1:9001/v2/zones/6625198b-d67d-', + }, + 'created_at': '2015-05-08T15:43:42.000000', + 'updated_at': '2015-05-08T15:43:43.000000', + 'version': 2, + 'message': 'example.com. 
imported', + 'project_id': 'noauth-project', + 'id': IDENTIFIER, +} + + +@mock.patch.object(zone_import.ZoneImport, '_translate_response', mock.Mock()) +class TestZoneImport(base.TestCase): + def test_basic(self): + sot = zone_import.ZoneImport() + self.assertEqual('', sot.resource_key) + self.assertEqual('imports', sot.resources_key) + self.assertEqual('/zones/tasks/import', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'message': 'message', + 'status': 'status', + 'zone_id': 'zone_id', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = zone_import.ZoneImport(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['version'], sot.version) + self.assertEqual(EXAMPLE['message'], sot.message) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['zone_id'], sot.zone_id) + + def test_create(self): + sot = zone_import.ZoneImport() + response = mock.Mock() + response.json = mock.Mock(return_value='') + self.session = mock.Mock(spec=adapter.Adapter) + self.session.default_microversion = '1.1' + + sot.create(self.session) + self.session.post.assert_called_once_with( + mock.ANY, + json=None, + headers={'content-type': 'text/dns'}, + microversion=self.session.default_microversion, + ) diff --git a/openstack/tests/unit/dns/v2/test_zone_nameserver.py b/openstack/tests/unit/dns/v2/test_zone_nameserver.py new file mode 100644 index 0000000000..76d98fa896 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_zone_nameserver.py @@ -0,0 +1,40 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not 
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from openstack.dns.v2 import zone_nameserver +from openstack.tests.unit import base + + +class TestZoneNameserver(base.TestCase): + def test_basic(self): + sot = zone_nameserver.ZoneNameserver() + self.assertEqual(None, sot.resource_key) + self.assertEqual('nameservers', sot.resources_key) + self.assertEqual('/zones/%(zone_id)s/nameservers', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_commit) + + self.assertDictEqual({}, sot._query_mapping._mapping) + + def test_make_it(self): + hostname = 'bogus-hostname' + priority = 123 + + sot = zone_nameserver.ZoneNameserver( + hostname=hostname, priority=priority + ) + self.assertEqual(hostname, sot.hostname) + self.assertEqual(priority, sot.priority) diff --git a/openstack/tests/unit/dns/v2/test_zone_share.py b/openstack/tests/unit/dns/v2/test_zone_share.py new file mode 100644 index 0000000000..45bc81ef69 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_zone_share.py @@ -0,0 +1,68 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.dns.v2 import zone_share +from openstack.tests.unit import base + + +class TestZoneShare(base.TestCase): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.json = mock.Mock(return_value=self.resp.body) + self.resp.status_code = 200 + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.post = mock.Mock(return_value=self.resp) + self.sess.default_microversion = None + + def test_basic(self): + sot = zone_share.ZoneShare() + self.assertEqual(None, sot.resource_key) + self.assertEqual('shared_zones', sot.resources_key) + self.assertEqual('/zones/%(zone_id)s/shares', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + self.assertFalse(sot.allow_commit) + + self.assertDictEqual( + { + 'target_project_id': 'target_project_id', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + share_id = 'bogus_id' + zone_id = 'bogus_zone_id' + project_id = 'bogus_project_id' + target_id = 'bogus_target_id' + expected = { + 'id': share_id, + 'zone_id': zone_id, + 'project_id': project_id, + 'target_project_id': target_id, + } + + sot = zone_share.ZoneShare(**expected) + self.assertEqual(share_id, sot.id) + self.assertEqual(zone_id, sot.zone_id) + self.assertEqual(project_id, sot.project_id) + self.assertEqual(target_id, sot.target_project_id) diff --git 
a/openstack/tests/unit/dns/v2/test_zone_transfer.py b/openstack/tests/unit/dns/v2/test_zone_transfer.py new file mode 100644 index 0000000000..b876a36345 --- /dev/null +++ b/openstack/tests/unit/dns/v2/test_zone_transfer.py @@ -0,0 +1,104 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.dns.v2 import zone_transfer +from openstack.tests.unit import base + + +IDENTIFIER = '074e805e-fe87-4cbb-b10b-21a06e215d41' +EXAMPLE_REQUEST = { + 'created_at': '2014-07-17T20:34:40.882579', + 'description': 'some description', + 'id': IDENTIFIER, + 'key': '9Z2R50Y0', + 'project_id': '1', + 'status': 'ACTIVE', + 'target_project_id': '123456', + 'updated_at': None, + 'zone_id': '6b78734a-aef1-45cd-9708-8eb3c2d26ff8', + 'zone_name': 'qa.dev.example.com.', +} +EXAMPLE_ACCEPT = { + 'status': 'COMPLETE', + 'zone_id': 'b4542f5a-f1ea-4ec1-b850-52db9dc3f465', + 'created_at': '2016-06-22 06:13:55', + 'updated_at': 'null', + 'key': 'FUGXMZ5N', + 'project_id': '2e43de7ce3504a8fb90a45382532c37e', + 'id': IDENTIFIER, + 'zone_transfer_request_id': '794fdf58-6e1d-41da-8b2d-16b6d10c8827', +} + + +class TestZoneTransferRequest(base.TestCase): + def test_basic(self): + sot = zone_transfer.ZoneTransferRequest() + # self.assertEqual('', sot.resource_key) + self.assertEqual('transfer_requests', sot.resources_key) + self.assertEqual('/zones/tasks/transfer_requests', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + 
self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertDictEqual( + {'limit': 'limit', 'marker': 'marker', 'status': 'status'}, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = zone_transfer.ZoneTransferRequest(**EXAMPLE_REQUEST) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE_REQUEST['created_at'], sot.created_at) + self.assertEqual(EXAMPLE_REQUEST['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE_REQUEST['description'], sot.description) + self.assertEqual(EXAMPLE_REQUEST['key'], sot.key) + self.assertEqual(EXAMPLE_REQUEST['project_id'], sot.project_id) + self.assertEqual(EXAMPLE_REQUEST['status'], sot.status) + self.assertEqual( + EXAMPLE_REQUEST['target_project_id'], sot.target_project_id + ) + self.assertEqual(EXAMPLE_REQUEST['zone_id'], sot.zone_id) + self.assertEqual(EXAMPLE_REQUEST['zone_name'], sot.zone_name) + + +class TestZoneTransferAccept(base.TestCase): + def test_basic(self): + sot = zone_transfer.ZoneTransferAccept() + # self.assertEqual('', sot.resource_key) + self.assertEqual('transfer_accepts', sot.resources_key) + self.assertEqual('/zones/tasks/transfer_accepts', sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + + self.assertDictEqual( + {'limit': 'limit', 'marker': 'marker', 'status': 'status'}, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = zone_transfer.ZoneTransferAccept(**EXAMPLE_ACCEPT) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE_ACCEPT['created_at'], sot.created_at) + self.assertEqual(EXAMPLE_ACCEPT['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE_ACCEPT['key'], sot.key) + self.assertEqual(EXAMPLE_ACCEPT['project_id'], sot.project_id) + self.assertEqual(EXAMPLE_ACCEPT['status'], sot.status) + self.assertEqual(EXAMPLE_ACCEPT['zone_id'], sot.zone_id) + 
self.assertEqual( + EXAMPLE_ACCEPT['zone_transfer_request_id'], + sot.zone_transfer_request_id, + ) diff --git a/openstack/tests/unit/fake/__init__.py b/openstack/tests/unit/fake/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/fake/fake_service.py b/openstack/tests/unit/fake/fake_service.py new file mode 100644 index 0000000000..8440b52b82 --- /dev/null +++ b/openstack/tests/unit/fake/fake_service.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import service_description +from openstack.tests.unit.fake.v1 import _proxy as _proxy_1 +from openstack.tests.unit.fake.v2 import _proxy as _proxy_2 + + +class FakeService(service_description.ServiceDescription): + """The fake service.""" + + supported_versions = { + '1': _proxy_1.Proxy, + '2': _proxy_2.Proxy, + } diff --git a/openstack/tests/unit/fake/v1/__init__.py b/openstack/tests/unit/fake/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/fake/v1/_proxy.py b/openstack/tests/unit/fake/v1/_proxy.py new file mode 100644 index 0000000000..8b84491fa2 --- /dev/null +++ b/openstack/tests/unit/fake/v1/_proxy.py @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack import proxy + + +class Proxy(proxy.Proxy): + skip_discovery = True + + def dummy(self): + return True diff --git a/openstack/tests/unit/fake/v1/fake.py b/openstack/tests/unit/fake/v1/fake.py new file mode 100644 index 0000000000..901be4a392 --- /dev/null +++ b/openstack/tests/unit/fake/v1/fake.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack import resource + + +class Fake(resource.Resource): + resource_key = "resource" + resources_key = "resources" + base_path = "/fake" + + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = True + + #: The transaction date and time. + timestamp = resource.Header("x-timestamp") + #: The name of this resource. + name = resource.Body("name", alternate_id=True) + #: The value of the resource. Also available in headers. + value = resource.Body("value", alias="x-resource-value") + #: Is this resource cool? If so, set it to True. + #: This is a multi-line comment about cool stuff. 
+ cool = resource.Body("cool", type=bool) diff --git a/openstack/tests/unit/fake/v2/__init__.py b/openstack/tests/unit/fake/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/fake/v2/_proxy.py b/openstack/tests/unit/fake/v2/_proxy.py new file mode 100644 index 0000000000..66955be6be --- /dev/null +++ b/openstack/tests/unit/fake/v2/_proxy.py @@ -0,0 +1,19 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +from openstack import proxy + + +class Proxy(proxy.Proxy): + skip_discovery = True + + def dummy(self): + return False diff --git a/openstack/tests/unit/fake/v2/fake.py b/openstack/tests/unit/fake/v2/fake.py new file mode 100644 index 0000000000..901be4a392 --- /dev/null +++ b/openstack/tests/unit/fake/v2/fake.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+from openstack import resource + + +class Fake(resource.Resource): + resource_key = "resource" + resources_key = "resources" + base_path = "/fake" + + allow_create = True + allow_fetch = True + allow_commit = True + allow_delete = True + allow_list = True + allow_head = True + + #: The transaction date and time. + timestamp = resource.Header("x-timestamp") + #: The name of this resource. + name = resource.Body("name", alternate_id=True) + #: The value of the resource. Also available in headers. + value = resource.Body("value", alias="x-resource-value") + #: Is this resource cool? If so, set it to True. + #: This is a multi-line comment about cool stuff. + cool = resource.Body("cool", type=bool) diff --git a/openstack/tests/unit/fakes.py b/openstack/tests/unit/fakes.py index f979ded57c..50c8d48310 100644 --- a/openstack/tests/unit/fakes.py +++ b/openstack/tests/unit/fakes.py @@ -13,14 +13,17 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock +import json +from unittest import mock + +import requests class FakeTransport(mock.Mock): RESPONSE = mock.Mock('200 OK') def __init__(self): - super(FakeTransport, self).__init__() + super().__init__() self.request = mock.Mock() self.request.return_value = self.RESPONSE @@ -30,8 +33,24 @@ class FakeAuthenticator(mock.Mock): ENDPOINT = 'http://www.example.com/endpoint' def __init__(self): - super(FakeAuthenticator, self).__init__() + super().__init__() self.get_token = mock.Mock() self.get_token.return_value = self.TOKEN self.get_endpoint = mock.Mock() self.get_endpoint.return_value = self.ENDPOINT + + +class FakeResponse(requests.Response): + def __init__( + self, headers=None, status_code=200, data=None, encoding=None + ): + super().__init__() + + headers = headers or {} + + self.status_code = status_code + + self.headers.update(headers) + self._content = json.dumps(data) + if not isinstance(self._content, bytes): + self._content = self._content.encode() diff --git a/openstack/tests/unit/fixtures/accelerator.json b/openstack/tests/unit/fixtures/accelerator.json new file mode 100644 index 0000000000..bf2c046913 --- /dev/null +++ b/openstack/tests/unit/fixtures/accelerator.json @@ -0,0 +1,27 @@ +{ + "versions": [ + { + "id": "2.0", + "links": [ + { + "href": "/v2/", + "rel": "self" + }, + { + "href": "https://accelerator.example.com/api-ref/accelerator", + "rel": "help" + } + ], + "max_version": "2.0", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.accelerator-v1+json" + } + ], + "min_version": "2.0", + "status": "CURRENT", + "updated": "2019-09-01T00:00:00Z" + } + ] +} diff --git a/openstack/tests/unit/fixtures/bad-glance-version.json b/openstack/tests/unit/fixtures/bad-glance-version.json new file mode 100644 index 0000000000..0fd91011ff --- /dev/null +++ b/openstack/tests/unit/fixtures/bad-glance-version.json @@ -0,0 +1,15 @@ +{ + "versions": [ + { + "status": "CURRENT", + "updated": "2013-07-23T11:33:21Z", 
+ "links": [ + { + "href": "https://example.com/image/v7/", + "rel": "self" + } + ], + "id": "v7" + } + ] +} diff --git a/openstack/tests/unit/fixtures/bad-placement.json b/openstack/tests/unit/fixtures/bad-placement.json new file mode 100644 index 0000000000..72f7fd7168 --- /dev/null +++ b/openstack/tests/unit/fixtures/bad-placement.json @@ -0,0 +1,10 @@ +{ + "versions": [ + { + "id": "v1.0", + "links": [{"href": "", "rel": "self"}], + "max_version": "1.17", + "min_version": "1.0" + } + ] +} diff --git a/openstack/tests/unit/fixtures/baremetal.json b/openstack/tests/unit/fixtures/baremetal.json new file mode 100644 index 0000000000..4712764fd4 --- /dev/null +++ b/openstack/tests/unit/fixtures/baremetal.json @@ -0,0 +1,30 @@ +{ + "default_version": { + "id": "v1", + "links": [ + { + "href": "https://baremetal.example.com/v1/", + "rel": "self" + } + ], + "min_version": "1.1", + "status": "CURRENT", + "version": "1.33" + }, + "description": "Ironic is an OpenStack project which aims to provision baremetal machines.", + "name": "OpenStack Ironic API", + "versions": [ + { + "id": "v1", + "links": [ + { + "href": "https://baremetal.example.com/v1/", + "rel": "self" + } + ], + "min_version": "1.1", + "status": "CURRENT", + "version": "1.33" + } + ] +} diff --git a/openstack/tests/unit/fixtures/block-storage-version.json b/openstack/tests/unit/fixtures/block-storage-version.json new file mode 100644 index 0000000000..f6daa6e851 --- /dev/null +++ b/openstack/tests/unit/fixtures/block-storage-version.json @@ -0,0 +1,28 @@ +{ + "versions": [ + { + "status": "CURRENT", + "updated": "2017-02-25T12:00:00Z", + "links": [ + { + "href": "https://docs.openstack.org/", + "type": "text/html", + "rel": "describedby" + }, + { + "href": "https://volume.example.com/v3/", + "rel": "self" + } + ], + "min_version": "3.0", + "version": "3.0", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.volume+json;version=3" + } + ], + "id": "v3.0" + } + ] +} 
diff --git a/openstack/tests/unit/fixtures/clouds/clouds.yaml b/openstack/tests/unit/fixtures/clouds/clouds.yaml new file mode 100644 index 0000000000..ebd4cc0ba5 --- /dev/null +++ b/openstack/tests/unit/fixtures/clouds/clouds.yaml @@ -0,0 +1,26 @@ +clouds: + _test_cloud_: + auth: + auth_url: https://identity.example.com + password: password + project_name: admin + username: admin + user_domain_name: default + project_domain_name: default + region_name: RegionOne + _test_cloud_v2_: + auth: + auth_url: https://identity.example.com + password: password + project_name: admin + username: admin + identity_api_version: '2.0' + region_name: RegionOne + _bogus_test_: + auth_type: bogus + auth: + auth_url: https://identity.example.com/v2.0 + username: _test_user_ + password: _test_pass_ + project_name: _test_project_ + region_name: _test_region_ diff --git a/openstack/tests/unit/fixtures/clouds/clouds_cache.yaml b/openstack/tests/unit/fixtures/clouds/clouds_cache.yaml new file mode 100644 index 0000000000..21fb137a91 --- /dev/null +++ b/openstack/tests/unit/fixtures/clouds/clouds_cache.yaml @@ -0,0 +1,32 @@ +cache: + max_age: 90 + class: dogpile.cache.memory + expiration: + server: 1 + port: 1 +clouds: + _test_cloud_: + auth: + auth_url: https://identity.example.com + password: password + project_name: admin + username: admin + user_domain_name: default + project_domain_name: default + region_name: RegionOne + _test_cloud_v2_: + auth: + auth_url: https://identity.example.com + password: password + project_name: admin + username: admin + identity_api_version: '2.0' + region_name: RegionOne + _bogus_test_: + auth_type: bogus + auth: + auth_url: http://identity.example.com/v2.0 + username: _test_user_ + password: _test_pass_ + project_name: _test_project_ + region_name: _test_region_ diff --git a/openstack/tests/unit/fixtures/clustering.json b/openstack/tests/unit/fixtures/clustering.json new file mode 100644 index 0000000000..228399c055 --- /dev/null +++ 
b/openstack/tests/unit/fixtures/clustering.json @@ -0,0 +1,27 @@ +{ + "versions": [ + { + "id": "1.0", + "links": [ + { + "href": "/v1/", + "rel": "self" + }, + { + "href": "https://clustering.example.com/api-ref/clustering", + "rel": "help" + } + ], + "max_version": "1.7", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.clustering-v1+json" + } + ], + "min_version": "1.0", + "status": "CURRENT", + "updated": "2016-01-18T00:00:00Z" + } + ] +} diff --git a/openstack/tests/unit/fixtures/compute-version.json b/openstack/tests/unit/fixtures/compute-version.json new file mode 100644 index 0000000000..2e9ba6ae79 --- /dev/null +++ b/openstack/tests/unit/fixtures/compute-version.json @@ -0,0 +1,30 @@ +{ + "versions": [ + { + "status": "SUPPORTED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "href": "https://compute.example.com/v2/", + "rel": "self" + } + ], + "min_version": "", + "version": "", + "id": "v2.0" + }, + { + "status": "CURRENT", + "updated": "2013-07-23T11:33:21Z", + "links": [ + { + "href": "https://compute.example.com/v2.1/", + "rel": "self" + } + ], + "min_version": "2.10", + "version": "2.53", + "id": "v2.1" + } + ] +} diff --git a/openstack/tests/unit/fixtures/discovery.json b/openstack/tests/unit/fixtures/discovery.json new file mode 100644 index 0000000000..9162ecc9d7 --- /dev/null +++ b/openstack/tests/unit/fixtures/discovery.json @@ -0,0 +1,45 @@ +{ + "versions": { + "values": [ + { + "status": "stable", + "updated": "2016-04-04T00:00:00Z", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.identity-v3+json" + } + ], + "id": "v3.6", + "links": [ + { + "href": "https://identity.example.com/v3/", + "rel": "self" + } + ] + }, + { + "status": "stable", + "updated": "2014-04-17T00:00:00Z", + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.identity-v2.0+json" + } + ], + "id": "v2.0", + "links": [ + { + "href": 
"https://identity.example.com/v2.0/", + "rel": "self" + }, + { + "href": "http://docs.openstack.org/", + "type": "text/html", + "rel": "describedby" + } + ] + } + ] + } +} diff --git a/openstack/tests/unit/fixtures/dns.json b/openstack/tests/unit/fixtures/dns.json new file mode 100644 index 0000000000..1fc8e86bda --- /dev/null +++ b/openstack/tests/unit/fixtures/dns.json @@ -0,0 +1,24 @@ +{ + "versions": { + "values": [{ + "id": "v1", + "links": [ + { + "href": "https://dns.example.com/v1", + "rel": "self" + } + ], + "status": "DEPRECATED" + }, { + "id": "v2", + "links": [ + { + "href": "https://dns.example.com/v2", + "rel": "self" + } + ], + "status": "CURRENT" + }] + } +} + diff --git a/openstack/tests/unit/fixtures/image-version-broken.json b/openstack/tests/unit/fixtures/image-version-broken.json new file mode 100644 index 0000000000..a130ca4032 --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version-broken.json @@ -0,0 +1,64 @@ +{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.3", + "links": [ + { + "href": "http://localhost/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.2", + "links": [ + { + "href": "http://localhost/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.1", + "links": [ + { + "href": "http://localhost/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.0", + "links": [ + { + "href": "http://localhost/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.1", + "links": [ + { + "href": "http://localhost/v1/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.0", + "links": [ + { + "href": "http://localhost/v1/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/image-version-suburl.json b/openstack/tests/unit/fixtures/image-version-suburl.json new file mode 100644 index 0000000000..5ec1a07939 --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version-suburl.json @@ -0,0 +1,64 @@ 
+{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.3", + "links": [ + { + "href": "http://example.com/image/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.2", + "links": [ + { + "href": "http://example.com/image/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.1", + "links": [ + { + "href": "http://example.com/image/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.0", + "links": [ + { + "href": "http://example.com/image/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.1", + "links": [ + { + "href": "http://example.com/image/v1/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.0", + "links": [ + { + "href": "http://example.com/image/v1/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/image-version-v1.json b/openstack/tests/unit/fixtures/image-version-v1.json new file mode 100644 index 0000000000..60b0a3bd37 --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version-v1.json @@ -0,0 +1,24 @@ +{ + "versions": [ + { + "status": "CURRENT", + "id": "v1.1", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.0", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/image-version-v2.json b/openstack/tests/unit/fixtures/image-version-v2.json new file mode 100644 index 0000000000..399a53aa99 --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version-v2.json @@ -0,0 +1,44 @@ +{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.3", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.2", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.1", + "links": [ + { + "href": 
"http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.0", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/image-version.json b/openstack/tests/unit/fixtures/image-version.json new file mode 100644 index 0000000000..bd688ee3b4 --- /dev/null +++ b/openstack/tests/unit/fixtures/image-version.json @@ -0,0 +1,64 @@ +{ + "versions": [ + { + "status": "CURRENT", + "id": "v2.3", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.2", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.1", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v2.0", + "links": [ + { + "href": "http://image.example.com/v2/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.1", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + }, + { + "status": "SUPPORTED", + "id": "v1.0", + "links": [ + { + "href": "http://image.example.com/v1/", + "rel": "self" + } + ] + } + ] +} diff --git a/openstack/tests/unit/fixtures/old-compute-version.json b/openstack/tests/unit/fixtures/old-compute-version.json new file mode 100644 index 0000000000..08cbfa95c8 --- /dev/null +++ b/openstack/tests/unit/fixtures/old-compute-version.json @@ -0,0 +1,30 @@ +{ + "versions": [ + { + "status": "SUPPORTED", + "updated": "2011-01-21T11:33:21Z", + "links": [ + { + "href": "https://compute.example.com/v2/", + "rel": "self" + } + ], + "min_version": "", + "version": "", + "id": "v2.0" + }, + { + "status": "CURRENT", + "updated": "2013-07-23T11:33:21Z", + "links": [ + { + "href": "https://compute.example.com/v2.1/", + "rel": "self" + } + ], + "min_version": "2.10", + "version": "2.50", + "id": "v2.1" + } + ] +} diff 
--git a/openstack/tests/unit/fixtures/placement.json b/openstack/tests/unit/fixtures/placement.json new file mode 100644 index 0000000000..6ba0f278e8 --- /dev/null +++ b/openstack/tests/unit/fixtures/placement.json @@ -0,0 +1,11 @@ +{ + "versions": [ + { + "id": "v1.0", + "links": [{"href": "", "rel": "self"}], + "max_version": "1.17", + "min_version": "1.0", + "status": "CURRENT" + } + ] +} diff --git a/openstack/tests/unit/fixtures/shared-file-system.json b/openstack/tests/unit/fixtures/shared-file-system.json new file mode 100644 index 0000000000..3d22f6e5c2 --- /dev/null +++ b/openstack/tests/unit/fixtures/shared-file-system.json @@ -0,0 +1,28 @@ +{ + "versions": [ + { + "id": "v2.0", + "status": "CURRENT", + "version": "2.58", + "min_version": "2.0", + "updated": "2015-08-27T11:33:21Z", + "links": [ + { + "rel": "describedby", + "type": "text/html", + "href": "http://docs.openstack.org/" + }, + { + "rel": "self", + "href": "https://shared-file-system.example.com/v2/" + } + ], + "media-types": [ + { + "base": "application/json", + "type": "application/vnd.openstack.share+json;version=1" + } + ] + } + ] +} \ No newline at end of file diff --git a/openstack/tests/unit/identity/test_identity_service.py b/openstack/tests/unit/identity/test_identity_service.py deleted file mode 100644 index dbfaea3d6f..0000000000 --- a/openstack/tests/unit/identity/test_identity_service.py +++ /dev/null @@ -1,37 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.identity import identity_service - - -class TestIdentityService(testtools.TestCase): - - def test_regular_service(self): - sot = identity_service.IdentityService() - self.assertEqual('identity', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(2, len(sot.valid_versions)) - self.assertEqual('v3', sot.valid_versions[0].module) - self.assertEqual('v3', sot.valid_versions[0].path) - self.assertEqual('v2', sot.valid_versions[1].module) - self.assertEqual('v2', sot.valid_versions[1].path) - - def test_admin_service(self): - sot = identity_service.AdminService() - self.assertEqual('identity', sot.service_type) - self.assertEqual('admin', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) diff --git a/openstack/tests/unit/identity/test_version.py b/openstack/tests/unit/identity/test_version.py index 0fac7cbb6c..8ec4a52fdd 100644 --- a/openstack/tests/unit/identity/test_version.py +++ b/openstack/tests/unit/identity/test_version.py @@ -10,10 +10,10 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +from unittest import mock from openstack.identity import version +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -24,22 +24,20 @@ } -class TestVersion(testtools.TestCase): - +class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = version.Version(EXAMPLE) + sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['media-types'], sot.media_types) self.assertEqual(EXAMPLE['status'], sot.status) @@ -58,8 +56,8 @@ def test_list(self): resp.json = mock.Mock(return_value=resp.body) session = mock.Mock() session.get = mock.Mock(return_value=resp) - sot = version.Version(EXAMPLE) + sot = version.Version(**EXAMPLE) result = sot.list(session) - self.assertEqual(next(result)['id'], 'v1.0') - self.assertEqual(next(result)['id'], 'v1.1') + self.assertEqual(next(result).id, 'v1.0') + self.assertEqual(next(result).id, 'v1.1') self.assertRaises(StopIteration, next, result) diff --git a/openstack/tests/unit/identity/v2/test_extension.py b/openstack/tests/unit/identity/v2/test_extension.py index 3c9471d5cd..b6df19f9e4 100644 --- a/openstack/tests/unit/identity/v2/test_extension.py +++ b/openstack/tests/unit/identity/v2/test_extension.py @@ -10,38 +10,36 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +from unittest import mock from openstack.identity.v2 import extension +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'alias': '1', 'description': '2', - 'links': '3', + 'links': [], 'name': '4', 'namespace': '5', 'updated': '2015-03-09T12:14:57.233772', } -class TestExtension(testtools.TestCase): - +class TestExtension(base.TestCase): def test_basic(self): sot = extension.Extension() self.assertEqual('extension', sot.resource_key) self.assertEqual('extensions', sot.resources_key) self.assertEqual('/extensions', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = extension.Extension(EXAMPLE) + sot = extension.Extension(**EXAMPLE) self.assertEqual(EXAMPLE['alias'], sot.alias) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['links'], sot.links) @@ -62,7 +60,7 @@ def test_list(self): resp.json = mock.Mock(return_value=resp.body) session = mock.Mock() session.get = mock.Mock(return_value=resp) - sot = extension.Extension(EXAMPLE) + sot = extension.Extension(**EXAMPLE) result = sot.list(session) self.assertEqual(next(result).name, 'a') self.assertEqual(next(result).name, 'b') diff --git a/openstack/tests/unit/identity/v2/test_proxy.py b/openstack/tests/unit/identity/v2/test_proxy.py index 83a39c8ff7..57aebbe1a3 100644 --- a/openstack/tests/unit/identity/v2/test_proxy.py +++ b/openstack/tests/unit/identity/v2/test_proxy.py @@ -14,12 +14,12 @@ from openstack.identity.v2 import role from openstack.identity.v2 import tenant from openstack.identity.v2 import user -from openstack.tests.unit import test_proxy_base +from openstack.tests.unit import test_proxy_base as test_proxy_base 
class TestIdentityProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestIdentityProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) def test_role_create_attrs(self): @@ -38,7 +38,7 @@ def test_role_get(self): self.verify_get(self.proxy.get_role, role.Role) def test_roles(self): - self.verify_list(self.proxy.roles, role.Role, paginated=True) + self.verify_list(self.proxy.roles, role.Role) def test_role_update(self): self.verify_update(self.proxy.update_role, role.Role) @@ -59,7 +59,7 @@ def test_tenant_get(self): self.verify_get(self.proxy.get_tenant, tenant.Tenant) def test_tenants(self): - self.verify_list(self.proxy.tenants, tenant.Tenant, paginated=True) + self.verify_list(self.proxy.tenants, tenant.Tenant) def test_tenant_update(self): self.verify_update(self.proxy.update_tenant, tenant.Tenant) @@ -80,7 +80,7 @@ def test_user_get(self): self.verify_get(self.proxy.get_user, user.User) def test_users(self): - self.verify_list(self.proxy.users, user.User, paginated=True) + self.verify_list(self.proxy.users, user.User) def test_user_update(self): self.verify_update(self.proxy.update_user, user.User) diff --git a/openstack/tests/unit/identity/v2/test_role.py b/openstack/tests/unit/identity/v2/test_role.py index 55967c25ec..cc19192bb0 100644 --- a/openstack/tests/unit/identity/v2/test_role.py +++ b/openstack/tests/unit/identity/v2/test_role.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.identity.v2 import role +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,23 +23,20 @@ } -class TestRole(testtools.TestCase): - +class TestRole(base.TestCase): def test_basic(self): sot = role.Role() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual('/OS-KSADM/roles', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = role.Role(EXAMPLE) - self.assertTrue(sot.enabled) + sot = role.Role(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) diff --git a/openstack/tests/unit/identity/v2/test_tenant.py b/openstack/tests/unit/identity/v2/test_tenant.py index 3430284b08..e3fe61c29f 100644 --- a/openstack/tests/unit/identity/v2/test_tenant.py +++ b/openstack/tests/unit/identity/v2/test_tenant.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.identity.v2 import tenant +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,22 +23,20 @@ } -class TestTenant(testtools.TestCase): - +class TestTenant(base.TestCase): def test_basic(self): sot = tenant.Tenant() self.assertEqual('tenant', sot.resource_key) self.assertEqual('tenants', sot.resources_key) self.assertEqual('/tenants', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = tenant.Tenant(EXAMPLE) + sot = tenant.Tenant(**EXAMPLE) self.assertEqual(EXAMPLE['description'], sot.description) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) diff --git a/openstack/tests/unit/identity/v2/test_user.py b/openstack/tests/unit/identity/v2/test_user.py index d6ec1ee394..63b069b9e4 100644 --- a/openstack/tests/unit/identity/v2/test_user.py +++ b/openstack/tests/unit/identity/v2/test_user.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.identity.v2 import user +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,22 +23,20 @@ } -class TestUser(testtools.TestCase): - +class TestUser(base.TestCase): def test_basic(self): sot = user.User() self.assertEqual('user', sot.resource_key) self.assertEqual('users', sot.resources_key) self.assertEqual('/users', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = user.User(EXAMPLE) + sot = user.User(**EXAMPLE) self.assertEqual(EXAMPLE['email'], sot.email) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) diff --git a/openstack/tests/unit/identity/v3/test_access_rule.py b/openstack/tests/unit/identity/v3/test_access_rule.py new file mode 100644 index 0000000000..95aa6275b9 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_access_rule.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import access_rule +from openstack.tests.unit import base + +EXAMPLE = { + "links": { + "self": "https://example.com/identity/v3/access_rules" + "/07d719df00f349ef8de77d542edf010c" + }, + "path": "/v2.1/servers/{server_id}/ips", + "method": "GET", + "service": "compute", +} + + +class TestAccessRule(base.TestCase): + def test_basic(self): + sot = access_rule.AccessRule() + self.assertEqual('access_rule', sot.resource_key) + self.assertEqual('access_rules', sot.resources_key) + self.assertEqual('/users/%(user_id)s/access_rules', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = access_rule.AccessRule(**EXAMPLE) + self.assertEqual(EXAMPLE['path'], sot.path) + self.assertEqual(EXAMPLE['method'], sot.method) + self.assertEqual(EXAMPLE['service'], sot.service) + self.assertEqual(EXAMPLE['links'], sot.links) diff --git a/openstack/tests/unit/identity/v3/test_application_credential.py b/openstack/tests/unit/identity/v3/test_application_credential.py new file mode 100644 index 0000000000..c4a3ba7295 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_application_credential.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import application_credential +from openstack.tests.unit import base + + +EXAMPLE = { + "user": {"id": "8ac43bb0926245cead88676a96c750d3"}, + "name": 'monitoring', + "secret": 'rEaqvJka48mpv', + "roles": [{"name": "Reader"}], + "access_rules": [ + {"path": "/v2.0/metrics", "service": "monitoring", "method": "GET"}, + ], + "expires_at": '2018-02-27T18:30:59Z', + "description": "Application credential for monitoring", + "unrestricted": "False", + "project_id": "3", + "links": {"self": "http://example.com/v3/application_credential_1"}, +} + + +class TestApplicationCredential(base.TestCase): + def test_basic(self): + sot = application_credential.ApplicationCredential() + self.assertEqual('application_credential', sot.resource_key) + self.assertEqual('application_credentials', sot.resources_key) + self.assertEqual( + '/users/%(user_id)s/application_credentials', sot.base_path + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = application_credential.ApplicationCredential(**EXAMPLE) + self.assertEqual(EXAMPLE['user'], sot.user) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['secret'], sot.secret) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['expires_at'], sot.expires_at) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['roles'], sot.roles) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['access_rules'], sot.access_rules) diff --git a/openstack/tests/unit/identity/v3/test_credential.py b/openstack/tests/unit/identity/v3/test_credential.py index 36066d9cdc..06ad8ca705 100644 --- a/openstack/tests/unit/identity/v3/test_credential.py +++ b/openstack/tests/unit/identity/v3/test_credential.py @@ -10,9 +10,9 @@ # License for the specific language governing 
permissions and limitations # under the License. -import testtools - from openstack.identity.v3 import credential +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -24,20 +24,28 @@ } -class TestCredential(testtools.TestCase): - +class TestCredential(base.TestCase): def test_basic(self): sot = credential.Credential() self.assertEqual('credential', sot.resource_key) self.assertEqual('credentials', sot.resources_key) self.assertEqual('/credentials', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'type': 'type', + 'user_id': 'user_id', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = credential.Credential(**EXAMPLE) diff --git a/openstack/tests/unit/identity/v3/test_domain.py b/openstack/tests/unit/identity/v3/test_domain.py index 07faa7fe5c..03d868cae3 100644 --- a/openstack/tests/unit/identity/v3/test_domain.py +++ b/openstack/tests/unit/identity/v3/test_domain.py @@ -10,34 +10,69 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools +from unittest import mock + +from keystoneauth1 import adapter from openstack.identity.v3 import domain +from openstack.identity.v3 import group +from openstack.identity.v3 import role +from openstack.identity.v3 import user +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'enabled': True, 'id': IDENTIFIER, - 'links': {'self': 'http://example.com/identity/v3/domains/id'}, + 'links': {'self': f'http://example.com/identity/v3/domains/{IDENTIFIER}'}, 'name': '4', + 'options': {'foo': 'bar'}, } -class TestDomain(testtools.TestCase): +class TestDomain(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = 1 + self.sess._get_connection = mock.Mock(return_value=self.cloud) + self.good_resp = mock.Mock() + self.good_resp.body = None + self.good_resp.json = mock.Mock(return_value=self.good_resp.body) + self.good_resp.status_code = 204 + + self.bad_resp = mock.Mock() + self.bad_resp.body = None + self.bad_resp.json = mock.Mock(return_value=self.bad_resp.body) + self.bad_resp.status_code = 401 def test_basic(self): sot = domain.Domain() self.assertEqual('domain', sot.resource_key) self.assertEqual('domains', sot.resources_key) self.assertEqual('/domains', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'name': 'name', + 'is_enabled': 'enabled', + 'limit': 'limit', + 'marker': 'marker', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = 
domain.Domain(**EXAMPLE) @@ -46,3 +81,244 @@ def test_make_it(self): self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) + self.assertDictEqual(EXAMPLE['options'], sot.options) + + def test_assign_role_to_user_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.put.assert_called_with('domains/IDENTIFIER/users/1/roles/2') + + def test_assign_inherited_role_to_user_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_user( + self.sess, user.User(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.put.assert_called_with( + 'OS-INHERIT/domains/IDENTIFIER/users/1/roles/2/inherited_to_projects' + ) + + def test_assign_role_to_user_bad(self): + sot = domain.Domain(**EXAMPLE) + resp = self.bad_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertFalse( + sot.assign_role_to_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + def test_validate_user_has_role_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + sot.validate_user_has_role( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.head.assert_called_with('domains/IDENTIFIER/users/1/roles/2') + + def test_validate_user_has_inherited_role_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + sot.validate_user_has_role( + self.sess, user.User(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.head.assert_called_with( + 'OS-INHERIT/domains/IDENTIFIER/users/1/roles/2/inherited_to_projects' + ) + + def 
test_validate_user_has_role_bad(self): + sot = domain.Domain(**EXAMPLE) + resp = self.bad_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertFalse( + sot.validate_user_has_role( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + def test_unassign_role_from_user_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.delete.assert_called_with( + 'domains/IDENTIFIER/users/1/roles/2' + ) + + def test_unassign_inherited_role_from_user_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_user( + self.sess, user.User(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.delete.assert_called_with( + 'OS-INHERIT/domains/IDENTIFIER/users/1/roles/2/inherited_to_projects' + ) + + def test_unassign_role_from_user_bad(self): + sot = domain.Domain(**EXAMPLE) + resp = self.bad_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertFalse( + sot.unassign_role_from_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + def test_assign_role_to_group_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.put.assert_called_with('domains/IDENTIFIER/groups/1/roles/2') + + def test_assign_inherited_role_to_group_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_group( + self.sess, group.Group(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.put.assert_called_with( + 
'OS-INHERIT/domains/IDENTIFIER/groups/1/roles/2/inherited_to_projects' + ) + + def test_assign_role_to_group_bad(self): + sot = domain.Domain(**EXAMPLE) + resp = self.bad_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertFalse( + sot.assign_role_to_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + def test_validate_group_has_role_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + sot.validate_group_has_role( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.head.assert_called_with( + 'domains/IDENTIFIER/groups/1/roles/2' + ) + + def test_validate_group_has_inherited_role_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + sot.validate_group_has_role( + self.sess, group.Group(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.head.assert_called_with( + 'OS-INHERIT/domains/IDENTIFIER/groups/1/roles/2/inherited_to_projects' + ) + + def test_validate_group_has_role_bad(self): + sot = domain.Domain(**EXAMPLE) + resp = self.bad_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertFalse( + sot.validate_group_has_role( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + def test_unassign_role_from_group_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.delete.assert_called_with( + 'domains/IDENTIFIER/groups/1/roles/2' + ) + + def test_unassign_inherited_role_from_group_good(self): + sot = domain.Domain(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_group( + self.sess, group.Group(id='1'), 
role.Role(id='2'), True + ) + ) + + self.sess.delete.assert_called_with( + 'OS-INHERIT/domains/IDENTIFIER/groups/1/roles/2/inherited_to_projects' + ) + + def test_unassign_role_from_group_bad(self): + sot = domain.Domain(**EXAMPLE) + resp = self.bad_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertFalse( + sot.unassign_role_from_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) diff --git a/openstack/tests/unit/identity/v3/test_domain_config.py b/openstack/tests/unit/identity/v3/test_domain_config.py new file mode 100644 index 0000000000..26d774d9a8 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_domain_config.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import domain_config +from openstack.tests.unit import base + + +EXAMPLE = { + 'identity': { + 'driver': 'ldap', + }, + 'ldap': { + 'url': 'ldap://myldap.com:389/', + 'user_tree_dn': 'ou=Users,dc=my_new_root,dc=org', + }, +} + + +class TestDomainConfig(base.TestCase): + def test_basic(self): + sot = domain_config.DomainConfig() + self.assertEqual('config', sot.resource_key) + self.assertEqual('/domains/%(domain_id)s/config', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + def test_make_it(self): + sot = domain_config.DomainConfig(**EXAMPLE) + self.assertIsInstance(sot.identity, domain_config.DomainConfigDriver) + self.assertEqual(EXAMPLE['identity']['driver'], sot.identity.driver) + self.assertIsInstance(sot.ldap, domain_config.DomainConfigLDAP) + self.assertEqual(EXAMPLE['ldap']['url'], sot.ldap.url) + self.assertEqual( + EXAMPLE['ldap']['user_tree_dn'], + sot.ldap.user_tree_dn, + ) diff --git a/openstack/tests/unit/identity/v3/test_endpoint.py b/openstack/tests/unit/identity/v3/test_endpoint.py index 44af716217..8ee404d21d 100644 --- a/openstack/tests/unit/identity/v3/test_endpoint.py +++ b/openstack/tests/unit/identity/v3/test_endpoint.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.identity.v3 import endpoint +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -26,20 +26,28 @@ } -class TestEndpoint(testtools.TestCase): - +class TestEndpoint(base.TestCase): def test_basic(self): sot = endpoint.Endpoint() self.assertEqual('endpoint', sot.resource_key) self.assertEqual('endpoints', sot.resources_key) self.assertEqual('/endpoints', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + self.assertDictEqual( + { + 'interface': 'interface', + 'service_id': 'service_id', + 'region_id': 'region_id', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = endpoint.Endpoint(**EXAMPLE) diff --git a/openstack/tests/unit/identity/v3/test_federation_protocol.py b/openstack/tests/unit/identity/v3/test_federation_protocol.py new file mode 100644 index 0000000000..4aee07cbb0 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_federation_protocol.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import federation_protocol +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'idp_id': 'example_idp', + 'mapping_id': 'example_mapping', +} + + +class TestFederationProtocol(base.TestCase): + def test_basic(self): + sot = federation_protocol.FederationProtocol() + self.assertEqual('protocol', sot.resource_key) + self.assertEqual('protocols', sot.resources_key) + self.assertEqual( + '/OS-FEDERATION/identity_providers/%(idp_id)s/protocols', + sot.base_path, + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.create_exclude_id_from_body) + self.assertEqual('PATCH', sot.commit_method) + self.assertEqual('PUT', sot.create_method) + + self.assertDictEqual( + { + 'id': 'id', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = federation_protocol.FederationProtocol(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['idp_id'], sot.idp_id) + self.assertEqual(EXAMPLE['mapping_id'], sot.mapping_id) diff --git a/openstack/tests/unit/identity/v3/test_group.py b/openstack/tests/unit/identity/v3/test_group.py index ff30c971e4..f57aa4497b 100644 --- a/openstack/tests/unit/identity/v3/test_group.py +++ b/openstack/tests/unit/identity/v3/test_group.py @@ -10,9 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools +from unittest import mock + +from keystoneauth1 import adapter from openstack.identity.v3 import group +from openstack.identity.v3 import user +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,20 +28,38 @@ } -class TestGroup(testtools.TestCase): +class TestGroup(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = 1 + self.sess._get_connection = mock.Mock(return_value=self.cloud) + self.good_resp = mock.Mock() + self.good_resp.body = None + self.good_resp.json = mock.Mock(return_value=self.good_resp.body) + self.good_resp.status_code = 204 def test_basic(self): sot = group.Group() self.assertEqual('group', sot.resource_key) self.assertEqual('groups', sot.resources_key) self.assertEqual('/groups', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'domain_id': 'domain_id', + 'name': 'name', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = group.Group(**EXAMPLE) @@ -44,3 +67,43 @@ def test_make_it(self): self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) + + def test_add_user(self): + sot = group.Group(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + sot.add_user(self.sess, user.User(id='1')) + + self.sess.put.assert_called_with('groups/IDENTIFIER/users/1') + + def test_remove_user(self): + sot = group.Group(**EXAMPLE) + resp = self.good_resp + self.sess.delete = 
mock.Mock(return_value=resp) + + sot.remove_user(self.sess, user.User(id='1')) + + self.sess.delete.assert_called_with('groups/IDENTIFIER/users/1') + + def test_check_user(self): + sot = group.Group(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue(sot.check_user(self.sess, user.User(id='1'))) + + self.sess.head.assert_called_with('groups/IDENTIFIER/users/1') + + +class TestUserGroup(base.TestCase): + def test_basic(self): + sot = group.UserGroup() + self.assertEqual('group', sot.resource_key) + self.assertEqual('groups', sot.resources_key) + self.assertEqual('/users/%(user_id)s/groups', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/identity/v3/test_identity_provider.py b/openstack/tests/unit/identity/v3/test_identity_provider.py new file mode 100644 index 0000000000..e1c3dfdba5 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_identity_provider.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import identity_provider +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'domain_id': 'example_domain', + 'description': 'An example description', + 'is_enabled': True, + 'remote_ids': ['https://auth.example.com/auth/realms/ExampleRealm'], + 'authorization_ttl': 7, +} + + +class TestIdentityProvider(base.TestCase): + def test_basic(self): + sot = identity_provider.IdentityProvider() + self.assertEqual('identity_provider', sot.resource_key) + self.assertEqual('identity_providers', sot.resources_key) + self.assertEqual('/OS-FEDERATION/identity_providers', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.create_exclude_id_from_body) + self.assertEqual('PATCH', sot.commit_method) + self.assertEqual('PUT', sot.create_method) + + self.assertDictEqual( + { + 'id': 'id', + 'limit': 'limit', + 'marker': 'marker', + 'is_enabled': 'enabled', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = identity_provider.IdentityProvider(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['id'], sot.name) + self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['is_enabled'], sot.is_enabled) + self.assertEqual(EXAMPLE['remote_ids'], sot.remote_ids) + self.assertEqual(EXAMPLE['authorization_ttl'], sot.authorization_ttl) diff --git a/openstack/tests/unit/identity/v3/test_limit.py b/openstack/tests/unit/identity/v3/test_limit.py new file mode 100644 index 0000000000..7e4dc346e2 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_limit.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import limit +from openstack.tests.unit import base + + +EXAMPLE = { + "service_id": "8ac43bb0926245cead88676a96c750d3", + "region_id": 'RegionOne', + "resource_name": 'cores', + "resource_limit": 10, + "project_id": 'a8455cdd4249498f99b63d5af2fb4bc8', + "description": "compute cores for project 123", + "links": {"self": "http://example.com/v3/limit_1"}, +} + + +class TestLimit(base.TestCase): + def test_basic(self): + sot = limit.Limit() + self.assertEqual('limits', sot.resources_key) + self.assertEqual('/limits', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'service_id': 'service_id', + 'region_id': 'region_id', + 'resource_name': 'resource_name', + 'project_id': 'project_id', + 'marker': 'marker', + 'limit': 'limit', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = limit.Limit(**EXAMPLE) + self.assertEqual(EXAMPLE['service_id'], sot.service_id) + self.assertEqual(EXAMPLE['region_id'], sot.region_id) + self.assertEqual(EXAMPLE['resource_name'], sot.resource_name) + self.assertEqual(EXAMPLE['resource_limit'], sot.resource_limit) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['links'], sot.links) diff --git a/openstack/tests/unit/identity/v3/test_mapping.py 
b/openstack/tests/unit/identity/v3/test_mapping.py new file mode 100644 index 0000000000..992dcb70c3 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_mapping.py @@ -0,0 +1,51 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import mapping +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'rules': [{'local': [], 'remote': []}], + 'schema_version': '2.0', +} + + +class TestMapping(base.TestCase): + def test_basic(self): + sot = mapping.Mapping() + self.assertEqual('mapping', sot.resource_key) + self.assertEqual('mappings', sot.resources_key) + self.assertEqual('/OS-FEDERATION/mappings', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + self.assertEqual('PUT', sot.create_method) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = mapping.Mapping(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['rules'], sot.rules) + self.assertEqual(EXAMPLE['schema_version'], sot.schema_version) diff --git a/openstack/tests/unit/identity/v3/test_policy.py b/openstack/tests/unit/identity/v3/test_policy.py index cdd8b71dcb..cb06f40ac2 100644 --- 
a/openstack/tests/unit/identity/v3/test_policy.py +++ b/openstack/tests/unit/identity/v3/test_policy.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.identity.v3 import policy +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -25,20 +25,18 @@ } -class TestPolicy(testtools.TestCase): - +class TestPolicy(base.TestCase): def test_basic(self): sot = policy.Policy() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/policies', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) def test_make_it(self): sot = policy.Policy(**EXAMPLE) diff --git a/openstack/tests/unit/identity/v3/test_project.py b/openstack/tests/unit/identity/v3/test_project.py index f88a0ae5e0..664341debc 100644 --- a/openstack/tests/unit/identity/v3/test_project.py +++ b/openstack/tests/unit/identity/v3/test_project.py @@ -10,9 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools +from unittest import mock +from keystoneauth1 import adapter + +from openstack.identity.v3 import group from openstack.identity.v3 import project +from openstack.identity.v3 import role +from openstack.identity.v3 import user +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -21,25 +28,57 @@ 'enabled': True, 'id': IDENTIFIER, 'is_domain': False, + 'links': {'self': f'http://example.com/identity/v3/projects/{IDENTIFIER}'}, 'name': '5', 'parent_id': '6', + 'options': {'foo': 'bar'}, } -class TestProject(testtools.TestCase): +class TestProject(base.TestCase): + def setUp(self): + super().setUp() + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = 1 + self.sess._get_connection = mock.Mock(return_value=self.cloud) + self.good_resp = mock.Mock() + self.good_resp.body = None + self.good_resp.json = mock.Mock(return_value=self.good_resp.body) + self.good_resp.status_code = 204 + + self.bad_resp = mock.Mock() + self.bad_resp.body = None + self.bad_resp.json = mock.Mock(return_value=self.bad_resp.body) + self.bad_resp.status_code = 401 def test_basic(self): sot = project.Project() self.assertEqual('project', sot.resource_key) self.assertEqual('projects', sot.resources_key) self.assertEqual('/projects', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'domain_id': 'domain_id', + 'is_domain': 'is_domain', + 'name': 'name', + 'parent_id': 'parent_id', + 'is_enabled': 'enabled', + 'limit': 'limit', + 'marker': 'marker', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + }, + 
sot._query_mapping._mapping, + ) def test_make_it(self): sot = project.Project(**EXAMPLE) @@ -48,5 +87,264 @@ def test_make_it(self): self.assertFalse(sot.is_domain) self.assertTrue(sot.is_enabled) self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['parent_id'], sot.parent_id) + self.assertDictEqual(EXAMPLE['options'], sot.options) + + def test_assign_role_to_user_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.put.assert_called_with('projects/IDENTIFIER/users/1/roles/2') + + def test_assign_inherited_role_to_user_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_user( + self.sess, user.User(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.put.assert_called_with( + 'OS-INHERIT/projects/IDENTIFIER/users/1/roles/2/inherited_to_projects' + ) + + def test_assign_role_to_user_bad(self): + sot = project.Project(**EXAMPLE) + resp = self.bad_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertFalse( + sot.assign_role_to_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + def test_validate_user_has_role_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + sot.validate_user_has_role( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.head.assert_called_with( + 'projects/IDENTIFIER/users/1/roles/2' + ) + + def test_validate_user_has_inherited_role_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + 
sot.validate_user_has_role( + self.sess, user.User(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.head.assert_called_with( + 'OS-INHERIT/projects/IDENTIFIER/users/1/roles/2/inherited_to_projects' + ) + + def test_validate_user_has_role_bad(self): + sot = project.Project(**EXAMPLE) + resp = self.bad_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertFalse( + sot.validate_user_has_role( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + def test_unassign_role_from_user_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.delete.assert_called_with( + 'projects/IDENTIFIER/users/1/roles/2' + ) + + def test_unassign_inherited_role_from_user_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_user( + self.sess, user.User(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.delete.assert_called_with( + 'OS-INHERIT/projects/IDENTIFIER/users/1/roles/2/inherited_to_projects' + ) + + def test_unassign_role_from_user_bad(self): + sot = project.Project(**EXAMPLE) + resp = self.bad_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertFalse( + sot.unassign_role_from_user( + self.sess, user.User(id='1'), role.Role(id='2'), False + ) + ) + + def test_assign_role_to_group_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.put.assert_called_with( + 'projects/IDENTIFIER/groups/1/roles/2' + ) + + def test_assign_inherited_role_to_group_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.put = 
mock.Mock(return_value=resp) + + self.assertTrue( + sot.assign_role_to_group( + self.sess, group.Group(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.put.assert_called_with( + 'OS-INHERIT/projects/IDENTIFIER/groups/1/roles/2/inherited_to_projects' + ) + + def test_assign_role_to_group_bad(self): + sot = project.Project(**EXAMPLE) + resp = self.bad_resp + self.sess.put = mock.Mock(return_value=resp) + + self.assertFalse( + sot.assign_role_to_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + def test_validate_group_has_role_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + sot.validate_group_has_role( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.head.assert_called_with( + 'projects/IDENTIFIER/groups/1/roles/2' + ) + + def test_validate_group_has_inherited_role_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertTrue( + sot.validate_group_has_role( + self.sess, group.Group(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.head.assert_called_with( + 'OS-INHERIT/projects/IDENTIFIER/groups/1/roles/2/inherited_to_projects' + ) + + def test_validate_group_has_role_bad(self): + sot = project.Project(**EXAMPLE) + resp = self.bad_resp + self.sess.head = mock.Mock(return_value=resp) + + self.assertFalse( + sot.validate_group_has_role( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + def test_unassign_role_from_group_good(self): + sot = project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + self.sess.delete.assert_called_with( + 'projects/IDENTIFIER/groups/1/roles/2' + ) + + def test_unassign_inherited_role_from_group_good(self): + sot = 
project.Project(**EXAMPLE) + resp = self.good_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertTrue( + sot.unassign_role_from_group( + self.sess, group.Group(id='1'), role.Role(id='2'), True + ) + ) + + self.sess.delete.assert_called_with( + 'OS-INHERIT/projects/IDENTIFIER/groups/1/roles/2/inherited_to_projects' + ) + + def test_unassign_role_from_group_bad(self): + sot = project.Project(**EXAMPLE) + resp = self.bad_resp + self.sess.delete = mock.Mock(return_value=resp) + + self.assertFalse( + sot.unassign_role_from_group( + self.sess, group.Group(id='1'), role.Role(id='2'), False + ) + ) + + +class TestUserProject(base.TestCase): + def test_basic(self): + sot = project.UserProject() + self.assertEqual('project', sot.resource_key) + self.assertEqual('projects', sot.resources_key) + self.assertEqual('/users/%(user_id)s/projects', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/identity/v3/test_proxy.py b/openstack/tests/unit/identity/v3/test_proxy.py index aff747264c..fd153d9781 100644 --- a/openstack/tests/unit/identity/v3/test_proxy.py +++ b/openstack/tests/unit/identity/v3/test_proxy.py @@ -10,37 +10,55 @@ # License for the specific language governing permissions and limitations # under the License. 
+import uuid + from openstack.identity.v3 import _proxy +from openstack.identity.v3 import access_rule from openstack.identity.v3 import credential from openstack.identity.v3 import domain +from openstack.identity.v3 import domain_config from openstack.identity.v3 import endpoint from openstack.identity.v3 import group from openstack.identity.v3 import policy from openstack.identity.v3 import project from openstack.identity.v3 import region from openstack.identity.v3 import role +from openstack.identity.v3 import role_domain_group_assignment +from openstack.identity.v3 import role_domain_user_assignment +from openstack.identity.v3 import role_project_group_assignment +from openstack.identity.v3 import role_project_user_assignment +from openstack.identity.v3 import role_system_group_assignment +from openstack.identity.v3 import role_system_user_assignment from openstack.identity.v3 import service +from openstack.identity.v3 import service_provider from openstack.identity.v3 import trust from openstack.identity.v3 import user -from openstack.tests.unit import test_proxy_base2 +from openstack.tests.unit import test_proxy_base + +USER_ID = 'user-id-' + uuid.uuid4().hex +ENDPOINT_ID = 'endpoint-id-' + uuid.uuid4().hex +PROJECT_ID = 'project-id-' + uuid.uuid4().hex -class TestIdentityProxy(test_proxy_base2.TestProxyBase): +class TestIdentityProxyBase(test_proxy_base.TestProxyBase): def setUp(self): - super(TestIdentityProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) + +class TestIdentityProxyCredential(TestIdentityProxyBase): def test_credential_create_attrs(self): - self.verify_create(self.proxy.create_credential, - credential.Credential) + self.verify_create(self.proxy.create_credential, credential.Credential) def test_credential_delete(self): - self.verify_delete(self.proxy.delete_credential, - credential.Credential, False) + self.verify_delete( + self.proxy.delete_credential, credential.Credential, False + ) def 
test_credential_delete_ignore(self): - self.verify_delete(self.proxy.delete_credential, - credential.Credential, True) + self.verify_delete( + self.proxy.delete_credential, credential.Credential, True + ) def test_credential_find(self): self.verify_find(self.proxy.find_credential, credential.Credential) @@ -49,12 +67,13 @@ def test_credential_get(self): self.verify_get(self.proxy.get_credential, credential.Credential) def test_credentials(self): - self.verify_list(self.proxy.credentials, credential.Credential, - paginated=False) + self.verify_list(self.proxy.credentials, credential.Credential) def test_credential_update(self): self.verify_update(self.proxy.update_credential, credential.Credential) + +class TestIdentityProxyDomain(TestIdentityProxyBase): def test_domain_create_attrs(self): self.verify_create(self.proxy.create_domain, domain.Domain) @@ -71,21 +90,92 @@ def test_domain_get(self): self.verify_get(self.proxy.get_domain, domain.Domain) def test_domains(self): - self.verify_list(self.proxy.domains, domain.Domain, paginated=False) + self.verify_list(self.proxy.domains, domain.Domain) def test_domain_update(self): self.verify_update(self.proxy.update_domain, domain.Domain) + +class TestIdentityProxyDomainConfig(TestIdentityProxyBase): + def test_domain_config_create_attrs(self): + self.verify_create( + self.proxy.create_domain_config, + domain_config.DomainConfig, + method_args=['domain_id'], + method_kwargs={}, + expected_args=[], + expected_kwargs={ + 'domain_id': 'domain_id', + }, + ) + + def test_domain_config_delete(self): + self.verify_delete( + self.proxy.delete_domain_config, + domain_config.DomainConfig, + ignore_missing=False, + method_args=['domain_id'], + method_kwargs={}, + expected_args=[None], + expected_kwargs={ + 'domain_id': 'domain_id', + }, + ) + + def test_domain_config_delete_ignore(self): + self.verify_delete( + self.proxy.delete_domain_config, + domain_config.DomainConfig, + ignore_missing=True, + method_args=['domain_id'], + 
method_kwargs={}, + expected_args=[None], + expected_kwargs={ + 'domain_id': 'domain_id', + }, + ) + + # no find_domain_config + + def test_domain_config_get(self): + self.verify_get( + self.proxy.get_domain_config, + domain_config.DomainConfig, + method_args=['domain_id'], + method_kwargs={}, + expected_args=[], + expected_kwargs={ + 'domain_id': 'domain_id', + 'requires_id': False, + }, + ) + + # no domain_configs + + def test_domain_config_update(self): + self.verify_update( + self.proxy.update_domain_config, + domain_config.DomainConfig, + method_args=['domain_id'], + method_kwargs={}, + expected_args=[None], + expected_kwargs={ + 'domain_id': 'domain_id', + }, + ) + + +class TestIdentityProxyEndpoint(TestIdentityProxyBase): def test_endpoint_create_attrs(self): self.verify_create(self.proxy.create_endpoint, endpoint.Endpoint) def test_endpoint_delete(self): - self.verify_delete(self.proxy.delete_endpoint, - endpoint.Endpoint, False) + self.verify_delete( + self.proxy.delete_endpoint, endpoint.Endpoint, False + ) def test_endpoint_delete_ignore(self): - self.verify_delete(self.proxy.delete_endpoint, - endpoint.Endpoint, True) + self.verify_delete(self.proxy.delete_endpoint, endpoint.Endpoint, True) def test_endpoint_find(self): self.verify_find(self.proxy.find_endpoint, endpoint.Endpoint) @@ -94,12 +184,21 @@ def test_endpoint_get(self): self.verify_get(self.proxy.get_endpoint, endpoint.Endpoint) def test_endpoints(self): - self.verify_list(self.proxy.endpoints, endpoint.Endpoint, - paginated=False) + self.verify_list(self.proxy.endpoints, endpoint.Endpoint) + + def test_project_endpoints(self): + self.verify_list( + self.proxy.project_endpoints, + endpoint.ProjectEndpoint, + method_kwargs={'project': PROJECT_ID}, + expected_kwargs={'project_id': PROJECT_ID}, + ) def test_endpoint_update(self): self.verify_update(self.proxy.update_endpoint, endpoint.Endpoint) + +class TestIdentityProxyGroup(TestIdentityProxyBase): def test_group_create_attrs(self): 
self.verify_create(self.proxy.create_group, group.Group) @@ -116,11 +215,54 @@ def test_group_get(self): self.verify_get(self.proxy.get_group, group.Group) def test_groups(self): - self.verify_list(self.proxy.groups, group.Group, paginated=False) + self.verify_list(self.proxy.groups, group.Group) def test_group_update(self): self.verify_update(self.proxy.update_group, group.Group) + def test_add_user_to_group(self): + self._verify( + "openstack.identity.v3.group.Group.add_user", + self.proxy.add_user_to_group, + method_args=['uid', 'gid'], + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + ], + ) + + def test_remove_user_from_group(self): + self._verify( + "openstack.identity.v3.group.Group.remove_user", + self.proxy.remove_user_from_group, + method_args=['uid', 'gid'], + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + ], + ) + + def test_check_user_in_group(self): + self._verify( + "openstack.identity.v3.group.Group.check_user", + self.proxy.check_user_in_group, + method_args=['uid', 'gid'], + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + ], + ) + + def test_group_users(self): + self.verify_list( + self.proxy.group_users, + user.User, + method_kwargs={"group": 'group', "attrs": 1}, + expected_kwargs={"attrs": 1}, + ) + + +class TestIdentityProxyPolicy(TestIdentityProxyBase): def test_policy_create_attrs(self): self.verify_create(self.proxy.create_policy, policy.Policy) @@ -137,11 +279,13 @@ def test_policy_get(self): self.verify_get(self.proxy.get_policy, policy.Policy) def test_policies(self): - self.verify_list(self.proxy.policies, policy.Policy, paginated=False) + self.verify_list(self.proxy.policies, policy.Policy) def test_policy_update(self): self.verify_update(self.proxy.update_policy, policy.Policy) + +class TestIdentityProxyProject(TestIdentityProxyBase): def test_project_create_attrs(self): self.verify_create(self.proxy.create_project, project.Project) @@ -158,11 
+302,45 @@ def test_project_get(self): self.verify_get(self.proxy.get_project, project.Project) def test_projects(self): - self.verify_list(self.proxy.projects, project.Project, paginated=False) + self.verify_list(self.proxy.projects, project.Project) + + def test_user_projects(self): + self.verify_list( + self.proxy.user_projects, + project.UserProject, + method_kwargs={'user': USER_ID}, + expected_kwargs={'user_id': USER_ID}, + ) + + def test_endpoint_projects(self): + self.verify_list( + self.proxy.endpoint_projects, + project.EndpointProject, + method_kwargs={'endpoint': ENDPOINT_ID}, + expected_kwargs={'endpoint_id': ENDPOINT_ID}, + ) def test_project_update(self): self.verify_update(self.proxy.update_project, project.Project) + def test_project_associate_endpoint(self): + self._verify( + 'openstack.identity.v3.project.Project.associate_endpoint', + self.proxy.associate_endpoint_with_project, + method_args=['project_id', 'endpoint_id'], + expected_args=[self.proxy, 'endpoint_id'], + ) + + def test_project_disassociate_endpoint(self): + self._verify( + 'openstack.identity.v3.project.Project.disassociate_endpoint', + self.proxy.disassociate_endpoint_from_project, + method_args=['project_id', 'endpoint_id'], + expected_args=[self.proxy, 'endpoint_id'], + ) + + +class TestIdentityProxyService(TestIdentityProxyBase): def test_service_create_attrs(self): self.verify_create(self.proxy.create_service, service.Service) @@ -179,11 +357,13 @@ def test_service_get(self): self.verify_get(self.proxy.get_service, service.Service) def test_services(self): - self.verify_list(self.proxy.services, service.Service, paginated=False) + self.verify_list(self.proxy.services, service.Service) def test_service_update(self): self.verify_update(self.proxy.update_service, service.Service) + +class TestIdentityProxyUser(TestIdentityProxyBase): def test_user_create_attrs(self): self.verify_create(self.proxy.create_user, user.User) @@ -200,11 +380,53 @@ def test_user_get(self): 
self.verify_get(self.proxy.get_user, user.User) def test_users(self): - self.verify_list(self.proxy.users, user.User, paginated=False) + self.verify_list(self.proxy.users, user.User) def test_user_update(self): self.verify_update(self.proxy.update_user, user.User) + def test_user_groups(self): + self.verify_list( + self.proxy.user_groups, + group.UserGroup, + method_kwargs={"user": 'user'}, + expected_kwargs={"user_id": "user"}, + ) + + +class TestIdentityProxyToken(TestIdentityProxyBase): + def test_token_validate(self): + self._verify( + "openstack.identity.v3.token.Token.validate", + self.proxy.validate_token, + method_args=['token'], + method_kwargs={'nocatalog': False, 'allow_expired': False}, + expected_args=[self.proxy, 'token'], + expected_kwargs={'nocatalog': False, 'allow_expired': False}, + ) + + def test_token_check(self): + self._verify( + "openstack.identity.v3.token.Token.check", + self.proxy.check_token, + method_args=['token'], + method_kwargs={'allow_expired': False}, + expected_args=[self.proxy, 'token'], + expected_kwargs={'allow_expired': False}, + ) + + def test_token_revoke(self): + self._verify( + "openstack.identity.v3.token.Token.revoke", + self.proxy.revoke_token, + method_args=['token'], + method_kwargs={}, + expected_args=[self.proxy, 'token'], + expected_kwargs={}, + ) + + +class TestIdentityProxyTrust(TestIdentityProxyBase): def test_trust_create_attrs(self): self.verify_create(self.proxy.create_trust, trust.Trust) @@ -221,8 +443,10 @@ def test_trust_get(self): self.verify_get(self.proxy.get_trust, trust.Trust) def test_trusts(self): - self.verify_list(self.proxy.trusts, trust.Trust, paginated=False) + self.verify_list(self.proxy.trusts, trust.Trust) + +class TestIdentityProxyRegion(TestIdentityProxyBase): def test_region_create_attrs(self): self.verify_create(self.proxy.create_region, region.Region) @@ -239,11 +463,13 @@ def test_region_get(self): self.verify_get(self.proxy.get_region, region.Region) def test_regions(self): - 
self.verify_list(self.proxy.regions, region.Region, paginated=False) + self.verify_list(self.proxy.regions, region.Region) def test_region_update(self): self.verify_update(self.proxy.update_region, region.Region) + +class TestIdentityProxyRole(TestIdentityProxyBase): def test_role_create_attrs(self): self.verify_create(self.proxy.create_role, role.Role) @@ -260,7 +486,402 @@ def test_role_get(self): self.verify_get(self.proxy.get_role, role.Role) def test_roles(self): - self.verify_list(self.proxy.roles, role.Role, paginated=False) + self.verify_list(self.proxy.roles, role.Role) def test_role_update(self): self.verify_update(self.proxy.update_role, role.Role) + + +class TestIdentityProxyRoleAssignments(TestIdentityProxyBase): + def test_role_assignments_filter__domain_user(self): + self.verify_list( + self.proxy.role_assignments_filter, + role_domain_user_assignment.RoleDomainUserAssignment, + method_kwargs={'domain': 'domain', 'user': 'user'}, + expected_kwargs={ + 'domain_id': 'domain', + 'user_id': 'user', + }, + ) + + def test_role_assignments_filter__domain_group(self): + self.verify_list( + self.proxy.role_assignments_filter, + role_domain_group_assignment.RoleDomainGroupAssignment, + method_kwargs={'domain': 'domain', 'group': 'group'}, + expected_kwargs={ + 'domain_id': 'domain', + 'group_id': 'group', + }, + ) + + def test_role_assignments_filter__project_user(self): + self.verify_list( + self.proxy.role_assignments_filter, + role_project_user_assignment.RoleProjectUserAssignment, + method_kwargs={'project': 'project', 'user': 'user'}, + expected_kwargs={ + 'project_id': 'project', + 'user_id': 'user', + }, + ) + + def test_role_assignments_filter__project_group(self): + self.verify_list( + self.proxy.role_assignments_filter, + role_project_group_assignment.RoleProjectGroupAssignment, + method_kwargs={'project': 'project', 'group': 'group'}, + expected_kwargs={ + 'project_id': 'project', + 'group_id': 'group', + }, + ) + + def 
test_role_assignments_filter__system_user(self): + self.verify_list( + self.proxy.role_assignments_filter, + role_system_user_assignment.RoleSystemUserAssignment, + method_kwargs={'system': 'system', 'user': 'user'}, + expected_kwargs={ + 'system_id': 'system', + 'user_id': 'user', + }, + ) + + def test_role_assignments_filter__system_group(self): + self.verify_list( + self.proxy.role_assignments_filter, + role_system_group_assignment.RoleSystemGroupAssignment, + method_kwargs={'system': 'system', 'group': 'group'}, + expected_kwargs={ + 'system_id': 'system', + 'group_id': 'group', + }, + ) + + def test_assign_domain_role_to_user(self): + self._verify( + "openstack.identity.v3.domain.Domain.assign_role_to_user", + self.proxy.assign_domain_role_to_user, + method_args=['dom_id'], + method_kwargs={'user': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_unassign_domain_role_from_user(self): + self._verify( + "openstack.identity.v3.domain.Domain.unassign_role_from_user", + self.proxy.unassign_domain_role_from_user, + method_args=['dom_id'], + method_kwargs={'user': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_validate_user_has_domain_role(self): + self._verify( + "openstack.identity.v3.domain.Domain.validate_user_has_role", + self.proxy.validate_user_has_domain_role, + method_args=['dom_id'], + method_kwargs={'user': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_assign_domain_role_to_group(self): + self._verify( + "openstack.identity.v3.domain.Domain.assign_role_to_group", + self.proxy.assign_domain_role_to_group, + method_args=['dom_id'], + method_kwargs={'group': 'uid', 'role': 
'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_unassign_domain_role_from_group(self): + self._verify( + "openstack.identity.v3.domain.Domain.unassign_role_from_group", + self.proxy.unassign_domain_role_from_group, + method_args=['dom_id'], + method_kwargs={'group': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_validate_group_has_domain_role(self): + self._verify( + "openstack.identity.v3.domain.Domain.validate_group_has_role", + self.proxy.validate_group_has_domain_role, + method_args=['dom_id'], + method_kwargs={'group': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_assign_project_role_to_user(self): + self._verify( + "openstack.identity.v3.project.Project.assign_role_to_user", + self.proxy.assign_project_role_to_user, + method_args=['dom_id'], + method_kwargs={'user': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_unassign_project_role_from_user(self): + self._verify( + "openstack.identity.v3.project.Project.unassign_role_from_user", + self.proxy.unassign_project_role_from_user, + method_args=['dom_id'], + method_kwargs={'user': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_validate_user_has_project_role(self): + self._verify( + "openstack.identity.v3.project.Project.validate_user_has_role", + self.proxy.validate_user_has_project_role, + method_args=['dom_id'], + method_kwargs={'user': 'uid', 'role': 'rid'}, + expected_args=[ 
+ self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_assign_project_role_to_group(self): + self._verify( + "openstack.identity.v3.project.Project.assign_role_to_group", + self.proxy.assign_project_role_to_group, + method_args=['dom_id'], + method_kwargs={'group': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_unassign_project_role_from_group(self): + self._verify( + "openstack.identity.v3.project.Project.unassign_role_from_group", + self.proxy.unassign_project_role_from_group, + method_args=['dom_id'], + method_kwargs={'group': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_validate_group_has_project_role(self): + self._verify( + "openstack.identity.v3.project.Project.validate_group_has_role", + self.proxy.validate_group_has_project_role, + method_args=['dom_id'], + method_kwargs={'group': 'uid', 'role': 'rid'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + False, + ], + ) + + def test_assign_system_role_to_user(self): + self._verify( + "openstack.identity.v3.system.System.assign_role_to_user", + self.proxy.assign_system_role_to_user, + method_kwargs={'user': 'uid', 'role': 'rid', 'system': 'all'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + ], + ) + + def test_unassign_system_role_from_user(self): + self._verify( + "openstack.identity.v3.system.System.unassign_role_from_user", + self.proxy.unassign_system_role_from_user, + method_kwargs={'user': 'uid', 'role': 'rid', 'system': 'all'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 
'uid'), + self.proxy._get_resource(role.Role, 'rid'), + ], + ) + + def test_validate_user_has_system_role(self): + self._verify( + "openstack.identity.v3.system.System.validate_user_has_role", + self.proxy.validate_user_has_system_role, + method_kwargs={'user': 'uid', 'role': 'rid', 'system': 'all'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(user.User, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + ], + ) + + def test_assign_system_role_to_group(self): + self._verify( + "openstack.identity.v3.system.System.assign_role_to_group", + self.proxy.assign_system_role_to_group, + method_kwargs={'group': 'uid', 'role': 'rid', 'system': 'all'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + ], + ) + + def test_unassign_system_role_from_group(self): + self._verify( + "openstack.identity.v3.system.System.unassign_role_from_group", + self.proxy.unassign_system_role_from_group, + method_kwargs={'group': 'uid', 'role': 'rid', 'system': 'all'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + ], + ) + + def test_validate_group_has_system_role(self): + self._verify( + "openstack.identity.v3.system.System.validate_group_has_role", + self.proxy.validate_group_has_system_role, + method_kwargs={'group': 'uid', 'role': 'rid', 'system': 'all'}, + expected_args=[ + self.proxy, + self.proxy._get_resource(group.Group, 'uid'), + self.proxy._get_resource(role.Role, 'rid'), + ], + ) + + +class TestAccessRule(TestIdentityProxyBase): + def test_access_rule_delete(self): + self.verify_delete( + self.proxy.delete_access_rule, + access_rule.AccessRule, + False, + method_args=[], + method_kwargs={'user': USER_ID, 'access_rule': 'access_rule'}, + expected_args=['access_rule'], + expected_kwargs={'user_id': USER_ID}, + ) + + def test_access_rule_delete_ignore(self): + self.verify_delete( + 
self.proxy.delete_access_rule, + access_rule.AccessRule, + True, + method_args=[], + method_kwargs={'user': USER_ID, 'access_rule': 'access_rule'}, + expected_args=['access_rule'], + expected_kwargs={'user_id': USER_ID}, + ) + + def test_access_rule_get(self): + self.verify_get( + self.proxy.get_access_rule, + access_rule.AccessRule, + method_args=[], + method_kwargs={'user': USER_ID, 'access_rule': 'access_rule'}, + expected_args=['access_rule'], + expected_kwargs={'user_id': USER_ID}, + ) + + def test_access_rules(self): + self.verify_list( + self.proxy.access_rules, + access_rule.AccessRule, + method_kwargs={'user': USER_ID}, + expected_kwargs={'user_id': USER_ID}, + ) + + +class TestServiceProvider(TestIdentityProxyBase): + def test_service_provider_create(self): + self.verify_create( + self.proxy.create_service_provider, + service_provider.ServiceProvider, + ) + + def test_service_provider_delete(self): + self.verify_delete( + self.proxy.delete_service_provider, + service_provider.ServiceProvider, + False, + ) + + def test_service_provider_delete_ignore(self): + self.verify_delete( + self.proxy.delete_service_provider, + service_provider.ServiceProvider, + True, + ) + + def test_service_provider_find(self): + self.verify_find( + self.proxy.find_service_provider, service_provider.ServiceProvider + ) + + def test_service_provider_get(self): + self.verify_get( + self.proxy.get_service_provider, + service_provider.ServiceProvider, + ) + + def test_service_providers(self): + self.verify_list( + self.proxy.service_providers, + service_provider.ServiceProvider, + ) + + def test_service_provider_update(self): + self.verify_update( + self.proxy.update_service_provider, + service_provider.ServiceProvider, + ) diff --git a/openstack/tests/unit/identity/v3/test_region.py b/openstack/tests/unit/identity/v3/test_region.py index 01711d0ea3..c1eb2cadae 100644 --- a/openstack/tests/unit/identity/v3/test_region.py +++ b/openstack/tests/unit/identity/v3/test_region.py @@ -10,9 
+10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.identity.v3 import region +from openstack.tests.unit import base + IDENTIFIER = 'RegionOne' EXAMPLE = { @@ -23,20 +23,27 @@ } -class TestRegion(testtools.TestCase): - +class TestRegion(base.TestCase): def test_basic(self): sot = region.Region() self.assertEqual('region', sot.resource_key) self.assertEqual('regions', sot.resources_key) self.assertEqual('/regions', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'parent_region_id': 'parent_region_id', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = region.Region(**EXAMPLE) diff --git a/openstack/tests/unit/identity/v3/test_registered_limit.py b/openstack/tests/unit/identity/v3/test_registered_limit.py new file mode 100644 index 0000000000..3a64d7936a --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_registered_limit.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import registered_limit +from openstack.tests.unit import base + + +EXAMPLE = { + "service_id": "8ac43bb0926245cead88676a96c750d3", + "region_id": 'RegionOne', + "resource_name": 'cores', + "default_limit": 10, + "description": "compute cores", + "links": {"self": "http://example.com/v3/registered_limit_1"}, +} + + +class TestRegistered_limit(base.TestCase): + def test_basic(self): + sot = registered_limit.RegisteredLimit() + self.assertEqual('registered_limit', sot.resource_key) + self.assertEqual('registered_limits', sot.resources_key) + self.assertEqual('/registered_limits', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'service_id': 'service_id', + 'region_id': 'region_id', + 'resource_name': 'resource_name', + 'marker': 'marker', + 'limit': 'limit', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = registered_limit.RegisteredLimit(**EXAMPLE) + self.assertEqual(EXAMPLE['service_id'], sot.service_id) + self.assertEqual(EXAMPLE['region_id'], sot.region_id) + self.assertEqual(EXAMPLE['resource_name'], sot.resource_name) + self.assertEqual(EXAMPLE['default_limit'], sot.default_limit) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['links'], sot.links) diff --git a/openstack/tests/unit/identity/v3/test_role.py b/openstack/tests/unit/identity/v3/test_role.py index ef1663ffbe..d8d6b86019 100644 --- a/openstack/tests/unit/identity/v3/test_role.py +++ b/openstack/tests/unit/identity/v3/test_role.py @@ -10,37 +10,49 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.identity.v3 import role +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { - 'domain_id': '1', 'id': IDENTIFIER, 'links': {'self': 'http://example.com/user1'}, 'name': '2', + 'description': 'test description for role', + 'domain_id': 'default', + 'options': {'immutable': True}, } -class TestRole(testtools.TestCase): - +class TestRole(base.TestCase): def test_basic(self): sot = role.Role() self.assertEqual('role', sot.resource_key) self.assertEqual('roles', sot.resources_key) self.assertEqual('/roles', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.put_create) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'domain_id': 'domain_id', + 'name': 'name', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = role.Role(**EXAMPLE) - self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) + self.assertEqual(EXAMPLE['options'], sot.options) diff --git a/openstack/tests/unit/identity/v3/test_role_assignment.py b/openstack/tests/unit/identity/v3/test_role_assignment.py new file mode 100644 index 0000000000..63e8d29bef --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_role_assignment.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import role_assignment +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': {'self': 'http://example.com/user1'}, + 'scope': {'domain': {'id': '2'}}, + 'user': {'id': '3'}, + 'group': {'id': '4'}, +} + + +class TestRoleAssignment(base.TestCase): + def test_basic(self): + sot = role_assignment.RoleAssignment() + self.assertEqual('role_assignment', sot.resource_key) + self.assertEqual('role_assignments', sot.resources_key) + self.assertEqual('/role_assignments', sot.base_path) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + 'group_id': 'group.id', + 'role_id': 'role.id', + 'scope_domain_id': 'scope.domain.id', + 'scope_project_id': 'scope.project.id', + 'scope_system': 'scope.system', + 'user_id': 'user.id', + 'effective': 'effective', + 'inherited_to': 'scope.OS-INHERIT:inherited_to', + 'include_names': 'include_names', + 'include_subtree': 'include_subtree', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = role_assignment.RoleAssignment(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['scope'], sot.scope) + self.assertEqual(EXAMPLE['user'], sot.user) + self.assertEqual(EXAMPLE['group'], sot.group) diff --git a/openstack/tests/unit/identity/v3/test_role_domain_group_assignment.py b/openstack/tests/unit/identity/v3/test_role_domain_group_assignment.py new file mode 100644 index 0000000000..7e4a848775 --- 
/dev/null +++ b/openstack/tests/unit/identity/v3/test_role_domain_group_assignment.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import role_domain_group_assignment +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': {'self': 'http://example.com/user1'}, + 'name': '2', + 'domain_id': '3', + 'group_id': '4', +} + + +class TestRoleDomainGroupAssignment(base.TestCase): + def test_basic(self): + sot = role_domain_group_assignment.RoleDomainGroupAssignment() + self.assertEqual('role', sot.resource_key) + self.assertEqual('roles', sot.resources_key) + self.assertEqual( + '/domains/%(domain_id)s/groups/%(group_id)s/roles', sot.base_path + ) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = role_domain_group_assignment.RoleDomainGroupAssignment(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) + self.assertEqual(EXAMPLE['group_id'], sot.group_id) diff --git a/openstack/tests/unit/identity/v3/test_role_domain_user_assignment.py b/openstack/tests/unit/identity/v3/test_role_domain_user_assignment.py new file mode 100644 index 0000000000..8e5a4901cb --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_role_domain_user_assignment.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 
(the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import role_domain_user_assignment +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': {'self': 'http://example.com/user1'}, + 'name': '2', + 'domain_id': '3', + 'user_id': '4', +} + + +class TestRoleDomainUserAssignment(base.TestCase): + def test_basic(self): + sot = role_domain_user_assignment.RoleDomainUserAssignment() + self.assertEqual('role', sot.resource_key) + self.assertEqual('roles', sot.resources_key) + self.assertEqual( + '/domains/%(domain_id)s/users/%(user_id)s/roles', sot.base_path + ) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = role_domain_user_assignment.RoleDomainUserAssignment(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) + self.assertEqual(EXAMPLE['user_id'], sot.user_id) diff --git a/openstack/tests/unit/identity/v3/test_role_project_group_assignment.py b/openstack/tests/unit/identity/v3/test_role_project_group_assignment.py new file mode 100644 index 0000000000..08926e82d2 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_role_project_group_assignment.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import role_project_group_assignment +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': {'self': 'http://example.com/user1'}, + 'name': '2', + 'project_id': '3', + 'group_id': '4', +} + + +class TestRoleProjectGroupAssignment(base.TestCase): + def test_basic(self): + sot = role_project_group_assignment.RoleProjectGroupAssignment() + self.assertEqual('role', sot.resource_key) + self.assertEqual('roles', sot.resources_key) + self.assertEqual( + '/projects/%(project_id)s/groups/%(group_id)s/roles', sot.base_path + ) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = role_project_group_assignment.RoleProjectGroupAssignment( + **EXAMPLE + ) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['group_id'], sot.group_id) diff --git a/openstack/tests/unit/identity/v3/test_role_project_user_assignment.py b/openstack/tests/unit/identity/v3/test_role_project_user_assignment.py new file mode 100644 index 0000000000..3c72969abe --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_role_project_user_assignment.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import role_project_user_assignment +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': {'self': 'http://example.com/user1'}, + 'name': '2', + 'project_id': '3', + 'user_id': '4', +} + + +class TestRoleProjectUserAssignment(base.TestCase): + def test_basic(self): + sot = role_project_user_assignment.RoleProjectUserAssignment() + self.assertEqual('role', sot.resource_key) + self.assertEqual('roles', sot.resources_key) + self.assertEqual( + '/projects/%(project_id)s/users/%(user_id)s/roles', sot.base_path + ) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = role_project_user_assignment.RoleProjectUserAssignment(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['user_id'], sot.user_id) diff --git a/openstack/tests/unit/identity/v3/test_role_system_group_assignment.py b/openstack/tests/unit/identity/v3/test_role_system_group_assignment.py new file mode 100644 index 0000000000..7a1b3a4113 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_role_system_group_assignment.py @@ -0,0 +1,33 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import role_system_group_assignment +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = {'id': IDENTIFIER, 'name': '2', 'group_id': '4'} + + +class TestRoleSystemGroupAssignment(base.TestCase): + def test_basic(self): + sot = role_system_group_assignment.RoleSystemGroupAssignment() + self.assertEqual('role', sot.resource_key) + self.assertEqual('roles', sot.resources_key) + self.assertEqual('/system/groups/%(group_id)s/roles', sot.base_path) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = role_system_group_assignment.RoleSystemGroupAssignment(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['group_id'], sot.group_id) diff --git a/openstack/tests/unit/identity/v3/test_role_system_user_assignment.py b/openstack/tests/unit/identity/v3/test_role_system_user_assignment.py new file mode 100644 index 0000000000..00547f9f02 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_role_system_user_assignment.py @@ -0,0 +1,32 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.identity.v3 import role_system_user_assignment +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = {'id': IDENTIFIER, 'name': '2', 'user_id': '4'} + + +class TestRoleSystemUserAssignment(base.TestCase): + def test_basic(self): + sot = role_system_user_assignment.RoleSystemUserAssignment() + self.assertEqual('role', sot.resource_key) + self.assertEqual('roles', sot.resources_key) + self.assertEqual('/system/users/%(user_id)s/roles', sot.base_path) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = role_system_user_assignment.RoleSystemUserAssignment(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) diff --git a/openstack/tests/unit/identity/v3/test_service.py b/openstack/tests/unit/identity/v3/test_service.py index 848c847f4b..a957c957a1 100644 --- a/openstack/tests/unit/identity/v3/test_service.py +++ b/openstack/tests/unit/identity/v3/test_service.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.identity.v3 import service +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -25,20 +25,28 @@ } -class TestService(testtools.TestCase): - +class TestService(base.TestCase): def test_basic(self): sot = service.Service() self.assertEqual('service', sot.resource_key) self.assertEqual('services', sot.resources_key) self.assertEqual('/services', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'name': 'name', + 'type': 'type', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = service.Service(**EXAMPLE) diff --git a/openstack/tests/unit/identity/v3/test_service_provider.py b/openstack/tests/unit/identity/v3/test_service_provider.py new file mode 100644 index 0000000000..068b6afd4a --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_service_provider.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.identity.v3 import service_provider +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'description': 'An example description', + 'is_enabled': True, + 'auth_url': ( + "https://auth.example.com/v3/OS-FEDERATION/" + "identity_providers/idp/protocols/saml2/auth" + ), + 'sp_url': 'https://auth.example.com/Shibboleth.sso/SAML2/ECP', +} + + +class TestServiceProvider(base.TestCase): + def test_basic(self): + sot = service_provider.ServiceProvider() + self.assertEqual('service_provider', sot.resource_key) + self.assertEqual('service_providers', sot.resources_key) + self.assertEqual('/OS-FEDERATION/service_providers', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.create_exclude_id_from_body) + self.assertEqual('PATCH', sot.commit_method) + self.assertEqual('PUT', sot.create_method) + + self.assertDictEqual( + { + 'id': 'id', + 'limit': 'limit', + 'marker': 'marker', + 'is_enabled': 'enabled', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = service_provider.ServiceProvider(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['id'], sot.name) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['is_enabled'], sot.is_enabled) + self.assertEqual(EXAMPLE['auth_url'], sot.auth_url) + self.assertEqual(EXAMPLE['sp_url'], sot.sp_url) diff --git a/openstack/tests/unit/identity/v3/test_token.py b/openstack/tests/unit/identity/v3/test_token.py new file mode 100644 index 0000000000..bc78f3c430 --- /dev/null +++ b/openstack/tests/unit/identity/v3/test_token.py @@ -0,0 +1,198 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack import exceptions +from openstack.identity.v3 import token +from openstack.tests.unit import base + +IDENTIFIER = 'IDENTIFIER' +TOKEN_DATA = { + 'audit_ids': ['VcxU2JEMTjufVx7sVk7bPw'], + 'catalog': [ + { + 'endpoints': [ + { + 'id': '068d1b359ee84b438266cb736d81de97', + 'interface': 'public', + 'region': 'RegionOne', + 'region_id': 'RegionOne', + 'url': 'http://example.com/v2.1', + } + ], + 'id': '050726f278654128aba89757ae25950c', + 'name': 'nova', + 'type': 'compute', + } + ], + 'domain': {'id': 'default', 'name': 'Default'}, + 'expires_at': '2013-02-27T18:30:59.999999Z', + 'issued_at': '2013-02-27T16:30:59.999999Z', + 'methods': ['password'], + 'project': { + 'domain': {'id': 'default', 'name': 'Default'}, + 'id': '8538a3f13f9541b28c2620eb19065e45', + 'name': 'admin', + }, + 'roles': [{'id': 'c703057be878458588961ce9a0ce686b', 'name': 'admin'}], + 'system': {'all': True}, + 'user': { + 'domain': {'id': 'default', 'name': 'Default'}, + 'id': '10a2e6e717a245d9acad3e5f97aeca3d', + 'name': 'admin', + 'password_expires_at': None, + }, + 'is_domain': False, +} + +EXAMPLE = {'token': TOKEN_DATA} + + +class TestToken(base.TestCase): + def setUp(self): + super().setUp() + self.session = mock.Mock(spec=adapter.Adapter) + + def test_basic(self): + sot = token.Token() + self.assertEqual('token', sot.resource_key) + self.assertEqual('/auth/tokens', sot.base_path) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + 
self.assertFalse(sot.allow_head) + + def test_make_it(self): + sot = token.Token(**TOKEN_DATA) + self.assertEqual(TOKEN_DATA['audit_ids'], sot.audit_ids) + self.assertEqual(TOKEN_DATA['catalog'], sot.catalog) + self.assertEqual(TOKEN_DATA['expires_at'], sot.expires_at) + self.assertEqual(TOKEN_DATA['issued_at'], sot.issued_at) + self.assertEqual(TOKEN_DATA['methods'], sot.methods) + self.assertEqual(TOKEN_DATA['user'], sot.user) + self.assertEqual(TOKEN_DATA['project'], sot.project) + self.assertEqual(TOKEN_DATA['domain'], sot.domain) + self.assertEqual(TOKEN_DATA['is_domain'], sot.is_domain) + self.assertEqual(TOKEN_DATA['system'], sot.system) + self.assertEqual(TOKEN_DATA['roles'], sot.roles) + + def test_validate(self): + response = mock.Mock() + response.status_code = 200 + response.json.return_value = EXAMPLE + response.headers = {'content-type': 'application/json'} + self.session.get.return_value = response + + result = token.Token.validate(self.session, 'token') + + self.session.get.assert_called_once_with( + '/auth/tokens', headers={'x-subject-token': 'token'}, params={} + ) + self.assertIsInstance(result, token.Token) + + def test_validate_with_params(self): + response = mock.Mock() + response.status_code = 200 + response.json.return_value = EXAMPLE + response.headers = {'content-type': 'application/json'} + self.session.get.return_value = response + + result = token.Token.validate( + self.session, 'token', nocatalog=True, allow_expired=True + ) + + self.session.get.assert_called_once_with( + '/auth/tokens', + headers={'x-subject-token': 'token'}, + params={'nocatalog': True, 'allow_expired': True}, + ) + self.assertIsInstance(result, token.Token) + + def test_validate_error(self): + response = mock.Mock() + response.status_code = 404 + response.json.return_value = {} + response.headers = {'content-type': 'application/json'} + self.session.get.return_value = response + + self.assertRaises( + exceptions.NotFoundException, + token.Token.validate, + 
self.session, + 'token', + ) + + def test_check(self): + response = mock.Mock() + response.status_code = 200 + self.session.head.return_value = response + + result = token.Token.check(self.session, 'token') + + self.session.head.assert_called_once_with( + '/auth/tokens', headers={'x-subject-token': 'token'}, params={} + ) + self.assertTrue(result) + + def test_check_with_param(self): + response = mock.Mock() + response.status_code = 200 + self.session.head.return_value = response + + result = token.Token.check(self.session, 'token', allow_expired=True) + + self.session.head.assert_called_once_with( + '/auth/tokens', + headers={'x-subject-token': 'token'}, + params={'allow_expired': True}, + ) + self.assertTrue(result) + + def test_check_invalid_token(self): + response = mock.Mock() + response.status_code = 404 + self.session.head.return_value = response + + result = token.Token.check(self.session, 'token') + + self.session.head.assert_called_once_with( + '/auth/tokens', headers={'x-subject-token': 'token'}, params={} + ) + self.assertFalse(result) + + def test_revoke(self): + response = mock.Mock() + response.status_code = 204 + self.session.delete.return_value = response + + token.Token.revoke(self.session, 'token') + + self.session.delete.assert_called_once_with( + '/auth/tokens', headers={'x-subject-token': 'token'} + ) + + def test_revoke_error(self): + response = mock.Mock() + response.status_code = 404 + response.json.return_value = {} + response.headers = {'content-type': 'application/json'} + self.session.delete.return_value = response + + self.assertRaises( + exceptions.NotFoundException, + token.Token.revoke, + self.session, + 'token', + ) diff --git a/openstack/tests/unit/identity/v3/test_trust.py b/openstack/tests/unit/identity/v3/test_trust.py index de5c07c6c6..9dfe2d4c0d 100644 --- a/openstack/tests/unit/identity/v3/test_trust.py +++ b/openstack/tests/unit/identity/v3/test_trust.py @@ -10,9 +10,9 @@ # License for the specific language governing 
permissions and limitations # under the License. -import testtools - from openstack.identity.v3 import trust +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -32,16 +32,14 @@ } -class TestTrust(testtools.TestCase): - +class TestTrust(base.TestCase): def test_basic(self): sot = trust.Trust() self.assertEqual('trust', sot.resource_key) self.assertEqual('trusts', sot.resources_key) self.assertEqual('/OS-TRUST/trusts', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -54,8 +52,9 @@ def test_make_it(self): self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['role_links'], sot.role_links) - self.assertEqual(EXAMPLE['redelegated_trust_id'], - sot.redelegated_trust_id) + self.assertEqual( + EXAMPLE['redelegated_trust_id'], sot.redelegated_trust_id + ) self.assertEqual(EXAMPLE['remaining_uses'], sot.remaining_uses) self.assertEqual(EXAMPLE['trustee_user_id'], sot.trustee_user_id) self.assertEqual(EXAMPLE['trustor_user_id'], sot.trustor_user_id) diff --git a/openstack/tests/unit/identity/v3/test_user.py b/openstack/tests/unit/identity/v3/test_user.py index f0202b901f..070279d931 100644 --- a/openstack/tests/unit/identity/v3/test_user.py +++ b/openstack/tests/unit/identity/v3/test_user.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.identity.v3 import user +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -29,25 +29,34 @@ } -class TestUser(testtools.TestCase): - +class TestUser(base.TestCase): def test_basic(self): sot = user.User() self.assertEqual('user', sot.resource_key) self.assertEqual('users', sot.resources_key) self.assertEqual('/users', sot.base_path) - self.assertEqual('identity', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertTrue(sot.patch_update) + self.assertEqual('PATCH', sot.commit_method) + + self.assertDictEqual( + { + 'domain_id': 'domain_id', + 'name': 'name', + 'password_expires_at': 'password_expires_at', + 'is_enabled': 'enabled', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = user.User(**EXAMPLE) - self.assertEqual(EXAMPLE['default_project_id'], - sot.default_project_id) + self.assertEqual(EXAMPLE['default_project_id'], sot.default_project_id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['domain_id'], sot.domain_id) self.assertEqual(EXAMPLE['email'], sot.email) @@ -56,5 +65,6 @@ def test_make_it(self): self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['password'], sot.password) - self.assertEqual(EXAMPLE['password_expires_at'], - sot.password_expires_at) + self.assertEqual( + EXAMPLE['password_expires_at'], sot.password_expires_at + ) diff --git a/openstack/tests/unit/image/test_image_service.py b/openstack/tests/unit/image/test_image_service.py deleted file mode 100644 index adc641255f..0000000000 --- a/openstack/tests/unit/image/test_image_service.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache 
License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.image import image_service - - -class TestImageService(testtools.TestCase): - - def test_service(self): - sot = image_service.ImageService() - self.assertEqual('image', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(2, len(sot.valid_versions)) - self.assertEqual('v2', sot.valid_versions[0].module) - self.assertEqual('v2', sot.valid_versions[0].path) - self.assertEqual('v1', sot.valid_versions[1].module) - self.assertEqual('v1', sot.valid_versions[1].path) diff --git a/openstack/tests/unit/image/v1/test_image.py b/openstack/tests/unit/image/v1/test_image.py index a9fb33257e..75df92d7a9 100644 --- a/openstack/tests/unit/image/v1/test_image.py +++ b/openstack/tests/unit/image/v1/test_image.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.image.v1 import image +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -36,22 +36,20 @@ } -class TestImage(testtools.TestCase): - +class TestImage(base.TestCase): def test_basic(self): sot = image.Image() self.assertEqual('image', sot.resource_key) self.assertEqual('images', sot.resources_key) self.assertEqual('/images', sot.base_path) - self.assertEqual('image', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = image.Image(EXAMPLE) + sot = image.Image(**EXAMPLE) self.assertEqual(EXAMPLE['checksum'], sot.checksum) self.assertEqual(EXAMPLE['container_format'], sot.container_format) self.assertEqual(EXAMPLE['copy_from'], sot.copy_from) @@ -62,6 +60,7 @@ def test_make_it(self): self.assertEqual(EXAMPLE['min_disk'], sot.min_disk) self.assertEqual(EXAMPLE['min_ram'], sot.min_ram) self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['owner'], sot.owner) self.assertEqual(EXAMPLE['owner'], sot.owner_id) self.assertEqual(EXAMPLE['properties'], sot.properties) self.assertTrue(sot.is_protected) diff --git a/openstack/tests/unit/image/v1/test_proxy.py b/openstack/tests/unit/image/v1/test_proxy.py index e8be295608..155fb579aa 100644 --- a/openstack/tests/unit/image/v1/test_proxy.py +++ b/openstack/tests/unit/image/v1/test_proxy.py @@ -12,12 +12,12 @@ from openstack.image.v1 import _proxy from openstack.image.v1 import image -from openstack.tests.unit import test_proxy_base +from openstack.tests.unit import test_proxy_base as test_proxy_base class TestImageProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestImageProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) def test_image_upload_attrs(self): @@ 
-36,7 +36,7 @@ def test_image_get(self): self.verify_get(self.proxy.get_image, image.Image) def test_images(self): - self.verify_list(self.proxy.images, image.Image, paginated=True) + self.verify_list(self.proxy.images, image.Image) def test_image_update(self): self.verify_update(self.proxy.update_image, image.Image) diff --git a/openstack/tests/unit/image/v2/test_cache.py b/openstack/tests/unit/image/v2/test_cache.py new file mode 100644 index 0000000000..458f361dfe --- /dev/null +++ b/openstack/tests/unit/image/v2/test_cache.py @@ -0,0 +1,90 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack import exceptions +from openstack.image.v2 import cache +from openstack.tests.unit import base + + +EXAMPLE = { + 'cached_images': [ + { + 'hits': 0, + 'image_id': '1a56983c-f71f-490b-a7ac-6b321a18935a', + 'last_accessed': 1671699579.444378, + 'last_modified': 1671699579.444378, + 'size': 0, + }, + ], + 'queued_images': [ + '3a4560a1-e585-443e-9b39-553b46ec92d1', + '6f99bf80-2ee6-47cf-acfe-1f1fabb7e810', + ], +} + + +class TestCache(base.TestCase): + def test_basic(self): + sot = cache.Cache() + self.assertIsNone(sot.resource_key) + self.assertEqual('/cache', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_delete) + + def test_make_it(self): + sot = cache.Cache(**EXAMPLE) + self.assertEqual( + [cache.CachedImage(**e) for e in EXAMPLE['cached_images']], + sot.cached_images, + ) + self.assertEqual(EXAMPLE['queued_images'], sot.queued_images) + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_queue(self): + sot = cache.Cache() + sess = mock.Mock() + sess.put = mock.Mock() + sess.default_microversion = '2.14' + + sot.queue(sess, image='image_id') + + sess.put.assert_called_with( + 'cache/image_id', microversion=sess.default_microversion + ) + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_clear(self): + sot = cache.Cache(**EXAMPLE) + session = mock.Mock() + session.delete = mock.Mock() + + sot.clear(session) + session.delete.assert_called_with('/cache', headers={}) + + sot.clear(session, 'both') + session.delete.assert_called_with('/cache', headers={}) + + sot.clear(session, 'cache') + session.delete.assert_called_with( + '/cache', headers={'x-image-cache-clear-target': 'cache'} + ) + + sot.clear(session, 'queue') + session.delete.assert_called_with( + '/cache', headers={'x-image-cache-clear-target': 'queue'} + ) + + self.assertRaises( + exceptions.InvalidRequest, sot.clear, session, 'invalid' + ) diff --git 
a/openstack/tests/unit/image/v2/test_image.py b/openstack/tests/unit/image/v2/test_image.py index 1e3d674c6b..f548d476b1 100644 --- a/openstack/tests/unit/image/v2/test_image.py +++ b/openstack/tests/unit/image/v2/test_image.py @@ -10,11 +10,20 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools +import hashlib +import io +import operator +import os +import tempfile +from unittest import mock +from keystoneauth1 import adapter +import requests + +from openstack import _log from openstack import exceptions from openstack.image.v2 import image +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -27,11 +36,17 @@ 'min_disk': 5, 'name': '6', 'owner': '7', - 'properties': {'a': 'z', 'b': 'y', }, + 'properties': { + 'a': 'z', + 'b': 'y', + }, 'protected': False, 'status': '8', 'tags': ['g', 'h', 'i'], 'updated_at': '2015-03-09T12:15:57.233772', + 'os_hash_algo': 'sha512', + 'os_hash_value': '073b4523583784fbe01daff81eba092a262ec3', + 'os_hidden': False, 'virtual_size': '10', 'visibility': '11', 'location': '12', @@ -40,12 +55,10 @@ 'file': '15', 'locations': ['15', '16'], 'direct_url': '17', - 'path': '18', - 'value': '19', 'url': '20', 'metadata': {'21': '22'}, 'architecture': '23', - 'hypervisor-type': '24', + 'hypervisor_type': '24', 'instance_type_rxtx_factor': 25.1, 'instance_uuid': '26', 'img_config_drive': '27', @@ -74,31 +87,86 @@ 'vmware_ostype': '48', 'auto_disk_config': True, 'os_type': '49', + 'os_admin_user': 'ubuntu', + 'hw_qemu_guest_agent': 'yes', + 'os_require_quiesce': True, } -class TestImage(testtools.TestCase): +def calculate_md5_checksum(data): + checksum = hashlib.md5(usedforsecurity=False) + for chunk in data: + checksum.update(chunk) + return checksum.hexdigest() + + +class FakeResponse: + def __init__(self, response, status_code=200, headers=None, reason=None): + self.body = response + self.content = response + self.text = response + 
self.status_code = status_code + headers = headers if headers else {'content-type': 'application/json'} + self.headers = requests.structures.CaseInsensitiveDict(headers) + if reason: + self.reason = reason + # for the sake of "list" response faking + self.links = [] + def json(self): + return self.body + + +class TestImage(base.TestCase): def setUp(self): - super(TestImage, self).setUp() + super().setUp() self.resp = mock.Mock() self.resp.body = None self.resp.json = mock.Mock(return_value=self.resp.body) - self.sess = mock.Mock() + self.sess = mock.Mock(spec=adapter.Adapter) self.sess.post = mock.Mock(return_value=self.resp) + self.sess.put = mock.Mock(return_value=FakeResponse({})) + self.sess.delete = mock.Mock(return_value=FakeResponse({})) + self.sess.get = mock.Mock(return_value=FakeResponse({})) + self.sess.default_microversion = None + self.sess.retriable_status_codes = None + self.sess.log = _log.setup_logging('openstack') def test_basic(self): sot = image.Image() self.assertIsNone(sot.resource_key) self.assertEqual('images', sot.resources_key) self.assertEqual('/images', sot.base_path) - self.assertEqual('image', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertDictEqual( + { + 'created_at': 'created_at', + 'id': 'id', + 'is_hidden': 'os_hidden', + 'limit': 'limit', + 'marker': 'marker', + 'member_status': 'member_status', + 'name': 'name', + 'owner': 'owner', + 'protected': 'protected', + 'size_max': 'size_max', + 'size_min': 'size_min', + 'sort': 'sort', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + 'status': 'status', + 'tag': 'tag', + 'updated_at': 'updated_at', + 'visibility': 'visibility', + }, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = image.Image(**EXAMPLE) self.assertEqual(IDENTIFIER, 
sot.id) @@ -108,12 +176,16 @@ def test_make_it(self): self.assertEqual(EXAMPLE['disk_format'], sot.disk_format) self.assertEqual(EXAMPLE['min_disk'], sot.min_disk) self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['owner'], sot.owner) self.assertEqual(EXAMPLE['owner'], sot.owner_id) self.assertEqual(EXAMPLE['properties'], sot.properties) self.assertFalse(sot.is_protected) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['tags'], sot.tags) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['os_hash_algo'], sot.hash_algo) + self.assertEqual(EXAMPLE['os_hash_value'], sot.hash_value) + self.assertEqual(EXAMPLE['os_hidden'], sot.is_hidden) self.assertEqual(EXAMPLE['virtual_size'], sot.virtual_size) self.assertEqual(EXAMPLE['visibility'], sot.visibility) self.assertEqual(EXAMPLE['size'], sot.size) @@ -121,14 +193,13 @@ def test_make_it(self): self.assertEqual(EXAMPLE['file'], sot.file) self.assertEqual(EXAMPLE['locations'], sot.locations) self.assertEqual(EXAMPLE['direct_url'], sot.direct_url) - self.assertEqual(EXAMPLE['path'], sot.path) - self.assertEqual(EXAMPLE['value'], sot.value) self.assertEqual(EXAMPLE['url'], sot.url) self.assertEqual(EXAMPLE['metadata'], sot.metadata) self.assertEqual(EXAMPLE['architecture'], sot.architecture) - self.assertEqual(EXAMPLE['hypervisor-type'], sot.hypervisor_type) - self.assertEqual(EXAMPLE['instance_type_rxtx_factor'], - sot.instance_type_rxtx_factor) + self.assertEqual(EXAMPLE['hypervisor_type'], sot.hypervisor_type) + self.assertEqual( + EXAMPLE['instance_type_rxtx_factor'], sot.instance_type_rxtx_factor + ) self.assertEqual(EXAMPLE['instance_uuid'], sot.instance_uuid) self.assertEqual(EXAMPLE['img_config_drive'], sot.needs_config_drive) self.assertEqual(EXAMPLE['kernel_id'], sot.kernel_id) @@ -144,133 +215,557 @@ def test_make_it(self): self.assertEqual(EXAMPLE['hw_rng_model'], sot.hw_rng_model) self.assertEqual(EXAMPLE['hw_machine_type'], 
sot.hw_machine_type) self.assertEqual(EXAMPLE['hw_scsi_model'], sot.hw_scsi_model) - self.assertEqual(EXAMPLE['hw_serial_port_count'], - sot.hw_serial_port_count) + self.assertEqual( + EXAMPLE['hw_serial_port_count'], sot.hw_serial_port_count + ) self.assertEqual(EXAMPLE['hw_video_model'], sot.hw_video_model) self.assertEqual(EXAMPLE['hw_video_ram'], sot.hw_video_ram) self.assertEqual(EXAMPLE['hw_watchdog_action'], sot.hw_watchdog_action) self.assertEqual(EXAMPLE['os_command_line'], sot.os_command_line) self.assertEqual(EXAMPLE['hw_vif_model'], sot.hw_vif_model) - self.assertEqual(EXAMPLE['hw_vif_multiqueue_enabled'], - sot.is_hw_vif_multiqueue_enabled) + self.assertEqual( + EXAMPLE['hw_vif_multiqueue_enabled'], + sot.is_hw_vif_multiqueue_enabled, + ) self.assertEqual(EXAMPLE['hw_boot_menu'], sot.is_hw_boot_menu_enabled) self.assertEqual(EXAMPLE['vmware_adaptertype'], sot.vmware_adaptertype) self.assertEqual(EXAMPLE['vmware_ostype'], sot.vmware_ostype) self.assertEqual(EXAMPLE['auto_disk_config'], sot.has_auto_disk_config) self.assertEqual(EXAMPLE['os_type'], sot.os_type) + self.assertEqual(EXAMPLE['os_admin_user'], sot.os_admin_user) + self.assertEqual( + EXAMPLE['hw_qemu_guest_agent'], sot.hw_qemu_guest_agent + ) + self.assertEqual(EXAMPLE['os_require_quiesce'], sot.os_require_quiesce) def test_deactivate(self): sot = image.Image(**EXAMPLE) self.assertIsNone(sot.deactivate(self.sess)) self.sess.post.assert_called_with( 'images/IDENTIFIER/actions/deactivate', - endpoint_filter=sot.service) + ) def test_reactivate(self): sot = image.Image(**EXAMPLE) self.assertIsNone(sot.reactivate(self.sess)) self.sess.post.assert_called_with( 'images/IDENTIFIER/actions/reactivate', - endpoint_filter=sot.service) + ) def test_add_tag(self): sot = image.Image(**EXAMPLE) tag = "lol" - self.assertIsNone(sot.add_tag(self.sess, tag)) + sot.add_tag(self.sess, tag) self.sess.put.assert_called_with( - 'images/IDENTIFIER/tags/%s' % tag, - endpoint_filter=sot.service) + 
f'images/IDENTIFIER/tags/{tag}', + microversion=None, + ) def test_remove_tag(self): sot = image.Image(**EXAMPLE) tag = "lol" - self.assertIsNone(sot.remove_tag(self.sess, tag)) + sot.remove_tag(self.sess, tag) self.sess.delete.assert_called_with( - 'images/IDENTIFIER/tags/%s' % tag, - endpoint_filter=sot.service) + f'images/IDENTIFIER/tags/{tag}', + microversion=None, + ) - def test_upload(self): + def test_import_image(self): sot = image.Image(**EXAMPLE) + json = {"method": {"name": "web-download", "uri": "such-a-good-uri"}} + sot.import_image(self.sess, "web-download", uri="such-a-good-uri") + self.sess.post.assert_called_with( + 'images/IDENTIFIER/import', headers={}, json=json + ) - self.assertIsNone(sot.upload(self.sess)) - self.sess.put.assert_called_with('images/IDENTIFIER/file', - endpoint_filter=sot.service, - data=sot.data, - headers={"Content-Type": - "application/octet-stream", - "Accept": ""}) - - def test_download_checksum_match(self): + def test_import_image_with_uri_not_web_download(self): sot = image.Image(**EXAMPLE) - resp = mock.Mock() - resp.content = b"abc" - resp.headers = {"Content-MD5": "900150983cd24fb0d6963f7d28e17f72"} - self.sess.get.return_value = resp + sot.import_image(self.sess, "glance-direct") + self.sess.post.assert_called_with( + 'images/IDENTIFIER/import', + headers={}, + json={"method": {"name": "glance-direct"}}, + ) - rv = sot.download(self.sess) - self.sess.get.assert_called_with('images/IDENTIFIER/file', - endpoint_filter=sot.service) + def test_import_image_with_store(self): + sot = image.Image(**EXAMPLE) + json = { + "method": { + "name": "web-download", + "uri": "such-a-good-uri", + }, + "stores": ["ceph_1"], + } + store = mock.MagicMock() + store.id = "ceph_1" + sot.import_image( + self.sess, + "web-download", + uri="such-a-good-uri", + store=store, + ) + self.sess.post.assert_called_with( + 'images/IDENTIFIER/import', + headers={'X-Image-Meta-Store': 'ceph_1'}, + json=json, + ) - self.assertEqual(rv, resp.content) + 
def test_import_image_with_stores(self): + sot = image.Image(**EXAMPLE) + json = { + "method": { + "name": "web-download", + "uri": "such-a-good-uri", + }, + "stores": ["ceph_1"], + } + store = mock.MagicMock() + store.id = "ceph_1" + sot.import_image( + self.sess, + "web-download", + uri="such-a-good-uri", + stores=[store], + ) + self.sess.post.assert_called_with( + 'images/IDENTIFIER/import', + headers={}, + json=json, + ) - def test_download_checksum_mismatch(self): + def test_import_image_with_all_stores(self): sot = image.Image(**EXAMPLE) + json = { + "method": { + "name": "web-download", + "uri": "such-a-good-uri", + }, + "all_stores": True, + } + sot.import_image( + self.sess, + "web-download", + uri="such-a-good-uri", + all_stores=True, + ) + self.sess.post.assert_called_with( + 'images/IDENTIFIER/import', + headers={}, + json=json, + ) - resp = mock.Mock() - resp.content = b"abc" - resp.headers = {"Content-MD5": "the wrong checksum"} - self.sess.get.return_value = resp + def test_upload(self): + sot = image.Image(**EXAMPLE) - self.assertRaises(exceptions.InvalidResponse, sot.download, self.sess) + self.assertIsNotNone(sot.upload(self.sess)) + self.sess.put.assert_called_with( + 'images/IDENTIFIER/file', + data=sot.data, + headers={"Content-Type": "application/octet-stream", "Accept": ""}, + ) + + def test_stage(self): + sot = image.Image(**EXAMPLE) + + self.assertIsNotNone(sot.stage(self.sess)) + self.sess.put.assert_called_with( + 'images/IDENTIFIER/stage', + data=sot.data, + headers={"Content-Type": "application/octet-stream", "Accept": ""}, + ) - def test_download_no_checksum_header(self): + def test_stage_error(self): sot = image.Image(**EXAMPLE) - resp1 = mock.Mock() - resp1.content = b"abc" - resp1.headers = {"no_checksum_here": ""} + self.sess.put.return_value = FakeResponse("dummy", status_code=400) + self.assertRaises(exceptions.SDKException, sot.stage, self.sess) - resp2 = mock.Mock() - resp2.json = mock.Mock( - return_value={"checksum": 
"900150983cd24fb0d6963f7d28e17f72"}) - resp2.headers = {"": ""} + def test_download_checksum_match(self): + expected_hash = hashlib.sha512(b"abc").hexdigest() + example_with_hash = EXAMPLE.copy() + example_with_hash['os_hash_value'] = expected_hash + sot = image.Image(**example_with_hash) + + resp1 = FakeResponse(example_with_hash) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) self.sess.get.side_effect = [resp1, resp2] rv = sot.download(self.sess) self.sess.get.assert_has_calls( - [mock.call('images/IDENTIFIER/file', endpoint_filter=sot.service), - mock.call('images/IDENTIFIER', endpoint_filter=sot.service)]) + [ + mock.call( + 'images/IDENTIFIER', + microversion=None, + params={}, + skip_cache=False, + ), + mock.call('images/IDENTIFIER/file', stream=False), + ] + ) + + self.assertEqual(rv, resp2) + + def test_download_checksum_mismatch(self): + example_with_wrong_hash = EXAMPLE.copy() + example_with_wrong_hash['os_hash_value'] = "wrong_hash_value" + sot = image.Image(**example_with_wrong_hash) - self.assertEqual(rv, resp1.content) + resp1 = FakeResponse(example_with_wrong_hash) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) - def test_download_no_checksum_at_all2(self): - sot = image.Image(**EXAMPLE) + self.sess.get.side_effect = [resp1, resp2] + + self.assertRaises(exceptions.InvalidResponse, sot.download, self.sess) + + def test_download_md5_fallback(self): + expected_md5 = hashlib.md5(b"abc", usedforsecurity=False).hexdigest() + example_md5_only = EXAMPLE.copy() + example_md5_only['os_hash_algo'] = None + example_md5_only['os_hash_value'] = None + example_md5_only['checksum'] = expected_md5 + sot = image.Image(**example_md5_only) + + resp1 = FakeResponse(example_md5_only) + resp2 = FakeResponse( + b"abc", headers={"Content-Type": "application/octet-stream"} + ) + + self.sess.get.side_effect = [resp1, resp2] - resp1 = mock.Mock() - resp1.content = b"abc" - 
resp1.headers = {"no_checksum_here": ""} + rv = sot.download(self.sess) + self.sess.get.assert_has_calls( + [ + mock.call( + 'images/IDENTIFIER', + microversion=None, + params={}, + skip_cache=False, + ), + mock.call('images/IDENTIFIER/file', stream=False), + ] + ) + + self.assertEqual(rv, resp2) - resp2 = mock.Mock() - resp2.json = mock.Mock(return_value={"checksum": None}) - resp2.headers = {"": ""} + def test_download_no_checksum_at_all2(self): + # No hash available at all + example_no_hash = EXAMPLE.copy() + example_no_hash['os_hash_algo'] = None + example_no_hash['os_hash_value'] = None + example_no_hash['checksum'] = None + sot = image.Image(**example_no_hash) + + resp1 = FakeResponse(example_no_hash) + resp2 = FakeResponse( + b"abc", headers={"Content-Type": "application/octet-stream"} + ) self.sess.get.side_effect = [resp1, resp2] - with self.assertLogs(logger=image.__name__, level="WARNING") as log: + with self.assertLogs(logger='openstack', level="WARNING") as log: rv = sot.download(self.sess) - self.assertEqual(len(log.records), 1, - "Too many warnings were logged") self.assertEqual( - "Unable to verify the integrity of image IDENTIFIER", - log.records[0].msg) + len(log.records), 1, "Too many warnings were logged" + ) + self.assertEqual( + "Unable to verify the integrity of image %s " + "- no hash available", + log.records[0].msg, + ) + self.assertEqual((sot.id,), log.records[0].args) + + self.sess.get.assert_has_calls( + [ + mock.call( + 'images/IDENTIFIER', + microversion=None, + params={}, + skip_cache=False, + ), + mock.call('images/IDENTIFIER/file', stream=False), + ] + ) + + self.assertEqual(rv, resp2) + + def test_download_stream(self): + expected_hash = hashlib.sha512(b"abc").hexdigest() + example_with_hash = EXAMPLE.copy() + example_with_hash['os_hash_value'] = expected_hash + sot = image.Image(**example_with_hash) + + resp1 = FakeResponse(example_with_hash) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": 
"application/octet-stream"}, + ) + + self.sess.get.side_effect = [resp1, resp2] + rv = sot.download(self.sess, stream=True) self.sess.get.assert_has_calls( - [mock.call('images/IDENTIFIER/file', endpoint_filter=sot.service), - mock.call('images/IDENTIFIER', endpoint_filter=sot.service)]) + [ + mock.call( + 'images/IDENTIFIER', + microversion=None, + params={}, + skip_cache=False, + ), + mock.call('images/IDENTIFIER/file', stream=True), + ] + ) + + self.assertEqual(rv, resp2) + self.assertIsNone(rv.headers.get('content-md5')) + + def test_image_download_output_fd(self): + output_file = io.BytesIO() + expected_hash = hashlib.sha512(b'0102').hexdigest() + example_with_hash = EXAMPLE.copy() + example_with_hash['os_hash_value'] = expected_hash + sot = image.Image(**example_with_hash) + + fetch_response = FakeResponse(example_with_hash) + response = mock.Mock() + response.status_code = 200 + response.iter_content.return_value = [b'01', b'02'] + response.headers = {} + + self.sess.get = mock.Mock(side_effect=[fetch_response, response]) + sot.download(self.sess, output=output_file) + output_file.seek(0) + self.assertEqual(b'0102', output_file.read()) + + def test_image_download_output_file(self): + expected_hash = hashlib.sha512(b'0102').hexdigest() + example_with_hash = EXAMPLE.copy() + example_with_hash['os_hash_value'] = expected_hash + sot = image.Image(**example_with_hash) + + fetch_response = FakeResponse(example_with_hash) + response = mock.Mock() + response.status_code = 200 + response.iter_content.return_value = [b'01', b'02'] + response.headers = {} + + self.sess.get = mock.Mock(side_effect=[fetch_response, response]) + + output_file = tempfile.NamedTemporaryFile(delete=False) + output_file.close() + try: + sot.download(self.sess, output=output_file.name) + with open(output_file.name, 'rb') as fd: + self.assertEqual(b'0102', fd.read()) + finally: + os.unlink(output_file.name) + + def test_download_secure_hash_sha256(self): + expected_hash = 
hashlib.sha256(b"abc").hexdigest() + example_with_sha256 = EXAMPLE.copy() + example_with_sha256['os_hash_algo'] = 'sha256' + example_with_sha256['os_hash_value'] = expected_hash + sot = image.Image(**example_with_sha256) + + resp1 = FakeResponse(example_with_sha256) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) + + self.sess.get.side_effect = [resp1, resp2] + + rv = sot.download(self.sess) + self.assertEqual(rv, resp2) + + def test_download_secure_hash_sha384(self): + expected_hash = hashlib.sha384(b"abc").hexdigest() + example_with_sha384 = EXAMPLE.copy() + example_with_sha384['os_hash_algo'] = 'sha384' + example_with_sha384['os_hash_value'] = expected_hash + sot = image.Image(**example_with_sha384) + + resp1 = FakeResponse(example_with_sha384) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) + + self.sess.get.side_effect = [resp1, resp2] + + rv = sot.download(self.sess) + self.assertEqual(rv, resp2) + + def test_download_content_md5_header_ignored(self): + correct_hash = hashlib.sha512(b"abc").hexdigest() + example_with_hash = EXAMPLE.copy() + example_with_hash['os_hash_value'] = correct_hash + sot = image.Image(**example_with_hash) + + resp1 = FakeResponse(example_with_hash) + resp2 = FakeResponse( + b"abc", + headers={ + "Content-MD5": "wrong_header_hash_that_should_be_ignored", + "Content-Type": "application/octet-stream", + }, + ) + + self.sess.get.side_effect = [resp1, resp2] + + # Succeeds since only metadata hash is used + rv = sot.download(self.sess) + self.assertEqual(rv, resp2) + + def test_download_secure_hash_mismatch_sha512(self): + example_with_wrong_hash = EXAMPLE.copy() + example_with_wrong_hash['os_hash_value'] = "wrong_sha512_hash" + sot = image.Image(**example_with_wrong_hash) + + resp1 = FakeResponse(example_with_wrong_hash) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) + + self.sess.get.side_effect = 
[resp1, resp2] + + self.assertRaises(exceptions.InvalidResponse, sot.download, self.sess) + + def test_download_md5_fallback_mismatch(self): + example_md5_only = EXAMPLE.copy() + example_md5_only['os_hash_algo'] = None + example_md5_only['os_hash_value'] = None + example_md5_only['checksum'] = "wrong_md5_checksum" + sot = image.Image(**example_md5_only) - self.assertEqual(rv, resp1.content) + resp1 = FakeResponse(example_md5_only) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) + + self.sess.get.side_effect = [resp1, resp2] + + self.assertRaises(exceptions.InvalidResponse, sot.download, self.sess) + + def test_download_unsupported_hash_algo_raises(self): + example_unsupported = EXAMPLE.copy() + example_unsupported['os_hash_algo'] = 'unsupported_algo' + example_unsupported['os_hash_value'] = 'some_hash_value' + sot = image.Image(**example_unsupported) + + resp1 = FakeResponse(example_unsupported) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) + + self.sess.get.side_effect = [resp1, resp2] + + self.assertRaises(exceptions.SDKException, sot.download, self.sess) + + def test_download_unsupported_hash_algo_falls_back_to_md5(self): + correct_md5 = hashlib.md5(b"abc", usedforsecurity=False).hexdigest() + example_unsupported = EXAMPLE.copy() + example_unsupported['os_hash_algo'] = 'ancient_hash_algo' + example_unsupported['os_hash_value'] = 'irrelevant_hash' + example_unsupported['checksum'] = correct_md5 + sot = image.Image(**example_unsupported) + + resp1 = FakeResponse(example_unsupported) + resp2 = FakeResponse( + b"abc", + headers={"Content-Type": "application/octet-stream"}, + ) + + self.sess.get.side_effect = [resp1, resp2] + + rv = sot.download(self.sess) + self.assertEqual(rv, resp2) + + def test_image_update(self): + values = EXAMPLE.copy() + del values['instance_uuid'] + sot = image.Image.existing(**values) + # Let the translate pass through, that portion is tested 
elsewhere + sot._translate_response = mock.Mock() + + resp = mock.Mock() + resp.content = b"abc" + headers = { + 'Content-Type': 'application/openstack-images-v2.1-json-patch', + 'Accept': '', + } + resp.headers = headers + resp.status_code = 200 + self.sess.patch.return_value = resp + + value = [ + {"value": "fake_name", "op": "replace", "path": "/name"}, + {"value": "fake_value", "op": "add", "path": "/instance_uuid"}, + ] + + sot.name = 'fake_name' + sot.instance_uuid = 'fake_value' + sot.commit(self.sess) + url = 'images/' + IDENTIFIER + self.sess.patch.assert_called_once() + call = self.sess.patch.call_args + call_args, call_kwargs = call + self.assertEqual(url, call_args[0]) + self.assertEqual( + sorted(value, key=operator.itemgetter('value')), + sorted(call_kwargs['json'], key=operator.itemgetter('value')), + ) + + def test_image_find(self): + sot = image.Image() + + self.sess._get_connection = mock.Mock(return_value=self.cloud) + self.sess.get.side_effect = [ + # First fetch by name + FakeResponse(None, 404, headers={}, reason='dummy'), + # Then list with no results + FakeResponse({'images': []}), + # And finally new list of hidden images with one searched + FakeResponse({'images': [EXAMPLE]}), + ] + + result = sot.find(self.sess, EXAMPLE['name']) + + self.sess.get.assert_has_calls( + [ + mock.call( + 'images/' + EXAMPLE['name'], + microversion=None, + params={}, + skip_cache=False, + ), + mock.call( + '/images', + headers={'Accept': 'application/json'}, + microversion=None, + params={'name': EXAMPLE['name']}, + ), + mock.call( + '/images', + headers={'Accept': 'application/json'}, + microversion=None, + params={'os_hidden': True}, + ), + ] + ) + + self.assertIsInstance(result, image.Image) + self.assertEqual(IDENTIFIER, result.id) diff --git a/openstack/tests/unit/image/v2/test_image_tasks.py b/openstack/tests/unit/image/v2/test_image_tasks.py new file mode 100644 index 0000000000..05e84ffefc --- /dev/null +++ 
b/openstack/tests/unit/image/v2/test_image_tasks.py @@ -0,0 +1,60 @@ +# Copyright 2024 RedHat Inc. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +import testtools + +from openstack.image.v2 import image_tasks + +EXAMPLE = { + 'id': '56ab5f98-2bb7-44c7-bc05-52bde37eb53b', + 'type': 'import', + 'status': 'failure', + 'owner': '2858d31bc5f54f4db66e53ab905ef566', + 'expires_at': '2024-10-10T09:28:58.000000', + 'created_at': '2024-10-08T09:28:58.000000', + 'updated_at': '2024-10-08T09:28:58.000000', + 'deleted_at': None, + 'deleted': False, + 'image_id': '56a39162-730d-401c-8a77-11bc078cf3e2', + 'request_id': 'req-7d2f073c-f6f8-4807-9fdb-5ce6b10c65c5', + 'user_id': 'dec9b6d341ec481abddf1027576c2d60', + 'input': {'image_id': '56a39162-730d-401c-8a77-11bc078cf3e2'}, + 'result': None, + 'message': "Input does not contain 'import_from' field", +} + + +class TestImageTasks(testtools.TestCase): + def test_basic(self): + sot = image_tasks.ImageTasks() + self.assertEqual('/images/%(image_id)s/tasks', sot.base_path) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = image_tasks.ImageTasks(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['owner'], sot.owner) + self.assertEqual(EXAMPLE['expires_at'], sot.expires_at) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], 
sot.updated_at) + self.assertEqual(EXAMPLE['deleted_at'], sot.deleted_at) + self.assertEqual(EXAMPLE['deleted'], sot.deleted) + self.assertEqual(EXAMPLE['image_id'], sot.image_id) + self.assertEqual(EXAMPLE['request_id'], sot.request_id) + self.assertEqual(EXAMPLE['user_id'], sot.user_id) + self.assertEqual(EXAMPLE['input'], sot.input) + self.assertEqual(EXAMPLE['result'], sot.result) + self.assertEqual(EXAMPLE['message'], sot.message) diff --git a/openstack/tests/unit/image/v2/test_member.py b/openstack/tests/unit/image/v2/test_member.py index 993639f52c..8168b12543 100644 --- a/openstack/tests/unit/image/v2/test_member.py +++ b/openstack/tests/unit/image/v2/test_member.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.image.v2 import member +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -24,17 +24,16 @@ } -class TestMember(testtools.TestCase): +class TestMember(base.TestCase): def test_basic(self): sot = member.Member() self.assertIsNone(sot.resource_key) self.assertEqual('members', sot.resources_key) self.assertEqual('/images/%(image_id)s/members', sot.base_path) - self.assertEqual('image', sot.service.service_type) self.assertEqual('member', sot._alternate_id()) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/image/v2/test_metadef_namespace.py b/openstack/tests/unit/image/v2/test_metadef_namespace.py new file mode 100644 index 0000000000..b8a829055a --- /dev/null +++ b/openstack/tests/unit/image/v2/test_metadef_namespace.py @@ -0,0 +1,152 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + + +from unittest import mock + +from keystoneauth1 import adapter + +from openstack import exceptions +from openstack.image.v2 import metadef_namespace +from openstack.tests.unit import base +from openstack.tests.unit.test_resource import FakeResponse + + +EXAMPLE = { + 'display_name': 'Cinder Volume Type', + 'created_at': '2022-08-24T17:46:24Z', + 'protected': True, + 'namespace': 'OS::Cinder::Volumetype', + 'description': ( + 'The Cinder volume type configuration option. Volume type ' + 'assignment provides a mechanism not only to provide scheduling to a ' + 'specific storage back-end, but also can be used to specify specific ' + 'information for a back-end storage device to act upon.' 
+ ), + 'visibility': 'public', + 'owner': 'admin', + 'resource_type_associations': [ + { + 'name': 'OS::Glance::Image', + 'prefix': 'cinder_', + 'created_at': '2022-08-24T17:46:24Z', + }, + ], +} + + +class TestMetadefNamespace(base.TestCase): + def test_basic(self): + sot = metadef_namespace.MetadefNamespace() + self.assertIsNone(sot.resource_key) + self.assertEqual('namespaces', sot.resources_key) + self.assertEqual('/metadefs/namespaces', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_delete) + + def test_make_it(self): + sot = metadef_namespace.MetadefNamespace(**EXAMPLE) + self.assertEqual(EXAMPLE['namespace'], sot.namespace) + self.assertEqual(EXAMPLE['visibility'], sot.visibility) + self.assertEqual(EXAMPLE['owner'], sot.owner) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['protected'], sot.is_protected) + self.assertEqual(EXAMPLE['display_name'], sot.display_name) + self.assertEqual( + EXAMPLE['resource_type_associations'], + sot.resource_type_associations, + ) + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'resource_types': 'resource_types', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + 'visibility': 'visibility', + }, + sot._query_mapping._mapping, + ) + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_delete_all_properties(self): + sot = metadef_namespace.MetadefNamespace(**EXAMPLE) + session = mock.Mock(spec=adapter.Adapter) + sot._translate_response = mock.Mock() + sot.delete_all_properties(session) + session.delete.assert_called_with( + 'metadefs/namespaces/OS::Cinder::Volumetype/properties' + ) + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_delete_all_objects(self): + sot = metadef_namespace.MetadefNamespace(**EXAMPLE) + session = mock.Mock(spec=adapter.Adapter) + sot._translate_response = mock.Mock() + 
sot.delete_all_objects(session) + session.delete.assert_called_with( + 'metadefs/namespaces/OS::Cinder::Volumetype/objects' + ) + + +class TestMetadefNamespaceTags(base.TestCase): + # The tests in this class are very similar to those provided by + # TestTagMixin. The main differences are: + # - test_add_tag uses a ``PUT`` call instead of a ``POST`` call + # - test_set_tag uses a ``PUT`` call instead of a ``POST`` call + # - test_set_tag uses an optional ``X-OpenStack-Append`` header + def setUp(self): + super().setUp() + self.base_path = 'metadefs/namespaces' + self.response = FakeResponse({}) + + self.session = mock.Mock(spec=adapter.Adapter) + self.session.post = mock.Mock(return_value=self.response) + + def test_add_tag(self): + res = metadef_namespace.MetadefNamespace(**EXAMPLE) + sess = self.session + + # Set some initial value to check add + res.tags = ['blue', 'green'] + + result = res.add_tag(sess, 'lila') + # Check tags attribute is updated + self.assertEqual(['blue', 'green', 'lila'], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags/lila' + sess.post.assert_called_once_with(url) + + def test_set_tags(self): + res = metadef_namespace.MetadefNamespace(**EXAMPLE) + sess = self.session + + # Set some initial value to check rewrite + res.tags = ['blue_old', 'green_old'] + + result = res.set_tags(sess, ['blue', 'green']) + # Check tags attribute is updated + self.assertEqual(['blue', 'green'], res.tags) + # Check the passed resource is returned + self.assertEqual(res, result) + url = self.base_path + '/' + res.id + '/tags' + headers = {'X-OpenStack-Append': 'False'} + jsonargs = { + 'tags': [ + {'name': 'blue'}, + {'name': 'green'}, + ] + } + sess.post.assert_called_once_with(url, headers=headers, json=jsonargs) diff --git a/openstack/tests/unit/image/v2/test_metadef_object.py b/openstack/tests/unit/image/v2/test_metadef_object.py new file mode 100644 index 0000000000..10d36c2c9a 
--- /dev/null +++ b/openstack/tests/unit/image/v2/test_metadef_object.py @@ -0,0 +1,77 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_object +from openstack.tests.unit import base + + +EXAMPLE = { + 'created_at': '2014-09-19T18:20:56Z', + 'description': 'The CPU limits with control parameters.', + 'name': 'CPU Limits', + 'properties': { + 'quota:cpu_period': { + 'description': 'The enforcement interval', + 'maximum': 1000000, + 'minimum': 1000, + 'title': 'Quota: CPU Period', + 'type': 'integer', + }, + 'quota:cpu_quota': { + 'description': 'The maximum allowed bandwidth', + 'title': 'Quota: CPU Quota', + 'type': 'integer', + }, + 'quota:cpu_shares': { + 'description': 'The proportional weighted', + 'title': 'Quota: CPU Shares', + 'type': 'integer', + }, + }, + 'required': [], + 'schema': '/v2/schemas/metadefs/object', + 'updated_at': '2014-09-19T18:20:56Z', +} + + +class TestMetadefObject(base.TestCase): + def test_basic(self): + sot = metadef_object.MetadefObject() + self.assertIsNone(sot.resource_key) + self.assertEqual('objects', sot.resources_key) + test_base_path = '/metadefs/namespaces/%(namespace_name)s/objects' + self.assertEqual(test_base_path, sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = metadef_object.MetadefObject(**EXAMPLE) + 
self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['properties'], sot.properties) + self.assertEqual(EXAMPLE['required'], sot.required) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "visibility": "visibility", + "resource_types": "resource_types", + "sort_key": "sort_key", + "sort_dir": "sort_dir", + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/image/v2/test_metadef_property.py b/openstack/tests/unit/image/v2/test_metadef_property.py new file mode 100644 index 0000000000..cfd7768a81 --- /dev/null +++ b/openstack/tests/unit/image/v2/test_metadef_property.py @@ -0,0 +1,82 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_property +from openstack.tests.unit import base + +EXAMPLE = { + 'namespace_name': 'CIM::StorageAllocationSettingData', + 'name': 'Access', + 'type': 'string', + 'title': 'Access', + 'description': ( + 'Access describes whether the allocated storage extent is ' + '1 (readable), 2 (writeable), or 3 (both).' 
+ ), + 'operators': [''], + 'default': None, + 'readonly': None, + 'minimum': None, + 'maximum': None, + 'enum': [ + 'Unknown', + 'Readable', + 'Writeable', + 'Read/Write Supported', + 'DMTF Reserved', + ], + 'pattern': None, + 'min_length': 0, + 'max_length': None, + 'items': None, + 'unique_items': False, + 'min_items': 0, + 'max_items': None, + 'additional_items': None, +} + + +class TestMetadefProperty(base.TestCase): + def test_basic(self): + sot = metadef_property.MetadefProperty() + self.assertEqual( + '/metadefs/namespaces/%(namespace_name)s/properties', sot.base_path + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = metadef_property.MetadefProperty(**EXAMPLE) + self.assertEqual(EXAMPLE['namespace_name'], sot.namespace_name) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['title'], sot.title) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertListEqual(EXAMPLE['operators'], sot.operators) + self.assertEqual(EXAMPLE['default'], sot.default) + self.assertEqual(EXAMPLE['readonly'], sot.is_readonly) + self.assertEqual(EXAMPLE['minimum'], sot.minimum) + self.assertEqual(EXAMPLE['maximum'], sot.maximum) + self.assertListEqual(EXAMPLE['enum'], sot.enum) + self.assertEqual(EXAMPLE['pattern'], sot.pattern) + self.assertEqual(EXAMPLE['min_length'], sot.min_length) + self.assertEqual(EXAMPLE['max_length'], sot.max_length) + self.assertEqual(EXAMPLE['items'], sot.items) + self.assertEqual(EXAMPLE['unique_items'], sot.require_unique_items) + self.assertEqual(EXAMPLE['min_items'], sot.min_items) + self.assertEqual(EXAMPLE['max_items'], sot.max_items) + self.assertEqual( + EXAMPLE['additional_items'], sot.allow_additional_items + ) diff --git a/openstack/tests/unit/image/v2/test_metadef_resource_type.py 
b/openstack/tests/unit/image/v2/test_metadef_resource_type.py new file mode 100644 index 0000000000..50bf40b804 --- /dev/null +++ b/openstack/tests/unit/image/v2/test_metadef_resource_type.py @@ -0,0 +1,35 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_resource_type +from openstack.tests.unit import base + + +EXAMPLE = {"name": "OS::Nova::Aggregate", "created_at": "2022-07-09T04:10:37Z"} + + +class TestMetadefResourceType(base.TestCase): + def test_basic(self): + sot = metadef_resource_type.MetadefResourceType() + self.assertIsNone(sot.resource_key) + self.assertEqual('resource_types', sot.resources_key) + self.assertEqual('/metadefs/resource_types', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = metadef_resource_type.MetadefResourceType(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) diff --git a/openstack/tests/unit/image/v2/test_metadef_resource_type_association.py b/openstack/tests/unit/image/v2/test_metadef_resource_type_association.py new file mode 100644 index 0000000000..e9d34923c4 --- /dev/null +++ b/openstack/tests/unit/image/v2/test_metadef_resource_type_association.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not 
use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_resource_type +from openstack.tests.unit import base + + +EXAMPLE = { + "name": "OS::Cinder::Volume", + "prefix": "CIM_PASD_", + "properties_target": "image", + "created_at": "2022-07-09T04:10:38Z", +} + + +class TestMetadefResourceTypeAssociation(base.TestCase): + def test_basic(self): + sot = metadef_resource_type.MetadefResourceTypeAssociation() + self.assertIsNone(sot.resource_key) + self.assertEqual('resource_type_associations', sot.resources_key) + self.assertEqual( + '/metadefs/namespaces/%(namespace_name)s/resource_types', + sot.base_path, + ) + self.assertTrue(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = metadef_resource_type.MetadefResourceTypeAssociation(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['prefix'], sot.prefix) + self.assertEqual(EXAMPLE['properties_target'], sot.properties_target) diff --git a/openstack/tests/unit/image/v2/test_metadef_schema.py b/openstack/tests/unit/image/v2/test_metadef_schema.py new file mode 100644 index 0000000000..7b56c96198 --- /dev/null +++ b/openstack/tests/unit/image/v2/test_metadef_schema.py @@ -0,0 +1,108 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import metadef_schema +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'name': 'namespace', + 'properties': { + 'namespace': { + 'type': 'string', + 'description': 'The unique namespace text.', + 'maxLength': 80, + }, + 'visibility': { + 'type': 'string', + 'description': 'Scope of namespace accessibility.', + 'enum': ['public', 'private'], + }, + 'created_at': { + 'type': 'string', + 'readOnly': True, + 'description': 'Date and time of namespace creation', + 'format': 'date-time', + }, + 'resource_type_associations': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'prefix': {'type': 'string'}, + 'properties_target': {'type': 'string'}, + }, + }, + }, + 'properties': {'$ref': '#/definitions/property'}, + 'objects': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': { + 'name': {'type': 'string'}, + 'description': {'type': 'string'}, + 'required': {'$ref': '#/definitions/stringArray'}, + 'properties': {'$ref': '#/definitions/property'}, + }, + }, + }, + 'tags': { + 'type': 'array', + 'items': { + 'type': 'object', + 'properties': {'name': {'type': 'string'}}, + }, + }, + }, + 'additionalProperties': False, + 'definitions': { + 'positiveInteger': {'type': 'integer', 'minimum': 0}, + 'positiveIntegerDefault0': { + 'allOf': [ + {'$ref': '#/definitions/positiveInteger'}, + {'default': 0}, + ] + }, + 'stringArray': { + 'type': 'array', + 'items': {'type': 'string'}, + 'uniqueItems': True, + }, + }, + 'required': ['namespace'], 
+} + + +class TestMetadefSchema(base.TestCase): + def test_basic(self): + sot = metadef_schema.MetadefSchema() + self.assertIsNone(sot.resource_key) + self.assertIsNone(sot.resources_key) + self.assertEqual('/schemas/metadefs', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = metadef_schema.MetadefSchema(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['properties'], sot.properties) + self.assertEqual( + EXAMPLE['additionalProperties'], sot.additional_properties + ) + self.assertEqual(EXAMPLE['definitions'], sot.definitions) + self.assertEqual(EXAMPLE['required'], sot.required) diff --git a/openstack/tests/unit/image/v2/test_proxy.py b/openstack/tests/unit/image/v2/test_proxy.py index b8a1ee9d0d..7646cfcaaf 100644 --- a/openstack/tests/unit/image/v2/test_proxy.py +++ b/openstack/tests/unit/image/v2/test_proxy.py @@ -10,133 +10,1235 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock +import io +import os.path +import tempfile +from unittest import mock + +import requests from openstack import exceptions from openstack.image.v2 import _proxy -from openstack.image.v2 import image -from openstack.image.v2 import member -from openstack.tests.unit import test_proxy_base2 +from openstack.image.v2 import cache as _cache +from openstack.image.v2 import image as _image +from openstack.image.v2 import image_tasks as _image_tasks +from openstack.image.v2 import member as _member +from openstack.image.v2 import metadef_namespace as _metadef_namespace +from openstack.image.v2 import metadef_object as _metadef_object +from openstack.image.v2 import metadef_resource_type as _metadef_resource_type +from openstack.image.v2 import metadef_schema as _metadef_schema +from openstack.image.v2 import schema as _schema +from openstack.image.v2 import service_info as _service_info +from openstack.image.v2 import task as _task +from openstack import proxy as proxy_base +from openstack.tests.unit.image.v2 import test_image as fake_image +from openstack.tests.unit import test_proxy_base + +EXAMPLE = fake_image.EXAMPLE + + +class FakeResponse: + def __init__(self, response, status_code=200, headers=None): + self.body = response + self.status_code = status_code + headers = headers if headers else {'content-type': 'application/json'} + self.headers = requests.structures.CaseInsensitiveDict(headers) + + def json(self): + return self.body -class TestImageProxy(test_proxy_base2.TestProxyBase): +class TestImageProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestImageProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) + self.proxy._connection = self.cloud + + +class TestImage(TestImageProxy): + def test_image_import_no_required_attrs(self): + # container_format and disk_format are required attrs of the image + existing_image = _image.Image(id="id") + self.assertRaises( + exceptions.InvalidRequest, + self.proxy.import_image, 
+ existing_image, + ) + + def test_image_import(self): + original_image = _image.Image(**EXAMPLE) + self._verify( + "openstack.image.v2.image.Image.import_image", + self.proxy.import_image, + method_args=[original_image, "method"], + method_kwargs={ + "uri": "uri", + }, + expected_args=[self.proxy], + expected_kwargs={ + "method": "method", + "store": None, + "uri": "uri", + "remote_region": None, + "remote_image_id": None, + "remote_service_interface": None, + "stores": [], + "all_stores": None, + "all_stores_must_succeed": None, + }, + ) + + def test_image_create_conflicting_options(self): + exc = self.assertRaises( + exceptions.SDKException, + self.proxy.create_image, + name='fake', + filename='fake', + data='fake', + container='bare', + disk_format='raw', + ) + self.assertIn('filename and data are mutually exclusive', str(exc)) + + def test_image_create(self): + self.verify_create( + self.proxy.create_image, + _image.Image, + method_kwargs={ + 'name': 'fake', + 'disk_format': 'fake_dformat', + 'container_format': 'fake_cformat', + 'allow_duplicates': True, + 'is_protected': True, + }, + expected_kwargs={ + 'name': 'fake', + 'disk_format': 'fake_dformat', + 'container_format': 'fake_cformat', + 'is_protected': True, + 'owner_specified.openstack.md5': '', + 'owner_specified.openstack.object': 'images/fake', + 'owner_specified.openstack.sha256': '', + }, + ) + + def test_image_create_file_as_name(self): + # if we pass a filename as an image name, we should upload the file + # itself (and use the upload flow) + with tempfile.NamedTemporaryFile() as tmpfile: + name = os.path.basename(tmpfile.name) + self._verify( + 'openstack.image.v2._proxy.Proxy._upload_image', + self.proxy.create_image, + method_kwargs={ + 'name': tmpfile.name, + 'allow_duplicates': True, + }, + expected_args=[ + name, + ], + expected_kwargs={ + 'filename': tmpfile.name, + 'data': None, + 'meta': {}, + 'wait': False, + 'timeout': 3600, + 'validate_checksum': False, + 'use_import': False, + 
'stores': None, + 'import_method': None, + 'uri': None, + 'remote_region': None, + 'remote_image_id': None, + 'remote_service_interface': None, + 'all_stores': None, + 'all_stores_must_succeed': None, + 'disk_format': 'qcow2', + 'container_format': 'bare', + 'properties': { + 'owner_specified.openstack.md5': '', + 'owner_specified.openstack.object': f'images/{name}', + 'owner_specified.openstack.sha256': '', + }, + }, + ) + + # but not if we use a directory... + with tempfile.TemporaryDirectory() as tmpdir: + self.verify_create( + self.proxy.create_image, + _image.Image, + method_kwargs={ + 'name': tmpdir, + 'allow_duplicates': True, + }, + expected_kwargs={ + 'container_format': 'bare', + 'disk_format': 'qcow2', + 'name': tmpdir, + 'owner_specified.openstack.md5': '', + 'owner_specified.openstack.object': f'images/{tmpdir}', + 'owner_specified.openstack.sha256': '', + }, + ) + + def test_image_create_checksum_match(self): + fake_image = _image.Image( + id="fake", + properties={ + self.proxy._IMAGE_MD5_KEY: 'fake_md5', + self.proxy._IMAGE_SHA256_KEY: 'fake_sha256', + }, + ) + self.proxy.find_image = mock.Mock(return_value=fake_image) + + self.proxy._upload_image = mock.Mock() + + res = self.proxy.create_image( + name='fake', md5='fake_md5', sha256='fake_sha256' + ) + self.assertEqual(fake_image, res) + self.proxy._upload_image.assert_not_called() + + def test_image_create_checksum_mismatch(self): + fake_image = _image.Image( + id="fake", + properties={ + self.proxy._IMAGE_MD5_KEY: 'fake_md5', + self.proxy._IMAGE_SHA256_KEY: 'fake_sha256', + }, + ) + self.proxy.find_image = mock.Mock(return_value=fake_image) + + self.proxy._upload_image = mock.Mock() + + self.proxy.create_image( + name='fake', data=b'fake', md5='fake2_md5', sha256='fake2_sha256' + ) + self.proxy._upload_image.assert_called() + + def test_image_create_allow_duplicates_find_not_called(self): + self.proxy.find_image = mock.Mock() + + self.proxy._upload_image = mock.Mock() + + self.proxy.create_image( + 
name='fake', + data=b'fake', + allow_duplicates=True, + ) + + self.proxy.find_image.assert_not_called() + + def test_image_create_validate_checksum_data_binary(self): + """Pass real data as binary""" + self.proxy.find_image = mock.Mock() + + self.proxy._upload_image = mock.Mock() + + self.proxy.create_image( + name='fake', + data=b'fake', + validate_checksum=True, + container='bare', + disk_format='raw', + ) + + self.proxy.find_image.assert_called_with('fake') + + self.proxy._upload_image.assert_called_with( + 'fake', + container_format='bare', + disk_format='raw', + filename=None, + data=b'fake', + meta={}, + properties={ + self.proxy._IMAGE_MD5_KEY: '144c9defac04969c7bfad8efaa8ea194', + self.proxy._IMAGE_SHA256_KEY: 'b5d54c39e66671c9731b9f471e585d8262cd4f54963f0c93082d8dcf334d4c78', # noqa: E501 + self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', + }, + timeout=3600, + validate_checksum=True, + use_import=False, + import_method=None, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=None, + all_stores=None, + all_stores_must_succeed=None, + wait=False, + ) + + def test_image_create_validate_checksum_data_not_binary(self): + self.assertRaises( + exceptions.SDKException, + self.proxy.create_image, + name='fake', + data=io.StringIO(), + validate_checksum=True, + container='bare', + disk_format='raw', + ) + + def test_image_create_data_binary(self): + """Pass binary file-like object""" + self.proxy.find_image = mock.Mock() + + self.proxy._upload_image = mock.Mock() + + data = io.BytesIO(b'\0\0') + + self.proxy.create_image( + name='fake', + data=data, + validate_checksum=False, + container='bare', + disk_format='raw', + ) + + self.proxy._upload_image.assert_called_with( + 'fake', + container_format='bare', + disk_format='raw', + filename=None, + data=data, + meta={}, + properties={ + self.proxy._IMAGE_MD5_KEY: '', + self.proxy._IMAGE_SHA256_KEY: '', + self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', + }, + timeout=3600, + 
validate_checksum=False, + use_import=False, + import_method=None, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=None, + all_stores=None, + all_stores_must_succeed=None, + wait=False, + ) + + def test_image_create_protected(self): + self.proxy.find_image = mock.Mock() + + created_image = mock.Mock(spec=_image.Image(id="id")) + self.proxy._create = mock.Mock() + self.proxy._create.return_value = created_image + self.proxy._create.return_value.image_import_methods = [] + + created_image.upload = mock.Mock() + created_image.upload.return_value = FakeResponse( + response="", status_code=200 + ) - def test_image_create_no_args(self): + properties = {"is_protected": True} + + self.proxy.create_image( + name="fake", + data="data", + container_format="bare", + disk_format="raw", + **properties, + ) + + _, kwargs = self.proxy._create.call_args + self.assertEqual(kwargs["is_protected"], True) + + def test_image_create_with_stores(self): + self.proxy.find_image = mock.Mock() + self.proxy._upload_image = mock.Mock() + + self.proxy.create_image( + name='fake', + data=b'fake', + container='bare', + disk_format='raw', + use_import=True, + stores=['cinder', 'swift'], + ) + + self.proxy.find_image.assert_called_with('fake') + + self.proxy._upload_image.assert_called_with( + 'fake', + container_format='bare', + disk_format='raw', + filename=None, + data=b'fake', + meta={}, + properties={ + self.proxy._IMAGE_MD5_KEY: '', + self.proxy._IMAGE_SHA256_KEY: '', + self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', + }, + timeout=3600, + validate_checksum=False, + use_import=True, + import_method=None, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=['cinder', 'swift'], + all_stores=None, + all_stores_must_succeed=None, + wait=False, + ) + + def test_image_create_with_all_stores(self): + self.proxy.find_image = mock.Mock() + self.proxy._upload_image = mock.Mock() + + self.proxy.create_image( + 
name='fake', + data=b'fake', + container='bare', + disk_format='raw', + use_import=True, + all_stores=True, + all_stores_must_succeed=True, + ) + + self.proxy.find_image.assert_called_with('fake') + + self.proxy._upload_image.assert_called_with( + 'fake', + container_format='bare', + disk_format='raw', + filename=None, + data=b'fake', + meta={}, + properties={ + self.proxy._IMAGE_MD5_KEY: '', + self.proxy._IMAGE_SHA256_KEY: '', + self.proxy._IMAGE_OBJECT_KEY: 'bare/fake', + }, + timeout=3600, + validate_checksum=False, + use_import=True, + import_method=None, + uri=None, + remote_region=None, + remote_image_id=None, + remote_service_interface=None, + stores=None, + all_stores=True, + all_stores_must_succeed=True, + wait=False, + ) + + def test_image_upload_no_args(self): # container_format and disk_format are required args self.assertRaises(exceptions.InvalidRequest, self.proxy.upload_image) - def test_image_create(self): + def test_image_upload(self): # NOTE: This doesn't use any of the base class verify methods # because it ends up making two separate calls to complete the # operation. 
- created_image = mock.Mock(spec=image.Image(id="id")) + created_image = mock.Mock(spec=_image.Image(id="id")) + + self.proxy._create = mock.Mock() + self.proxy._create.return_value = created_image + + rv = self.proxy.upload_image( + data="data", container_format="x", disk_format="y", name="z" + ) + + self.proxy._create.assert_called_with( + _image.Image, + container_format="x", + disk_format="y", + name="z", + ) + created_image.upload.assert_called_with(self.proxy) + self.assertEqual(rv, created_image) + + def test_image_upload_positional_args(self): + """Test upload_image with positional arguments only""" + created_image = mock.Mock(spec=_image.Image(id="id")) self.proxy._create = mock.Mock() self.proxy._create.return_value = created_image - rv = self.proxy.upload_image(data="data", container_format="x", - disk_format="y", name="z") + # Call with positional args for container_format, disk_format, data + rv = self.proxy.upload_image("bare", "qcow2", "imagedata") - self.proxy._create.assert_called_with(image.Image, - container_format="x", - disk_format="y", - name="z") - created_image.upload.assert_called_with(self.session) + self.proxy._create.assert_called_with( + _image.Image, + container_format="bare", + disk_format="qcow2", + ) + created_image.upload.assert_called_with(self.proxy) self.assertEqual(rv, created_image) + self.assertEqual(created_image.data, "imagedata") + + def test_image_upload_keyword_args(self): + """Test upload_image with keyword arguments only""" + created_image = mock.Mock(spec=_image.Image(id="id")) + + self.proxy._create = mock.Mock() + self.proxy._create.return_value = created_image + + rv = self.proxy.upload_image( + container_format="bare", + disk_format="qcow2", + data="imagedata", + name="test-image", + visibility="public", + ) + + self.proxy._create.assert_called_with( + _image.Image, + container_format="bare", + disk_format="qcow2", + name="test-image", + visibility="public", + ) + 
created_image.upload.assert_called_with(self.proxy) + self.assertEqual(rv, created_image) + self.assertEqual(created_image.data, "imagedata") + + def test_image_upload_mixed_args(self): + """Test upload_image with both positional and keyword arguments""" + created_image = mock.Mock(spec=_image.Image(id="id")) + + self.proxy._create = mock.Mock() + self.proxy._create.return_value = created_image + + # Positional: container_format, disk_format + # Keyword: data, name, tags + rv = self.proxy.upload_image( + "bare", + "qcow2", + data="imagedata", + name="test-image", + tags=["tag1", "tag2"], + ) + + self.proxy._create.assert_called_with( + _image.Image, + container_format="bare", + disk_format="qcow2", + name="test-image", + tags=["tag1", "tag2"], + ) + created_image.upload.assert_called_with(self.proxy) + self.assertEqual(rv, created_image) + self.assertEqual(created_image.data, "imagedata") + + def test_image_download(self): + original_image = _image.Image(**EXAMPLE) + self._verify( + 'openstack.image.v2.image.Image.download', + self.proxy.download_image, + method_args=[original_image], + method_kwargs={ + 'output': 'some_output', + 'chunk_size': 1, + 'stream': True, + }, + expected_args=[self.proxy], + expected_kwargs={ + 'output': 'some_output', + 'chunk_size': 1, + 'stream': True, + }, + ) + + @mock.patch("openstack.image.v2.image.Image.fetch") + def test_image_stage(self, mock_fetch): + image = _image.Image(id="id", status="queued") + image.stage = mock.Mock() + + self.proxy.stage_image(image) + mock_fetch.assert_called() + image.stage.assert_called_with(self.proxy) + + @mock.patch("openstack.image.v2.image.Image.fetch") + def test_image_stage_with_data(self, mock_fetch): + image = _image.Image(id="id", status="queued") + image.stage = mock.Mock() + mock_fetch.return_value = image + + rv = self.proxy.stage_image(image, data="data") + + image.stage.assert_called_with(self.proxy) + mock_fetch.assert_called() + self.assertEqual(rv.data, "data") + + def 
test_image_stage_conflicting_options(self): + image = _image.Image(id="id", status="queued") + image.stage = mock.Mock() + + exc = self.assertRaises( + exceptions.SDKException, + self.proxy.stage_image, + image, + filename='foo', + data='data', + ) + self.assertIn( + 'filename and data are mutually exclusive', + str(exc), + ) + image.stage.assert_not_called() + + def test_image_stage_wrong_status(self): + image = _image.Image(id="id", status="active") + image.stage = mock.Mock() + + exc = self.assertRaises( + exceptions.SDKException, + self.proxy.stage_image, + image, + data="data", + ) + self.assertIn( + 'Image stage is only possible for images in the queued state.', + str(exc), + ) + image.stage.assert_not_called() def test_image_delete(self): - self.verify_delete(self.proxy.delete_image, image.Image, False) + self.verify_delete(self.proxy.delete_image, _image.Image, False) + + def test_image_delete__ignore(self): + self.verify_delete(self.proxy.delete_image, _image.Image, True) - def test_image_delete_ignore(self): - self.verify_delete(self.proxy.delete_image, image.Image, True) + def test_delete_image__from_store(self): + store = _service_info.Store(id='fast', is_default=True) + store.delete_image = mock.Mock() + image = _image.Image(id="id", status="queued") - def test_image_update(self): - self.verify_update(self.proxy.update_image, image.Image) + self.proxy.delete_image(image, store=store) + + store.delete_image.assert_called_with( + self.proxy, + image, + ignore_missing=True, + ) + + @mock.patch("openstack.resource.Resource._translate_response") + @mock.patch("openstack.proxy.Proxy._get") + @mock.patch("openstack.image.v2.image.Image.commit") + def test_image_update( + self, mock_commit_image, mock_get_image, mock_transpose + ): + original_image = _image.Image(**EXAMPLE) + mock_get_image.return_value = original_image + EXAMPLE['name'] = 'fake_name' + updated_image = _image.Image(**EXAMPLE) + mock_commit_image.return_value = updated_image.to_dict() + result 
= self.proxy.update_image( + original_image, **updated_image.to_dict() + ) + self.assertEqual('fake_name', result.get('name')) def test_image_get(self): - self.verify_get(self.proxy.get_image, image.Image) + self.verify_get(self.proxy.get_image, _image.Image) def test_images(self): - self.verify_list(self.proxy.images, image.Image, paginated=True) + self.verify_list(self.proxy.images, _image.Image) def test_add_tag(self): - self._verify("openstack.image.v2.image.Image.add_tag", - self.proxy.add_tag, - method_args=["image", "tag"], - expected_args=["tag"]) + self._verify( + "openstack.image.v2.image.Image.add_tag", + self.proxy.add_tag, + method_args=["image", "tag"], + expected_args=[self.proxy, "tag"], + ) def test_remove_tag(self): - self._verify("openstack.image.v2.image.Image.remove_tag", - self.proxy.remove_tag, - method_args=["image", "tag"], - expected_args=["tag"]) + self._verify( + "openstack.image.v2.image.Image.remove_tag", + self.proxy.remove_tag, + method_args=["image", "tag"], + expected_args=[self.proxy, "tag"], + ) def test_deactivate_image(self): - self._verify("openstack.image.v2.image.Image.deactivate", - self.proxy.deactivate_image, - method_args=["image"]) + self._verify( + "openstack.image.v2.image.Image.deactivate", + self.proxy.deactivate_image, + method_args=["image"], + expected_args=[self.proxy], + ) def test_reactivate_image(self): - self._verify("openstack.image.v2.image.Image.reactivate", - self.proxy.reactivate_image, - method_args=["image"]) + self._verify( + "openstack.image.v2.image.Image.reactivate", + self.proxy.reactivate_image, + method_args=["image"], + expected_args=[self.proxy], + ) + + def test_image_tasks(self): + self.verify_list( + self.proxy.image_tasks, + _image_tasks.ImageTasks, + method_kwargs={'image': 'image_1'}, + expected_kwargs={'image_id': 'image_1'}, + ) + +class TestMember(TestImageProxy): def test_member_create(self): - self.verify_create(self.proxy.add_member, member.Member, - method_kwargs={"image": 
"test_id"}, - expected_kwargs={"image_id": "test_id"}) + self.verify_create( + self.proxy.add_member, + _member.Member, + method_kwargs={"image": "test_id"}, + expected_kwargs={"image_id": "test_id"}, + ) def test_member_delete(self): - self._verify2("openstack.proxy2.BaseProxy._delete", - self.proxy.remove_member, - method_args=["member_id"], - method_kwargs={"image": "image_id", - "ignore_missing": False}, - expected_args=[member.Member], - expected_kwargs={"member_id": "member_id", - "image_id": "image_id", - "ignore_missing": False}) + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.remove_member, + method_args=["member_id"], + method_kwargs={"image": "image_id", "ignore_missing": False}, + expected_args=[_member.Member, None], + expected_kwargs={ + "member_id": "member_id", + "image_id": "image_id", + "ignore_missing": False, + }, + ) def test_member_delete_ignore(self): - self._verify2("openstack.proxy2.BaseProxy._delete", - self.proxy.remove_member, - method_args=["member_id"], - method_kwargs={"image": "image_id"}, - expected_args=[member.Member], - expected_kwargs={"member_id": "member_id", - "image_id": "image_id", - "ignore_missing": True}) + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.remove_member, + method_args=["member_id"], + method_kwargs={"image": "image_id"}, + expected_args=[_member.Member, None], + expected_kwargs={ + "member_id": "member_id", + "image_id": "image_id", + "ignore_missing": True, + }, + ) def test_member_update(self): - self._verify2("openstack.proxy2.BaseProxy._update", - self.proxy.update_member, - method_args=['member_id', 'image_id'], - expected_args=[member.Member], - expected_kwargs={'member_id': 'member_id', - 'image_id': 'image_id'}) + self._verify( + "openstack.proxy.Proxy._update", + self.proxy.update_member, + method_args=['member_id', 'image_id'], + expected_args=[_member.Member, None], + expected_kwargs={'member_id': 'member_id', 'image_id': 'image_id'}, + ) def test_member_get(self): 
- self._verify2("openstack.proxy2.BaseProxy._get", - self.proxy.get_member, - method_args=['member_id'], - method_kwargs={"image": "image_id"}, - expected_args=[member.Member], - expected_kwargs={'member_id': 'member_id', - 'image_id': 'image_id'}) + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_member, + method_args=['member_id'], + method_kwargs={"image": "image_id"}, + expected_args=[_member.Member], + expected_kwargs={'member_id': 'member_id', 'image_id': 'image_id'}, + ) def test_member_find(self): - self._verify2("openstack.proxy2.BaseProxy._find", - self.proxy.find_member, - method_args=['member_id'], - method_kwargs={"image": "image_id"}, - expected_args=[member.Member, "member_id"], - expected_kwargs={'ignore_missing': True, - 'image_id': 'image_id'}) + self._verify( + "openstack.proxy.Proxy._find", + self.proxy.find_member, + method_args=['member_id'], + method_kwargs={"image": "image_id"}, + expected_args=[_member.Member, "member_id"], + expected_kwargs={'ignore_missing': True, 'image_id': 'image_id'}, + ) def test_members(self): - self.verify_list(self.proxy.members, member.Member, paginated=False, - method_args=('image_1',), - expected_kwargs={'image_id': 'image_1'}) + self.verify_list( + self.proxy.members, + _member.Member, + method_kwargs={'image': 'image_1'}, + expected_kwargs={'image_id': 'image_1'}, + ) + + +class TestMetadefNamespace(TestImageProxy): + def test_metadef_namespace_create(self): + self.verify_create( + self.proxy.create_metadef_namespace, + _metadef_namespace.MetadefNamespace, + ) + + def test_metadef_namespace_delete(self): + self.verify_delete( + self.proxy.delete_metadef_namespace, + _metadef_namespace.MetadefNamespace, + False, + ) + + def test_metadef_namespace_delete__ignore(self): + self.verify_delete( + self.proxy.delete_metadef_namespace, + _metadef_namespace.MetadefNamespace, + True, + ) + + def test_metadef_namespace_get(self): + self.verify_get( + self.proxy.get_metadef_namespace, + 
_metadef_namespace.MetadefNamespace, + ) + + def test_metadef_namespaces(self): + self.verify_list( + self.proxy.metadef_namespaces, + _metadef_namespace.MetadefNamespace, + ) + + def test_metadef_namespace_update(self): + # we're (intentionally) adding an additional field, 'namespace', to the + # request body + self.verify_update( + self.proxy.update_metadef_namespace, + _metadef_namespace.MetadefNamespace, + method_kwargs={'is_protected': True}, + expected_kwargs={'namespace': 'resource_id', 'is_protected': True}, + ) + + +class TestMetadefObject(TestImageProxy): + def test_create_metadef_object(self): + self.verify_create( + self.proxy.create_metadef_object, + _metadef_object.MetadefObject, + method_kwargs={"namespace": "test_namespace_name"}, + expected_kwargs={"namespace_name": "test_namespace_name"}, + ) + + def test_get_metadef_object(self): + self.verify_get( + self.proxy.get_metadef_object, + _metadef_object.MetadefObject, + method_kwargs={"namespace": "test_namespace_name"}, + expected_kwargs={ + "namespace_name": "test_namespace_name", + 'name': 'resource_id', + }, + expected_args=[], + ) + + def test_metadef_objects(self): + self.verify_list( + self.proxy.metadef_objects, + _metadef_object.MetadefObject, + method_kwargs={"namespace": "test_namespace_name"}, + expected_kwargs={"namespace_name": "test_namespace_name"}, + ) + + def test_update_metadef_object(self): + self._verify( + "openstack.proxy.Proxy._update", + self.proxy.update_metadef_object, + method_args=["test_metadef_object", "test_namespace_name"], + method_kwargs={"name": "new_object"}, + expected_args=[ + _metadef_object.MetadefObject, + 'test_metadef_object', + ], + expected_kwargs={ + "name": "new_object", + "namespace_name": "test_namespace_name", + }, + ) + + def test_delete_metadef_object(self): + self.verify_delete( + self.proxy.delete_metadef_object, + _metadef_object.MetadefObject, + False, + method_kwargs={"namespace": "test_namespace_name"}, + expected_kwargs={"namespace_name": 
"test_namespace_name"}, + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_delete_all_metadef_objects(self, mock_get_resource): + fake_object = _metadef_namespace.MetadefNamespace() + mock_get_resource.return_value = fake_object + self._verify( + "openstack.image.v2.metadef_namespace.MetadefNamespace.delete_all_objects", + self.proxy.delete_all_metadef_objects, + method_args=['test_namespace'], + expected_args=[self.proxy], + ) + mock_get_resource.assert_called_once_with( + _metadef_namespace.MetadefNamespace, 'test_namespace' + ) + + +class TestMetadefResourceType(TestImageProxy): + def test_metadef_resource_types(self): + self.verify_list( + self.proxy.metadef_resource_types, + _metadef_resource_type.MetadefResourceType, + ) + + +class TestMetadefResourceTypeAssociation(TestImageProxy): + def test_create_metadef_resource_type_association(self): + self.verify_create( + self.proxy.create_metadef_resource_type_association, + _metadef_resource_type.MetadefResourceTypeAssociation, + method_kwargs={'metadef_namespace': 'namespace_name'}, + expected_kwargs={'namespace_name': 'namespace_name'}, + ) + + def test_delete_metadef_resource_type_association(self): + self.verify_delete( + self.proxy.delete_metadef_resource_type_association, + _metadef_resource_type.MetadefResourceTypeAssociation, + False, + method_kwargs={'metadef_namespace': 'namespace_name'}, + expected_kwargs={'namespace_name': 'namespace_name'}, + ) + + def test_delete_metadef_resource_type_association_ignore(self): + self.verify_delete( + self.proxy.delete_metadef_resource_type_association, + _metadef_resource_type.MetadefResourceTypeAssociation, + True, + method_kwargs={'metadef_namespace': 'namespace_name'}, + expected_kwargs={'namespace_name': 'namespace_name'}, + ) + + def test_metadef_resource_type_associations(self): + self.verify_list( + self.proxy.metadef_resource_type_associations, + _metadef_resource_type.MetadefResourceTypeAssociation, + method_kwargs={'metadef_namespace': 
'namespace_name'}, + expected_kwargs={'namespace_name': 'namespace_name'}, + ) + + +class TestSchema(TestImageProxy): + def test_images_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_images_schema, + expected_args=[_schema.Schema], + expected_kwargs={ + 'base_path': '/schemas/images', + 'requires_id': False, + }, + ) + + def test_image_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_image_schema, + expected_args=[_schema.Schema], + expected_kwargs={ + 'base_path': '/schemas/image', + 'requires_id': False, + }, + ) + + def test_members_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_members_schema, + expected_args=[_schema.Schema], + expected_kwargs={ + 'base_path': '/schemas/members', + 'requires_id': False, + }, + ) + + def test_member_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_member_schema, + expected_args=[_schema.Schema], + expected_kwargs={ + 'base_path': '/schemas/member', + 'requires_id': False, + }, + ) + + +class TestTask(TestImageProxy): + def test_task_get(self): + self.verify_get(self.proxy.get_task, _task.Task) + + def test_tasks(self): + self.verify_list(self.proxy.tasks, _task.Task) + + def test_task_create(self): + self.verify_create(self.proxy.create_task, _task.Task) + + def test_wait_for_task_immediate_status(self): + status = 'success' + res = _task.Task(id='1234', status=status) + + result = self.proxy.wait_for_task(res, status, "failure", 0.01, 0.1) + + self.assertEqual(res, result) + + def test_wait_for_task_immediate_status_case(self): + status = "SUCcess" + res = _task.Task(id='1234', status=status) + + result = self.proxy.wait_for_task(res, status, "failure", 0.01, 0.1) + + self.assertEqual(res, result) + + def test_wait_for_task_error_396(self): + # Ensure we create a new task when we get 396 error + res = _task.Task( + id='id', + status='waiting', + type='some_type', + input='some_input', + 
result='some_result', + ) + + mock_fetch = mock.Mock() + mock_fetch.side_effect = [ + _task.Task( + id='id', + status='failure', + type='some_type', + input='some_input', + result='some_result', + message=_proxy._IMAGE_ERROR_396, + ), + _task.Task(id='fake', status='waiting'), + _task.Task(id='fake', status='success'), + ] + + self.proxy._create = mock.Mock() + self.proxy._create.side_effect = [ + _task.Task(id='fake', status='success') + ] + + with mock.patch.object(_task.Task, 'fetch', mock_fetch): + result = self.proxy.wait_for_task(res, interval=0.01, wait=0.5) + + self.assertEqual('success', result.status) + + self.proxy._create.assert_called_with( + mock.ANY, input=res.input, type=res.type + ) + + def test_wait_for_task_wait(self): + res = _task.Task(id='id', status='waiting') + + mock_fetch = mock.Mock() + mock_fetch.side_effect = [ + _task.Task(id='id', status='waiting'), + _task.Task(id='id', status='waiting'), + _task.Task(id='id', status='success'), + ] + + with mock.patch.object(_task.Task, 'fetch', mock_fetch): + result = self.proxy.wait_for_task(res, interval=0.01, wait=0.5) + + self.assertEqual('success', result.status) + + def test_tasks_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_tasks_schema, + expected_args=[_schema.Schema], + expected_kwargs={ + 'base_path': '/schemas/tasks', + 'requires_id': False, + }, + ) + + def test_task_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_task_schema, + expected_args=[_schema.Schema], + expected_kwargs={ + 'base_path': '/schemas/task', + 'requires_id': False, + }, + ) + + +class TestMisc(TestImageProxy): + def test_stores(self): + self.verify_list(self.proxy.stores, _service_info.Store) + + def test_import_info(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_import_info, + method_args=[], + method_kwargs={}, + expected_args=[_service_info.Import], + expected_kwargs={'requires_id': False}, + ) + + +class 
TestMetadefSchema(TestImageProxy): + def test_metadef_namespace_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_namespace_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/namespace', + 'requires_id': False, + }, + ) + + def test_metadef_namespaces_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_namespaces_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/namespaces', + 'requires_id': False, + }, + ) + + def test_metadef_resource_type_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_resource_type_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/resource_type', + 'requires_id': False, + }, + ) + + def test_metadef_resource_types_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_resource_types_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/resource_types', + 'requires_id': False, + }, + ) + + def test_metadef_object_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_object_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/object', + 'requires_id': False, + }, + ) + + def test_metadef_objects_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_objects_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/objects', + 'requires_id': False, + }, + ) + + def test_metadef_property_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_property_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': 
'/schemas/metadefs/property', + 'requires_id': False, + }, + ) + + def test_metadef_properties_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_properties_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/properties', + 'requires_id': False, + }, + ) + + def test_metadef_tag_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_tag_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/tag', + 'requires_id': False, + }, + ) + + def test_metadef_tags_schema_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_metadef_tags_schema, + expected_args=[_metadef_schema.MetadefSchema], + expected_kwargs={ + 'base_path': '/schemas/metadefs/tags', + 'requires_id': False, + }, + ) + + +class TestCache(TestImageProxy): + def test_image_cache_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_image_cache, + expected_args=[_cache.Cache], + expected_kwargs={'requires_id': False}, + ) + + def test_cache_image_delete(self): + self.verify_delete( + self.proxy.cache_delete_image, + _cache.Cache, + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_image_queue(self, mock_get_resource): + fake_cache = _cache.Cache() + mock_get_resource.return_value = fake_cache + self._verify( + "openstack.image.v2.cache.Cache.queue", + self.proxy.queue_image, + method_args=['image-id'], + expected_args=[self.proxy, 'image-id'], + ) + mock_get_resource.assert_called_once_with(_cache.Cache, None) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_image_clear_cache(self, mock_get_resource): + fake_cache = _cache.Cache() + mock_get_resource.return_value = fake_cache + self._verify( + "openstack.image.v2.cache.Cache.clear", + self.proxy.clear_cache, + method_args=['both'], + expected_args=[self.proxy, 'both'], + ) + 
mock_get_resource.assert_called_once_with(_cache.Cache, None) + + mock_get_resource.reset_mock() + self._verify( + "openstack.image.v2.cache.Cache.clear", + self.proxy.clear_cache, + method_args=[], + expected_args=[self.proxy, 'both'], + ) + mock_get_resource.assert_called_once_with(_cache.Cache, None) diff --git a/openstack/tests/unit/image/v2/test_schema.py b/openstack/tests/unit/image/v2/test_schema.py new file mode 100644 index 0000000000..56807f955a --- /dev/null +++ b/openstack/tests/unit/image/v2/test_schema.py @@ -0,0 +1,59 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.image.v2 import schema +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'additionalProperties': {'type': 'string'}, + 'links': [ + {'href': '{self}', 'rel': 'self'}, + {'href': '{file}', 'rel': 'enclosure'}, + {'href': '{schema}', 'rel': 'describedby'}, + ], + 'name': 'image', + 'properties': { + 'architecture': { + 'description': 'Operating system architecture', + 'is_base': False, + 'type': 'string', + }, + 'visibility': { + 'description': 'Scope of image accessibility', + 'enum': ['public', 'private'], + 'type': 'string', + }, + }, +} + + +class TestSchema(base.TestCase): + def test_basic(self): + sot = schema.Schema() + self.assertIsNone(sot.resource_key) + self.assertIsNone(sot.resources_key) + self.assertEqual('/schemas', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = schema.Schema(**EXAMPLE) + self.assertEqual(EXAMPLE['properties'], sot.properties) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual( + EXAMPLE['additionalProperties'], sot.additional_properties + ) diff --git a/openstack/tests/unit/image/v2/test_service_info.py b/openstack/tests/unit/image/v2/test_service_info.py new file mode 100644 index 0000000000..b9d3542188 --- /dev/null +++ b/openstack/tests/unit/image/v2/test_service_info.py @@ -0,0 +1,84 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack import exceptions +from openstack.image.v2 import service_info as si +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE_IMPORT = { + 'import-methods': { + 'description': 'Import methods available.', + 'type': 'array', + 'value': ['glance-direct', 'web-download'], + } +} +EXAMPLE_STORE = { + 'id': IDENTIFIER, + 'description': 'Fast access to rbd store', + 'default': True, + 'properties': { + "pool": "pool1", + "chunk_size": 65536, + "thin_provisioning": False, + }, +} + + +class TestStore(base.TestCase): + def test_basic(self): + sot = si.Store() + self.assertIsNone(sot.resource_key) + self.assertEqual('stores', sot.resources_key) + self.assertEqual('/info/stores', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = si.Store(**EXAMPLE_STORE) + self.assertEqual(EXAMPLE_STORE['id'], sot.id) + self.assertEqual(EXAMPLE_STORE['description'], sot.description) + self.assertEqual(EXAMPLE_STORE['default'], sot.is_default) + self.assertEqual(EXAMPLE_STORE['properties'], sot.properties) + + @mock.patch.object(exceptions, 'raise_from_response', mock.Mock()) + def test_delete_image(self): + sot = si.Store(**EXAMPLE_STORE) + session = mock.Mock() + session.delete = mock.Mock() + + sot.delete_image(session, image='image_id') + + session.delete.assert_called_with('stores/IDENTIFIER/image_id') + + +class TestImport(base.TestCase): + def test_basic(self): + sot = si.Import() + self.assertIsNone(sot.resource_key) + self.assertIsNone(sot.resources_key) + self.assertEqual('/info/import', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + 
self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = si.Import(**EXAMPLE_IMPORT) + self.assertEqual(EXAMPLE_IMPORT['import-methods'], sot.import_methods) diff --git a/openstack/tests/unit/image/v2/test_task.py b/openstack/tests/unit/image/v2/test_task.py new file mode 100644 index 0000000000..c93e1388e7 --- /dev/null +++ b/openstack/tests/unit/image/v2/test_task.py @@ -0,0 +1,71 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.image.v2 import task +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'created_at': '2016-06-24T14:40:19Z', + 'id': IDENTIFIER, + 'input': { + 'image_properties': {'container_format': 'ovf', 'disk_format': 'vhd'}, + 'import_from': 'http://example.com', + 'import_from_format': 'qcow2', + }, + 'message': 'message', + 'owner': 'fa6c8c1600f4444281658a23ee6da8e8', + 'result': 'some result', + 'schema': '/v2/schemas/task', + 'status': 'processing', + 'type': 'import', + 'updated_at': '2016-06-24T14:40:20Z', +} + + +class TestTask(base.TestCase): + def test_basic(self): + sot = task.Task() + self.assertIsNone(sot.resource_key) + self.assertEqual('tasks', sot.resources_key) + self.assertEqual('/tasks', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + 'limit': 'limit', + 
'marker': 'marker', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + 'status': 'status', + 'type': 'type', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = task.Task(**EXAMPLE) + self.assertEqual(IDENTIFIER, sot.id) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['input'], sot.input) + self.assertEqual(EXAMPLE['message'], sot.message) + self.assertEqual(EXAMPLE['owner'], sot.owner_id) + self.assertEqual(EXAMPLE['result'], sot.result) + self.assertEqual(EXAMPLE['schema'], sot.schema) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/instance_ha/__init__.py b/openstack/tests/unit/instance_ha/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/instance_ha/v1/__init__.py b/openstack/tests/unit/instance_ha/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/instance_ha/v1/test_host.py b/openstack/tests/unit/instance_ha/v1/test_host.py new file mode 100644 index 0000000000..d1a79743cf --- /dev/null +++ b/openstack/tests/unit/instance_ha/v1/test_host.py @@ -0,0 +1,75 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack.instance_ha.v1 import host +from openstack.tests.unit import base + +FAKE_ID = "1c2f1795-ce78-4d4c-afd0-ce141fdb3952" +FAKE_UUID = "11f7597f-87d2-4057-b754-ba611f989807" +FAKE_HOST_ID = "c27dec16-ed4d-4ebe-8e77-f1e28ec32417" +FAKE_CONTROL_ATTRIBUTES = {"mcastaddr": "239.255.1.1", "mcastport": "5405"} +HOST = { + "id": FAKE_ID, + "uuid": FAKE_UUID, + "segment_id": FAKE_HOST_ID, + "created_at": "2018-03-22T00:00:00.000000", + "updated_at": "2018-03-23T00:00:00.000000", + "name": "my_host", + "type": "pacemaker", + "control_attributes": FAKE_CONTROL_ATTRIBUTES, + "on_maintenance": False, + "reserved": False, + "failover_segment_id": FAKE_HOST_ID, +} + + +class TestHost(base.TestCase): + def test_basic(self): + sot = host.Host(HOST) + self.assertEqual("host", sot.resource_key) + self.assertEqual("hosts", sot.resources_key) + self.assertEqual("/segments/%(segment_id)s/hosts", sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertDictEqual( + { + "failover_segment_id": "failover_segment_id", + "limit": "limit", + "marker": "marker", + "on_maintenance": "on_maintenance", + "reserved": "reserved", + "sort_dir": "sort_dir", + "sort_key": "sort_key", + "type": "type", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = host.Host(**HOST) + self.assertEqual(HOST["id"], sot.id) + self.assertEqual(HOST["uuid"], sot.uuid) + self.assertEqual(HOST["segment_id"], sot.segment_id) + self.assertEqual(HOST["created_at"], sot.created_at) + self.assertEqual(HOST["updated_at"], sot.updated_at) + self.assertEqual(HOST["name"], sot.name) + self.assertEqual(HOST["type"], sot.type) + self.assertEqual(HOST["control_attributes"], sot.control_attributes) + self.assertEqual(HOST["on_maintenance"], sot.on_maintenance) + self.assertEqual(HOST["reserved"], sot.reserved) + 
self.assertEqual(HOST["failover_segment_id"], sot.failover_segment_id) diff --git a/openstack/tests/unit/instance_ha/v1/test_notification.py b/openstack/tests/unit/instance_ha/v1/test_notification.py new file mode 100644 index 0000000000..4d41cd8186 --- /dev/null +++ b/openstack/tests/unit/instance_ha/v1/test_notification.py @@ -0,0 +1,137 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.instance_ha.v1 import notification +from openstack.tests.unit import base + +FAKE_ID = "569429e9-7f14-41be-a38e-920277e637db" +FAKE_UUID = "a0e70d3a-b3a2-4616-b65d-a7c03a2c85fc" +FAKE_HOST_UUID = "cad9ff01-c354-4414-ba3c-31b925be67f1" +PAYLOAD = { + "instance_uuid": "4032bc1d-d723-47f6-b5ac-b9b3e6dbb795", + "vir_domain_event": "STOPPED_FAILED", + "event": "LIFECYCLE", +} + +PROGRESS_DETAILS = [ + { + "timestamp": "2019-02-28 07:21:33.291810", + "progress": 1.0, + "message": "Skipping recovery for process " + "nova-compute as it is already disabled", + } +] + +RECOVERY_WORKFLOW_DETAILS = [ + { + "progress": 1.0, + "state": "SUCCESS", + "name": "DisableComputeNodeTask", + "progress_details": PROGRESS_DETAILS, + } +] + +NOTIFICATION = { + "id": FAKE_ID, + "notification_uuid": FAKE_UUID, + "created_at": "2018-03-22T00:00:00.000000", + "updated_at": "2018-03-23T00:00:00.000000", + "type": "pacemaker", + "hostname": "fake_host", + "status": "new", + "generated_time": "2018-03-21T00:00:00.000000", + "payload": 
PAYLOAD, + "source_host_uuid": FAKE_HOST_UUID, + "recovery_workflow_details": RECOVERY_WORKFLOW_DETAILS, +} + + +class TestNotification(base.TestCase): + def test_basic(self): + sot = notification.Notification(NOTIFICATION) + self.assertEqual("notification", sot.resource_key) + self.assertEqual("notifications", sot.resources_key) + self.assertEqual("/notifications", sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_create) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + + self.assertDictEqual( + { + "generated_since": "generated-since", + "limit": "limit", + "marker": "marker", + "sort_dir": "sort_dir", + "sort_key": "sort_key", + "source_host_uuid": "source_host_uuid", + "status": "status", + "type": "type", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = notification.Notification(**NOTIFICATION) + rec_workflow_details = NOTIFICATION["recovery_workflow_details"][0] + self.assertEqual(NOTIFICATION["id"], sot.id) + self.assertEqual( + NOTIFICATION["notification_uuid"], sot.notification_uuid + ) + self.assertEqual(NOTIFICATION["created_at"], sot.created_at) + self.assertEqual(NOTIFICATION["updated_at"], sot.updated_at) + self.assertEqual(NOTIFICATION["type"], sot.type) + self.assertEqual(NOTIFICATION["hostname"], sot.hostname) + self.assertEqual(NOTIFICATION["status"], sot.status) + self.assertEqual(NOTIFICATION["generated_time"], sot.generated_time) + self.assertEqual(NOTIFICATION["payload"], sot.payload) + self.assertEqual( + NOTIFICATION["source_host_uuid"], sot.source_host_uuid + ) + self.assertEqual( + rec_workflow_details["name"], sot.recovery_workflow_details[0].name + ) + self.assertEqual( + rec_workflow_details["state"], + sot.recovery_workflow_details[0].state, + ) + self.assertEqual( + rec_workflow_details["progress"], + sot.recovery_workflow_details[0].progress, + ) + self.assertEqual( + 
rec_workflow_details["progress_details"][0]['progress'], + sot.recovery_workflow_details[0].progress_details[0].progress, + ) + self.assertEqual( + rec_workflow_details["progress_details"][0]['message'], + sot.recovery_workflow_details[0].progress_details[0].message, + ) + self.assertEqual( + rec_workflow_details["progress_details"][0]['timestamp'], + sot.recovery_workflow_details[0].progress_details[0].timestamp, + ) + self.assertIsInstance(sot.recovery_workflow_details, list) + self.assertIsInstance( + sot.recovery_workflow_details[0].progress_details, list + ) + self.assertIsInstance( + sot.recovery_workflow_details[0], + notification.RecoveryWorkflowDetailItem, + ) + self.assertIsInstance( + sot.recovery_workflow_details[0].progress_details[0], + notification.ProgressDetailsItem, + ) diff --git a/openstack/tests/unit/instance_ha/v1/test_proxy.py b/openstack/tests/unit/instance_ha/v1/test_proxy.py new file mode 100644 index 0000000000..3d2727146b --- /dev/null +++ b/openstack/tests/unit/instance_ha/v1/test_proxy.py @@ -0,0 +1,127 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack.instance_ha.v1 import _proxy +from openstack.instance_ha.v1 import host +from openstack.instance_ha.v1 import notification +from openstack.instance_ha.v1 import segment +from openstack.instance_ha.v1 import vmove +from openstack.tests.unit import test_proxy_base + +SEGMENT_ID = "c50b96eb-2a66-40f8-bca8-c5fa90d595c0" +HOST_ID = "52d05e43-d08e-42b8-ae33-e47c8ea2ad47" +NOTIFICATION_ID = "a0e70d3a-b3a2-4616-b65d-a7c03a2c85fc" +VMOVE_ID = "16a7c91f-8342-49a7-c731-3a632293f845" + + +class TestInstanceHaProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestInstanceHaHosts(TestInstanceHaProxy): + def test_hosts(self): + self.verify_list( + self.proxy.hosts, + host.Host, + method_args=[SEGMENT_ID], + expected_args=[], + expected_kwargs={"segment_id": SEGMENT_ID}, + ) + + def test_host_get(self): + self.verify_get( + self.proxy.get_host, + host.Host, + method_args=[HOST_ID], + method_kwargs={"segment_id": SEGMENT_ID}, + expected_kwargs={"segment_id": SEGMENT_ID}, + ) + + def test_host_create(self): + self.verify_create( + self.proxy.create_host, + host.Host, + method_args=[SEGMENT_ID], + method_kwargs={}, + expected_args=[], + expected_kwargs={"segment_id": SEGMENT_ID}, + ) + + def test_host_update(self): + self.verify_update( + self.proxy.update_host, + host.Host, + method_kwargs={"segment_id": SEGMENT_ID}, + ) + + def test_host_delete(self): + self.verify_delete( + self.proxy.delete_host, + host.Host, + True, + method_kwargs={"segment_id": SEGMENT_ID}, + expected_kwargs={"segment_id": SEGMENT_ID}, + ) + + +class TestInstanceHaNotifications(TestInstanceHaProxy): + def test_notifications(self): + self.verify_list(self.proxy.notifications, notification.Notification) + + def test_notification_get(self): + self.verify_get(self.proxy.get_notification, notification.Notification) + + def test_notification_create(self): + self.verify_create( + self.proxy.create_notification, 
notification.Notification + ) + + +class TestInstanceHaSegments(TestInstanceHaProxy): + def test_segments(self): + self.verify_list(self.proxy.segments, segment.Segment) + + def test_segment_get(self): + self.verify_get(self.proxy.get_segment, segment.Segment) + + def test_segment_create(self): + self.verify_create(self.proxy.create_segment, segment.Segment) + + def test_segment_update(self): + self.verify_update(self.proxy.update_segment, segment.Segment) + + def test_segment_delete(self): + self.verify_delete(self.proxy.delete_segment, segment.Segment, True) + + +class TestInstanceHaVMoves(TestInstanceHaProxy): + def test_vmoves(self): + self.verify_list( + self.proxy.vmoves, + vmove.VMove, + method_args=[NOTIFICATION_ID], + expected_args=[], + expected_kwargs={"notification_id": NOTIFICATION_ID}, + ) + + def test_vmove_get(self): + self.verify_get( + self.proxy.get_vmove, + vmove.VMove, + method_args=[VMOVE_ID, NOTIFICATION_ID], + expected_args=[VMOVE_ID], + expected_kwargs={"notification_id": NOTIFICATION_ID}, + ) diff --git a/openstack/tests/unit/instance_ha/v1/test_segment.py b/openstack/tests/unit/instance_ha/v1/test_segment.py new file mode 100644 index 0000000000..334265f1b4 --- /dev/null +++ b/openstack/tests/unit/instance_ha/v1/test_segment.py @@ -0,0 +1,68 @@ +# Copyright(c) 2018 Nippon Telegraph and Telephone Corporation +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +from openstack.instance_ha.v1 import segment +from openstack.tests.unit import base + +FAKE_ID = "1c2f1795-ce78-4d4c-afd0-ce141fdb3952" +FAKE_UUID = "11f7597f-87d2-4057-b754-ba611f989807" +SEGMENT = { + "id": FAKE_ID, + "uuid": FAKE_UUID, + "created_at": "2018-03-22T00:00:00.000000", + "updated_at": "2018-03-23T00:00:00.000000", + "name": "my_segment", + "description": "something", + "recovery_method": "auto", + "service_type": "COMPUTE_HOST", + "enabled": True, +} + + +class TestSegment(base.TestCase): + def test_basic(self): + sot = segment.Segment(SEGMENT) + self.assertEqual("segment", sot.resource_key) + self.assertEqual("segments", sot.resources_key) + self.assertEqual("/segments", sot.base_path) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "recovery_method": "recovery_method", + "service_type": "service_type", + "is_enabled": "enabled", + "sort_dir": "sort_dir", + "sort_key": "sort_key", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = segment.Segment(**SEGMENT) + self.assertEqual(SEGMENT["id"], sot.id) + self.assertEqual(SEGMENT["uuid"], sot.uuid) + self.assertEqual(SEGMENT["created_at"], sot.created_at) + self.assertEqual(SEGMENT["updated_at"], sot.updated_at) + self.assertEqual(SEGMENT["name"], sot.name) + self.assertEqual(SEGMENT["description"], sot.description) + self.assertEqual(SEGMENT["recovery_method"], sot.recovery_method) + self.assertEqual(SEGMENT["service_type"], sot.service_type) + self.assertEqual(SEGMENT["enabled"], sot.is_enabled) diff --git a/openstack/tests/unit/instance_ha/v1/test_vmove.py b/openstack/tests/unit/instance_ha/v1/test_vmove.py new file mode 100644 index 0000000000..c5c708d96f --- /dev/null +++ b/openstack/tests/unit/instance_ha/v1/test_vmove.py @@ -0,0 +1,79 @@ +# Copyright(c) 2022 Inspur +# +# 
Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +from openstack.instance_ha.v1 import vmove +from openstack.tests.unit import base + +FAKE_ID = "1" +FAKE_UUID = "16a7c91f-8342-49a7-c731-3a632293f845" +FAKE_NOTIFICATION_ID = "a0e70d3a-b3a2-4616-b65d-a7c03a2c85fc" +FAKE_SERVER_ID = "1c2f1795-ce78-4d4c-afd0-ce141fdb3952" + +VMOVE = { + 'id': FAKE_ID, + 'uuid': FAKE_UUID, + 'notification_id': FAKE_NOTIFICATION_ID, + 'created_at': "2023-01-28T14:55:26.000000", + 'updated_at': "2023-01-28T14:55:31.000000", + 'server_id': FAKE_SERVER_ID, + 'server_name': 'vm1', + 'source_host': 'host1', + 'dest_host': 'host2', + 'start_time': "2023-01-28T14:55:27.000000", + 'end_time': "2023-01-28T14:55:31.000000", + 'status': 'succeeded', + 'type': 'evacuation', + 'message': None, +} + + +class TestVMove(base.TestCase): + def test_basic(self): + sot = vmove.VMove(VMOVE) + self.assertEqual("vmove", sot.resource_key) + self.assertEqual("vmoves", sot.resources_key) + self.assertEqual( + "/notifications/%(notification_id)s/vmoves", sot.base_path + ) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_fetch) + + self.assertDictEqual( + { + "status": "status", + "type": "type", + "limit": "limit", + "marker": "marker", + "sort_dir": "sort_dir", + "sort_key": "sort_key", + }, + sot._query_mapping._mapping, + ) + + def test_create(self): + sot = vmove.VMove(**VMOVE) + self.assertEqual(VMOVE["id"], sot.id) + self.assertEqual(VMOVE["uuid"], sot.uuid) + self.assertEqual(VMOVE["notification_id"], 
sot.notification_id) + self.assertEqual(VMOVE["created_at"], sot.created_at) + self.assertEqual(VMOVE["updated_at"], sot.updated_at) + self.assertEqual(VMOVE["server_id"], sot.server_id) + self.assertEqual(VMOVE["server_name"], sot.server_name) + self.assertEqual(VMOVE["source_host"], sot.source_host) + self.assertEqual(VMOVE["dest_host"], sot.dest_host) + self.assertEqual(VMOVE["start_time"], sot.start_time) + self.assertEqual(VMOVE["end_time"], sot.end_time) + self.assertEqual(VMOVE["status"], sot.status) + self.assertEqual(VMOVE["type"], sot.type) + self.assertEqual(VMOVE["message"], sot.message) diff --git a/openstack/tests/unit/key_manager/test_key_management_service.py b/openstack/tests/unit/key_manager/test_key_management_service.py deleted file mode 100644 index b458daf73c..0000000000 --- a/openstack/tests/unit/key_manager/test_key_management_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.key_manager import key_manager_service - - -class TestKeyManagerService(testtools.TestCase): - - def test_service(self): - sot = key_manager_service.KeyManagerService() - self.assertEqual('key-manager', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/key_manager/v1/test_container.py b/openstack/tests/unit/key_manager/v1/test_container.py index 98fe51b6fd..82d9b2376e 100644 --- a/openstack/tests/unit/key_manager/v1/test_container.py +++ b/openstack/tests/unit/key_manager/v1/test_container.py @@ -10,12 +10,12 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.key_manager.v1 import container +from openstack.tests.unit import base + ID_VAL = "123" -IDENTIFIER = 'http://localhost/containers/%s' % ID_VAL +IDENTIFIER = f'http://localhost/containers/{ID_VAL}' EXAMPLE = { 'container_ref': IDENTIFIER, 'created': '2015-03-09T12:14:57.233772', @@ -24,21 +24,19 @@ 'status': '5', 'type': '6', 'updated': '2015-03-09T12:15:57.233772', - 'consumers': ['7'] + 'consumers': ['7'], } -class TestContainer(testtools.TestCase): - +class TestContainer(base.TestCase): def test_basic(self): sot = container.Container() self.assertIsNone(sot.resource_key) self.assertEqual('containers', sot.resources_key) self.assertEqual('/containers', sot.base_path) - self.assertEqual('key-manager', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/key_manager/v1/test_order.py 
b/openstack/tests/unit/key_manager/v1/test_order.py index 7325005228..a7b93c5d68 100644 --- a/openstack/tests/unit/key_manager/v1/test_order.py +++ b/openstack/tests/unit/key_manager/v1/test_order.py @@ -10,38 +10,36 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.key_manager.v1 import order +from openstack.tests.unit import base + ID_VAL = "123" SECRET_ID = "5" -IDENTIFIER = 'http://localhost/orders/%s' % ID_VAL +IDENTIFIER = f'http://localhost/orders/{ID_VAL}' EXAMPLE = { 'created': '1', 'creator_id': '2', 'meta': {'key': '3'}, 'order_ref': IDENTIFIER, - 'secret_ref': 'http://localhost/secrets/%s' % SECRET_ID, + 'secret_ref': f'http://localhost/secrets/{SECRET_ID}', 'status': '6', 'sub_status': '7', 'sub_status_message': '8', 'type': '9', - 'updated': '10' + 'updated': '10', } -class TestOrder(testtools.TestCase): - +class TestOrder(base.TestCase): def test_basic(self): sot = order.Order() self.assertIsNone(sot.resource_key) self.assertEqual('orders', sot.resources_key) self.assertEqual('/orders', sot.base_path) - self.assertEqual('key-manager', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/key_manager/v1/test_project_quota.py b/openstack/tests/unit/key_manager/v1/test_project_quota.py new file mode 100644 index 0000000000..7bb4b8b539 --- /dev/null +++ b/openstack/tests/unit/key_manager/v1/test_project_quota.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.key_manager.v1 import project_quota +from openstack.tests.unit import base + + +EXAMPLE = { + 'secrets': 10, + 'orders': 20, + 'containers': -1, + 'consumers': 10, + 'cas': 5, +} + + +class TestProjectQuota(base.TestCase): + def test_basic(self): + sot = project_quota.ProjectQuota() + self.assertEqual('project_quotas', sot.resource_key) + self.assertEqual('project_quotas', sot.resources_key) + self.assertEqual('/project-quotas', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = project_quota.ProjectQuota(**EXAMPLE) + self.assertEqual(EXAMPLE['secrets'], sot.secrets) + self.assertEqual(EXAMPLE['orders'], sot.orders) + self.assertEqual(EXAMPLE['containers'], sot.containers) + self.assertEqual(EXAMPLE['consumers'], sot.consumers) + self.assertEqual(EXAMPLE['cas'], sot.cas) diff --git a/openstack/tests/unit/key_manager/v1/test_proxy.py b/openstack/tests/unit/key_manager/v1/test_proxy.py index e3652fb0e2..146e8decc0 100644 --- a/openstack/tests/unit/key_manager/v1/test_proxy.py +++ b/openstack/tests/unit/key_manager/v1/test_proxy.py @@ -13,25 +13,31 @@ from openstack.key_manager.v1 import _proxy from openstack.key_manager.v1 import container from openstack.key_manager.v1 import order +from openstack.key_manager.v1 import project_quota from openstack.key_manager.v1 import secret -from openstack.tests.unit import test_proxy_base2 +from openstack.key_manager.v1 import 
secret_store +from openstack.tests.unit import test_proxy_base -class TestKeyManagerProxy(test_proxy_base2.TestProxyBase): +class TestKeyManagerProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestKeyManagerProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) + +class TestKeyManagerContainer(TestKeyManagerProxy): def test_server_create_attrs(self): self.verify_create(self.proxy.create_container, container.Container) def test_container_delete(self): - self.verify_delete(self.proxy.delete_container, - container.Container, False) + self.verify_delete( + self.proxy.delete_container, container.Container, False + ) def test_container_delete_ignore(self): - self.verify_delete(self.proxy.delete_container, - container.Container, True) + self.verify_delete( + self.proxy.delete_container, container.Container, True + ) def test_container_find(self): self.verify_find(self.proxy.find_container, container.Container) @@ -40,12 +46,13 @@ def test_container_get(self): self.verify_get(self.proxy.get_container, container.Container) def test_containers(self): - self.verify_list(self.proxy.containers, container.Container, - paginated=False) + self.verify_list(self.proxy.containers, container.Container) def test_container_update(self): self.verify_update(self.proxy.update_container, container.Container) + +class TestKeyManagerOrder(TestKeyManagerProxy): def test_order_create_attrs(self): self.verify_create(self.proxy.create_order, order.Order) @@ -62,11 +69,13 @@ def test_order_get(self): self.verify_get(self.proxy.get_order, order.Order) def test_orders(self): - self.verify_list(self.proxy.orders, order.Order, paginated=False) + self.verify_list(self.proxy.orders, order.Order) def test_order_update(self): self.verify_update(self.proxy.update_order, order.Order) + +class TestKeyManagerSecret(TestKeyManagerProxy): def test_secret_create_attrs(self): self.verify_create(self.proxy.create_secret, secret.Secret) @@ -81,9 +90,39 @@ def 
test_secret_find(self): def test_secret_get(self): self.verify_get(self.proxy.get_secret, secret.Secret) + self.verify_get_overrided( + self.proxy, secret.Secret, 'openstack.key_manager.v1.secret.Secret' + ) def test_secrets(self): - self.verify_list(self.proxy.secrets, secret.Secret, paginated=False) + self.verify_list(self.proxy.secrets, secret.Secret) def test_secret_update(self): self.verify_update(self.proxy.update_secret, secret.Secret) + + +class TestKeyManagerSecretStore(TestKeyManagerProxy): + def test_secret_stores(self): + self.verify_list(self.proxy.secret_stores, secret_store.SecretStore) + + +class TestKeyManagerProjectQuota(TestKeyManagerProxy): + def test_project_quota_delete(self): + self.verify_delete( + self.proxy.delete_project_quota, project_quota.ProjectQuota, False + ) + + def test_project_quota_delete_ignore(self): + self.verify_delete( + self.proxy.delete_project_quota, project_quota.ProjectQuota, True + ) + + def test_project_quota_get(self): + self.verify_get( + self.proxy.get_project_quota, project_quota.ProjectQuota + ) + + def test_project_quota_update(self): + self.verify_update( + self.proxy.update_project_quota, project_quota.ProjectQuota + ) diff --git a/openstack/tests/unit/key_manager/v1/test_secret.py b/openstack/tests/unit/key_manager/v1/test_secret.py index 9e103ecbfb..6a9aa8317f 100644 --- a/openstack/tests/unit/key_manager/v1/test_secret.py +++ b/openstack/tests/unit/key_manager/v1/test_secret.py @@ -10,13 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +from unittest import mock from openstack.key_manager.v1 import secret +from openstack.tests.unit import base ID_VAL = "123" -IDENTIFIER = 'http://localhost:9311/v1/secrets/%s' % ID_VAL +IDENTIFIER = f'http://localhost:9311/v1/secrets/{ID_VAL}' EXAMPLE = { 'algorithm': '1', 'bit_length': '2', @@ -31,37 +31,39 @@ 'secret_type': '9', 'payload': '10', 'payload_content_type': '11', - 'payload_content_encoding': '12' + 'payload_content_encoding': '12', } -class TestSecret(testtools.TestCase): - +class TestSecret(base.TestCase): def test_basic(self): sot = secret.Secret() self.assertIsNone(sot.resource_key) self.assertEqual('secrets', sot.resources_key) self.assertEqual('/secrets', sot.base_path) - self.assertEqual('key-manager', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) - self.assertDictEqual({"name": "name", - "mode": "mode", - "bits": "bits", - "secret_type": "secret_type", - "acl_only": "acl_only", - "created": "created", - "updated": "updated", - "expiration": "expiration", - "sort": "sort", - "algorithm": "alg", - "limit": "limit", - "marker": "marker"}, - sot._query_mapping._mapping) + self.assertDictEqual( + { + "name": "name", + "mode": "mode", + "bits": "bits", + "secret_type": "secret_type", + "acl_only": "acl_only", + "created": "created", + "updated": "updated", + "expiration": "expiration", + "sort": "sort", + "algorithm": "alg", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = secret.Secret(**EXAMPLE) @@ -78,10 +80,12 @@ def test_make_it(self): self.assertEqual(EXAMPLE['updated'], sot.updated_at) self.assertEqual(EXAMPLE['secret_type'], sot.secret_type) self.assertEqual(EXAMPLE['payload'], sot.payload) - 
self.assertEqual(EXAMPLE['payload_content_type'], - sot.payload_content_type) - self.assertEqual(EXAMPLE['payload_content_encoding'], - sot.payload_content_encoding) + self.assertEqual( + EXAMPLE['payload_content_type'], sot.payload_content_type + ) + self.assertEqual( + EXAMPLE['payload_content_encoding'], sot.payload_content_encoding + ) def test_get_no_payload(self): sot = secret.Secret(id="id") @@ -92,33 +96,42 @@ def test_get_no_payload(self): rv.json = mock.Mock(return_value=return_body) sess.get = mock.Mock(return_value=rv) - sot.get(sess) - - sess.get.assert_called_once_with("secrets/id", - endpoint_filter=sot.service) + sot.fetch(sess) - def _test_payload(self, sot, metadata, content_type): - content_type = "some/type" - sot = secret.Secret(id="id", payload_content_type=content_type) + sess.get.assert_called_once_with("secrets/id") + def _test_payload(self, sot, metadata, content_type="some/type"): metadata_response = mock.Mock() - metadata_response.json = mock.Mock(return_value=metadata) + # Use copy because the dict gets consumed. 
+ metadata_response.json = mock.Mock(return_value=metadata.copy()) payload_response = mock.Mock() - payload = "secret info" - payload_response.text = payload + payload = b"secret info" + payload_response.content = payload sess = mock.Mock() sess.get = mock.Mock(side_effect=[metadata_response, payload_response]) - rv = sot.get(sess) + rv = sot.fetch(sess) sess.get.assert_has_calls( - [mock.call("secrets/id", endpoint_filter=sot.service), - mock.call("secrets/id/payload", endpoint_filter=sot.service, - headers={"Accept": content_type})]) - - self.assertEqual(rv.payload, payload) + [ + mock.call( + "secrets/id", + ), + mock.call( + "secrets/id/payload", + headers={"Accept": content_type}, + skip_cache=False, + ), + ] + ) + + if content_type == "text/plain": + expected_payload = payload.decode("utf-8") + else: + expected_payload = payload + self.assertEqual(rv.payload, expected_payload) self.assertEqual(rv.status, metadata["status"]) def test_get_with_payload_from_argument(self): @@ -129,7 +142,18 @@ def test_get_with_payload_from_argument(self): def test_get_with_payload_from_content_types(self): content_type = "some/type" - metadata = {"status": "fine", - "content_types": {"default": content_type}} + metadata = { + "status": "fine", + "content_types": {"default": content_type}, + } + sot = secret.Secret(id="id") + self._test_payload(sot, metadata, content_type) + + def test_get_with_text_payload(self): + content_type = "text/plain" + metadata = { + "status": "fine", + "content_types": {"default": content_type}, + } sot = secret.Secret(id="id") self._test_payload(sot, metadata, content_type) diff --git a/openstack/tests/unit/key_manager/v1/test_secret_store.py b/openstack/tests/unit/key_manager/v1/test_secret_store.py new file mode 100644 index 0000000000..08fee411d2 --- /dev/null +++ b/openstack/tests/unit/key_manager/v1/test_secret_store.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in 
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.key_manager.v1 import secret_store +from openstack.tests.unit import base + + +EXAMPLE = { + "status": "ACTIVE", + "updated": "2016-08-22T23:46:45.114283", + "name": "PKCS11 HSM", + "created": "2016-08-22T23:46:45.114283", + "secret_store_ref": "http://localhost:9311/v1/secret-stores/4d27b7a7-b82f-491d-88c0-746bd67dadc8", + "global_default": True, + "crypto_plugin": "p11_crypto", + "secret_store_plugin": "store_crypto", +} + + +class TestSecretStore(base.TestCase): + def test_basic(self): + sot = secret_store.SecretStore() + self.assertEqual('secret_stores', sot.resources_key) + self.assertEqual('/secret-stores', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = secret_store.SecretStore(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['created'], sot.created_at) + self.assertEqual(EXAMPLE['updated'], sot.updated_at) + self.assertEqual(EXAMPLE['secret_store_ref'], sot.secret_store_ref) + self.assertEqual(EXAMPLE['global_default'], sot.global_default) + self.assertEqual(EXAMPLE['crypto_plugin'], sot.crypto_plugin) + self.assertEqual( + EXAMPLE['secret_store_plugin'], sot.secret_store_plugin + ) + # Test the alternate_id extraction + self.assertEqual( + '4d27b7a7-b82f-491d-88c0-746bd67dadc8', sot.secret_store_id + ) diff --git 
a/openstack/tests/unit/load_balancer/__init__.py b/openstack/tests/unit/load_balancer/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/load_balancer/test_amphora.py b/openstack/tests/unit/load_balancer/test_amphora.py new file mode 100644 index 0000000000..323337a3c9 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_amphora.py @@ -0,0 +1,148 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.load_balancer.v2 import amphora +from openstack.tests.unit import base + + +IDENTIFIER = uuid.uuid4() +LB_ID = uuid.uuid4() +LISTENER_ID = uuid.uuid4() +COMPUTE_ID = uuid.uuid4() +VRRP_PORT_ID = uuid.uuid4() +HA_PORT_ID = uuid.uuid4() +IMAGE_ID = uuid.uuid4() +COMPUTE_FLAVOR = uuid.uuid4() +AMPHORA_ID = uuid.uuid4() + +EXAMPLE = { + 'id': IDENTIFIER, + 'loadbalancer_id': LB_ID, + 'compute_id': COMPUTE_ID, + 'lb_network_ip': '192.168.1.2', + 'vrrp_ip': '192.168.1.5', + 'ha_ip': '192.168.1.10', + 'vrrp_port_id': VRRP_PORT_ID, + 'ha_port_id': HA_PORT_ID, + 'cert_expiration': '2019-09-19 00:34:51', + 'cert_busy': 0, + 'role': 'MASTER', + 'status': 'ALLOCATED', + 'vrrp_interface': 'eth1', + 'vrrp_id': 1, + 'vrrp_priority': 100, + 'cached_zone': 'zone1', + 'created_at': '2017-05-10T18:14:44', + 'updated_at': '2017-05-10T23:08:12', + 'image_id': IMAGE_ID, + 'compute_flavor': COMPUTE_FLAVOR, +} + + +class TestAmphora(base.TestCase): + def test_basic(self): + 
test_amphora = amphora.Amphora() + self.assertEqual('amphora', test_amphora.resource_key) + self.assertEqual('amphorae', test_amphora.resources_key) + self.assertEqual('/octavia/amphorae', test_amphora.base_path) + self.assertFalse(test_amphora.allow_create) + self.assertTrue(test_amphora.allow_fetch) + self.assertFalse(test_amphora.allow_commit) + self.assertFalse(test_amphora.allow_delete) + self.assertTrue(test_amphora.allow_list) + + def test_make_it(self): + test_amphora = amphora.Amphora(**EXAMPLE) + self.assertEqual(IDENTIFIER, test_amphora.id) + self.assertEqual(LB_ID, test_amphora.loadbalancer_id) + self.assertEqual(COMPUTE_ID, test_amphora.compute_id) + self.assertEqual(EXAMPLE['lb_network_ip'], test_amphora.lb_network_ip) + self.assertEqual(EXAMPLE['vrrp_ip'], test_amphora.vrrp_ip) + self.assertEqual(EXAMPLE['ha_ip'], test_amphora.ha_ip) + self.assertEqual(VRRP_PORT_ID, test_amphora.vrrp_port_id) + self.assertEqual(HA_PORT_ID, test_amphora.ha_port_id) + self.assertEqual( + EXAMPLE['cert_expiration'], test_amphora.cert_expiration + ) + self.assertEqual(EXAMPLE['cert_busy'], test_amphora.cert_busy) + self.assertEqual(EXAMPLE['role'], test_amphora.role) + self.assertEqual(EXAMPLE['status'], test_amphora.status) + self.assertEqual( + EXAMPLE['vrrp_interface'], test_amphora.vrrp_interface + ) + self.assertEqual(EXAMPLE['vrrp_id'], test_amphora.vrrp_id) + self.assertEqual(EXAMPLE['vrrp_priority'], test_amphora.vrrp_priority) + self.assertEqual(EXAMPLE['cached_zone'], test_amphora.cached_zone) + self.assertEqual(EXAMPLE['created_at'], test_amphora.created_at) + self.assertEqual(EXAMPLE['updated_at'], test_amphora.updated_at) + self.assertEqual(IMAGE_ID, test_amphora.image_id) + self.assertEqual(COMPUTE_FLAVOR, test_amphora.compute_flavor) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'id': 'id', + 'loadbalancer_id': 'loadbalancer_id', + 'compute_flavor': 'compute_flavor', + 'compute_id': 'compute_id', + 'lb_network_ip': 
'lb_network_ip', + 'vrrp_ip': 'vrrp_ip', + 'ha_ip': 'ha_ip', + 'vrrp_port_id': 'vrrp_port_id', + 'ha_port_id': 'ha_port_id', + 'cert_expiration': 'cert_expiration', + 'cert_busy': 'cert_busy', + 'role': 'role', + 'status': 'status', + 'vrrp_interface': 'vrrp_interface', + 'vrrp_id': 'vrrp_id', + 'vrrp_priority': 'vrrp_priority', + 'cached_zone': 'cached_zone', + 'created_at': 'created_at', + 'updated_at': 'updated_at', + 'image_id': 'image_id', + }, + test_amphora._query_mapping._mapping, + ) + + +class TestAmphoraConfig(base.TestCase): + def test_basic(self): + test_amp_config = amphora.AmphoraConfig() + self.assertEqual( + '/octavia/amphorae/%(amphora_id)s/config', + test_amp_config.base_path, + ) + self.assertFalse(test_amp_config.allow_create) + self.assertFalse(test_amp_config.allow_fetch) + self.assertTrue(test_amp_config.allow_commit) + self.assertFalse(test_amp_config.allow_delete) + self.assertFalse(test_amp_config.allow_list) + + +class TestAmphoraFailover(base.TestCase): + def test_basic(self): + test_amp_failover = amphora.AmphoraFailover() + self.assertEqual( + '/octavia/amphorae/%(amphora_id)s/failover', + test_amp_failover.base_path, + ) + self.assertFalse(test_amp_failover.allow_create) + self.assertFalse(test_amp_failover.allow_fetch) + self.assertTrue(test_amp_failover.allow_commit) + self.assertFalse(test_amp_failover.allow_delete) + self.assertFalse(test_amp_failover.allow_list) diff --git a/openstack/tests/unit/load_balancer/test_availability_zone.py b/openstack/tests/unit/load_balancer/test_availability_zone.py new file mode 100644 index 0000000000..2cdbad6abe --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_availability_zone.py @@ -0,0 +1,69 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.load_balancer.v2 import availability_zone +from openstack.tests.unit import base + + +AVAILABILITY_ZONE_PROFILE_ID = uuid.uuid4() +EXAMPLE = { + 'name': 'strawberry', + 'description': 'tasty', + 'is_enabled': False, + 'availability_zone_profile_id': AVAILABILITY_ZONE_PROFILE_ID, +} + + +class TestAvailabilityZone(base.TestCase): + def test_basic(self): + test_availability_zone = availability_zone.AvailabilityZone() + self.assertEqual( + 'availability_zone', test_availability_zone.resource_key + ) + self.assertEqual( + 'availability_zones', test_availability_zone.resources_key + ) + self.assertEqual( + '/lbaas/availabilityzones', test_availability_zone.base_path + ) + self.assertTrue(test_availability_zone.allow_create) + self.assertTrue(test_availability_zone.allow_fetch) + self.assertTrue(test_availability_zone.allow_commit) + self.assertTrue(test_availability_zone.allow_delete) + self.assertTrue(test_availability_zone.allow_list) + + def test_make_it(self): + test_availability_zone = availability_zone.AvailabilityZone(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], test_availability_zone.name) + self.assertEqual( + EXAMPLE['description'], test_availability_zone.description + ) + self.assertFalse(test_availability_zone.is_enabled) + self.assertEqual( + EXAMPLE['availability_zone_profile_id'], + test_availability_zone.availability_zone_profile_id, + ) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'description': 'description', + 'is_enabled': 'enabled', + 
'availability_zone_profile_id': 'availability_zone_profile_id', + }, + test_availability_zone._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_availability_zone_profile.py b/openstack/tests/unit/load_balancer/test_availability_zone_profile.py new file mode 100644 index 0000000000..bdc87da181 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_availability_zone_profile.py @@ -0,0 +1,69 @@ +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.load_balancer.v2 import availability_zone_profile +from openstack.tests.unit import base + + +IDENTIFIER = uuid.uuid4() +EXAMPLE = { + 'id': IDENTIFIER, + 'name': 'acidic', + 'provider_name': 'best', + 'availability_zone_data': '{"loadbalancer_topology": "SINGLE"}', +} + + +class TestAvailabilityZoneProfile(base.TestCase): + def test_basic(self): + test_profile = availability_zone_profile.AvailabilityZoneProfile() + self.assertEqual( + 'availability_zone_profile', test_profile.resource_key + ) + self.assertEqual( + 'availability_zone_profiles', test_profile.resources_key + ) + self.assertEqual( + '/lbaas/availabilityzoneprofiles', test_profile.base_path + ) + self.assertTrue(test_profile.allow_create) + self.assertTrue(test_profile.allow_fetch) + self.assertTrue(test_profile.allow_commit) + self.assertTrue(test_profile.allow_delete) + self.assertTrue(test_profile.allow_list) + + def test_make_it(self): + test_profile = 
availability_zone_profile.AvailabilityZoneProfile( + **EXAMPLE + ) + self.assertEqual(EXAMPLE['id'], test_profile.id) + self.assertEqual(EXAMPLE['name'], test_profile.name) + self.assertEqual(EXAMPLE['provider_name'], test_profile.provider_name) + self.assertEqual( + EXAMPLE['availability_zone_data'], + test_profile.availability_zone_data, + ) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'id': 'id', + 'name': 'name', + 'provider_name': 'provider_name', + 'availability_zone_data': 'availability_zone_data', + }, + test_profile._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_flavor.py b/openstack/tests/unit/load_balancer/test_flavor.py new file mode 100644 index 0000000000..ce199efac4 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_flavor.py @@ -0,0 +1,65 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.load_balancer.v2 import flavor +from openstack.tests.unit import base + + +IDENTIFIER = uuid.uuid4() +FLAVOR_PROFILE_ID = uuid.uuid4() +EXAMPLE = { + 'id': IDENTIFIER, + 'name': 'strawberry', + 'description': 'tasty', + 'is_enabled': False, + 'flavor_profile_id': FLAVOR_PROFILE_ID, +} + + +class TestFlavor(base.TestCase): + def test_basic(self): + test_flavor = flavor.Flavor() + self.assertEqual('flavor', test_flavor.resource_key) + self.assertEqual('flavors', test_flavor.resources_key) + self.assertEqual('/lbaas/flavors', test_flavor.base_path) + self.assertTrue(test_flavor.allow_create) + self.assertTrue(test_flavor.allow_fetch) + self.assertTrue(test_flavor.allow_commit) + self.assertTrue(test_flavor.allow_delete) + self.assertTrue(test_flavor.allow_list) + + def test_make_it(self): + test_flavor = flavor.Flavor(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], test_flavor.id) + self.assertEqual(EXAMPLE['name'], test_flavor.name) + self.assertEqual(EXAMPLE['description'], test_flavor.description) + self.assertFalse(test_flavor.is_enabled) + self.assertEqual( + EXAMPLE['flavor_profile_id'], test_flavor.flavor_profile_id + ) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'id': 'id', + 'name': 'name', + 'description': 'description', + 'is_enabled': 'enabled', + 'flavor_profile_id': 'flavor_profile_id', + }, + test_flavor._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_flavor_profile.py b/openstack/tests/unit/load_balancer/test_flavor_profile.py new file mode 100644 index 0000000000..5b9d9fc7a5 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_flavor_profile.py @@ -0,0 +1,59 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.load_balancer.v2 import flavor_profile +from openstack.tests.unit import base + + +IDENTIFIER = uuid.uuid4() +EXAMPLE = { + 'id': IDENTIFIER, + 'name': 'acidic', + 'provider_name': 'best', + 'flavor_data': '{"loadbalancer_topology": "SINGLE"}', +} + + +class TestFlavorProfile(base.TestCase): + def test_basic(self): + test_profile = flavor_profile.FlavorProfile() + self.assertEqual('flavorprofile', test_profile.resource_key) + self.assertEqual('flavorprofiles', test_profile.resources_key) + self.assertEqual('/lbaas/flavorprofiles', test_profile.base_path) + self.assertTrue(test_profile.allow_create) + self.assertTrue(test_profile.allow_fetch) + self.assertTrue(test_profile.allow_commit) + self.assertTrue(test_profile.allow_delete) + self.assertTrue(test_profile.allow_list) + + def test_make_it(self): + test_profile = flavor_profile.FlavorProfile(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], test_profile.id) + self.assertEqual(EXAMPLE['name'], test_profile.name) + self.assertEqual(EXAMPLE['provider_name'], test_profile.provider_name) + self.assertEqual(EXAMPLE['flavor_data'], test_profile.flavor_data) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'id': 'id', + 'name': 'name', + 'provider_name': 'provider_name', + 'flavor_data': 'flavor_data', + }, + test_profile._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_health_monitor.py b/openstack/tests/unit/load_balancer/test_health_monitor.py new file mode 100644 index 0000000000..7db7329e4f --- /dev/null +++ 
b/openstack/tests/unit/load_balancer/test_health_monitor.py @@ -0,0 +1,103 @@ +# Copyright 2017 Rackspace, US Inc. +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.load_balancer.v2 import health_monitor +from openstack.tests.unit import base + + +EXAMPLE = { + 'admin_state_up': True, + 'created_at': '2017-07-17T12:14:57.233772', + 'delay': 10, + 'expected_codes': '200, 202', + 'http_method': 'HEAD', + 'id': uuid.uuid4(), + 'max_retries': 2, + 'max_retries_down': 3, + 'name': 'test_health_monitor', + 'operating_status': 'ONLINE', + 'pools': [{'id': uuid.uuid4()}], + 'pool_id': uuid.uuid4(), + 'project_id': uuid.uuid4(), + 'provisioning_status': 'ACTIVE', + 'timeout': 4, + 'type': 'HTTP', + 'updated_at': '2017-07-17T12:16:57.233772', + 'url_path': '/health_page.html', +} + + +class TestPoolHealthMonitor(base.TestCase): + def test_basic(self): + test_hm = health_monitor.HealthMonitor() + self.assertEqual('healthmonitor', test_hm.resource_key) + self.assertEqual('healthmonitors', test_hm.resources_key) + self.assertEqual('/lbaas/healthmonitors', test_hm.base_path) + self.assertTrue(test_hm.allow_create) + self.assertTrue(test_hm.allow_fetch) + self.assertTrue(test_hm.allow_commit) + self.assertTrue(test_hm.allow_delete) + self.assertTrue(test_hm.allow_list) + + def test_make_it(self): + test_hm = health_monitor.HealthMonitor(**EXAMPLE) + self.assertTrue(test_hm.is_admin_state_up) + self.assertEqual(EXAMPLE['created_at'], test_hm.created_at) + 
self.assertEqual(EXAMPLE['delay'], test_hm.delay) + self.assertEqual(EXAMPLE['expected_codes'], test_hm.expected_codes) + self.assertEqual(EXAMPLE['http_method'], test_hm.http_method) + self.assertEqual(EXAMPLE['id'], test_hm.id) + self.assertEqual(EXAMPLE['max_retries'], test_hm.max_retries) + self.assertEqual(EXAMPLE['max_retries_down'], test_hm.max_retries_down) + self.assertEqual(EXAMPLE['name'], test_hm.name) + self.assertEqual(EXAMPLE['operating_status'], test_hm.operating_status) + self.assertEqual(EXAMPLE['pools'], test_hm.pools) + self.assertEqual(EXAMPLE['pool_id'], test_hm.pool_id) + self.assertEqual(EXAMPLE['project_id'], test_hm.project_id) + self.assertEqual( + EXAMPLE['provisioning_status'], test_hm.provisioning_status + ) + self.assertEqual(EXAMPLE['timeout'], test_hm.timeout) + self.assertEqual(EXAMPLE['type'], test_hm.type) + self.assertEqual(EXAMPLE['updated_at'], test_hm.updated_at) + self.assertEqual(EXAMPLE['url_path'], test_hm.url_path) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'created_at': 'created_at', + 'updated_at': 'updated_at', + 'name': 'name', + 'project_id': 'project_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + 'operating_status': 'operating_status', + 'provisioning_status': 'provisioning_status', + 'is_admin_state_up': 'admin_state_up', + 'delay': 'delay', + 'expected_codes': 'expected_codes', + 'http_method': 'http_method', + 'max_retries': 'max_retries', + 'max_retries_down': 'max_retries_down', + 'pool_id': 'pool_id', + 'timeout': 'timeout', + 'type': 'type', + 'url_path': 'url_path', + }, + test_hm._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_l7policy.py b/openstack/tests/unit/load_balancer/test_l7policy.py new file mode 100644 index 0000000000..bb8768abc1 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_l7policy.py @@ -0,0 +1,100 @@ +# Licensed under the Apache License, Version 2.0 
(the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.load_balancer.v2 import l7_policy +from openstack.tests.unit import base + + +EXAMPLE = { + 'action': 'REJECT', + 'admin_state_up': True, + 'created_at': '2017-07-17T12:14:57.233772', + 'description': 'test_description', + 'id': uuid.uuid4(), + 'listener_id': uuid.uuid4(), + 'name': 'test_l7_policy', + 'operating_status': 'ONLINE', + 'position': 7, + 'project_id': uuid.uuid4(), + 'provisioning_status': 'ACTIVE', + 'redirect_pool_id': uuid.uuid4(), + 'redirect_prefix': 'https://www.example.com', + 'redirect_url': '/test_url', + 'rules': [{'id': uuid.uuid4()}], + 'updated_at': '2017-07-17T12:16:57.233772', +} + + +class TestL7Policy(base.TestCase): + def test_basic(self): + test_l7_policy = l7_policy.L7Policy() + self.assertEqual('l7policy', test_l7_policy.resource_key) + self.assertEqual('l7policies', test_l7_policy.resources_key) + self.assertEqual('/lbaas/l7policies', test_l7_policy.base_path) + self.assertTrue(test_l7_policy.allow_create) + self.assertTrue(test_l7_policy.allow_fetch) + self.assertTrue(test_l7_policy.allow_commit) + self.assertTrue(test_l7_policy.allow_delete) + self.assertTrue(test_l7_policy.allow_list) + + def test_make_it(self): + test_l7_policy = l7_policy.L7Policy(**EXAMPLE) + self.assertTrue(test_l7_policy.is_admin_state_up) + self.assertEqual(EXAMPLE['action'], test_l7_policy.action) + self.assertEqual(EXAMPLE['created_at'], test_l7_policy.created_at) + self.assertEqual(EXAMPLE['description'], 
test_l7_policy.description) + self.assertEqual(EXAMPLE['id'], test_l7_policy.id) + self.assertEqual(EXAMPLE['listener_id'], test_l7_policy.listener_id) + self.assertEqual(EXAMPLE['name'], test_l7_policy.name) + self.assertEqual( + EXAMPLE['operating_status'], test_l7_policy.operating_status + ) + self.assertEqual(EXAMPLE['position'], test_l7_policy.position) + self.assertEqual(EXAMPLE['project_id'], test_l7_policy.project_id) + self.assertEqual( + EXAMPLE['provisioning_status'], test_l7_policy.provisioning_status + ) + self.assertEqual( + EXAMPLE['redirect_pool_id'], test_l7_policy.redirect_pool_id + ) + self.assertEqual( + EXAMPLE['redirect_prefix'], test_l7_policy.redirect_prefix + ) + self.assertEqual(EXAMPLE['redirect_url'], test_l7_policy.redirect_url) + self.assertEqual(EXAMPLE['rules'], test_l7_policy.rules) + self.assertEqual(EXAMPLE['updated_at'], test_l7_policy.updated_at) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'description': 'description', + 'project_id': 'project_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + 'operating_status': 'operating_status', + 'provisioning_status': 'provisioning_status', + 'is_admin_state_up': 'admin_state_up', + 'action': 'action', + 'listener_id': 'listener_id', + 'position': 'position', + 'redirect_pool_id': 'redirect_pool_id', + 'redirect_url': 'redirect_url', + 'redirect_prefix': 'redirect_prefix', + }, + test_l7_policy._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_l7rule.py b/openstack/tests/unit/load_balancer/test_l7rule.py new file mode 100644 index 0000000000..acded4a333 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_l7rule.py @@ -0,0 +1,92 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import uuid + +from openstack.load_balancer.v2 import l7_rule +from openstack.tests.unit import base + + +EXAMPLE = { + 'admin_state_up': True, + 'compare_type': 'REGEX', + 'created_at': '2017-08-17T12:14:57.233772', + 'id': uuid.uuid4(), + 'invert': False, + 'key': 'my_cookie', + 'l7_policy_id': uuid.uuid4(), + 'operating_status': 'ONLINE', + 'project_id': uuid.uuid4(), + 'provisioning_status': 'ACTIVE', + 'type': 'COOKIE', + 'updated_at': '2017-08-17T12:16:57.233772', + 'value': 'chocolate', +} + + +class TestL7Rule(base.TestCase): + def test_basic(self): + test_l7rule = l7_rule.L7Rule() + self.assertEqual('rule', test_l7rule.resource_key) + self.assertEqual('rules', test_l7rule.resources_key) + self.assertEqual( + '/lbaas/l7policies/%(l7policy_id)s/rules', test_l7rule.base_path + ) + self.assertTrue(test_l7rule.allow_create) + self.assertTrue(test_l7rule.allow_fetch) + self.assertTrue(test_l7rule.allow_commit) + self.assertTrue(test_l7rule.allow_delete) + self.assertTrue(test_l7rule.allow_list) + + def test_make_it(self): + test_l7rule = l7_rule.L7Rule(**EXAMPLE) + self.assertTrue(test_l7rule.is_admin_state_up) + self.assertEqual(EXAMPLE['compare_type'], test_l7rule.compare_type) + self.assertEqual(EXAMPLE['created_at'], test_l7rule.created_at) + self.assertEqual(EXAMPLE['id'], test_l7rule.id) + self.assertEqual(EXAMPLE['invert'], test_l7rule.invert) + self.assertEqual(EXAMPLE['key'], test_l7rule.key) + self.assertEqual(EXAMPLE['l7_policy_id'], test_l7rule.l7_policy_id) + self.assertEqual( + EXAMPLE['operating_status'], test_l7rule.operating_status + 
) + self.assertEqual(EXAMPLE['project_id'], test_l7rule.project_id) + self.assertEqual( + EXAMPLE['provisioning_status'], test_l7rule.provisioning_status + ) + self.assertEqual(EXAMPLE['type'], test_l7rule.type) + self.assertEqual(EXAMPLE['updated_at'], test_l7rule.updated_at) + self.assertEqual(EXAMPLE['value'], test_l7rule.rule_value) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'created_at': 'created_at', + 'updated_at': 'updated_at', + 'project_id': 'project_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + 'operating_status': 'operating_status', + 'provisioning_status': 'provisioning_status', + 'is_admin_state_up': 'admin_state_up', + 'compare_type': 'compare_type', + 'invert': 'invert', + 'key': 'key', + 'type': 'type', + 'rule_value': 'rule_value', + 'l7_policy_id': 'l7policy_id', + }, + test_l7rule._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_listener.py b/openstack/tests/unit/load_balancer/test_listener.py new file mode 100644 index 0000000000..2840564379 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_listener.py @@ -0,0 +1,202 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.load_balancer.v2 import listener +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'admin_state_up': True, + 'allowed_cidrs': ['192.168.1.0/24'], + 'connection_limit': '2', + 'default_pool_id': uuid.uuid4(), + 'description': 'test description', + 'id': IDENTIFIER, + 'insert_headers': {"X-Forwarded-For": "true"}, + 'l7policies': [{'id': uuid.uuid4()}], + 'loadbalancers': [{'id': uuid.uuid4()}], + 'name': 'test_listener', + 'project_id': uuid.uuid4(), + 'protocol': 'TEST_PROTOCOL', + 'protocol_port': 10, + 'default_tls_container_ref': ( + 'http://198.51.100.10:9311/v1/containers/' + 'a570068c-d295-4780-91d4-3046a325db51' + ), + 'sni_container_refs': [], + 'created_at': '2017-07-17T12:14:57.233772', + 'updated_at': '2017-07-17T12:16:57.233772', + 'operating_status': 'ONLINE', + 'provisioning_status': 'ACTIVE', + 'hsts_include_subdomains': True, + 'hsts_max_age': 30_000_000, + 'hsts_preload': False, + 'timeout_client_data': 50000, + 'timeout_member_connect': 5000, + 'timeout_member_data': 50000, + 'timeout_tcp_inspect': 0, + 'tls_ciphers': 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256', + 'tls_versions': ['TLSv1.1', 'TLSv1.2'], + 'alpn_protocols': ['h2', 'http/1.1', 'http/1.0'], +} + +EXAMPLE_STATS = { + 'active_connections': 1, + 'bytes_in': 2, + 'bytes_out': 3, + 'request_errors': 4, + 'total_connections': 5, +} + + +class TestListener(base.TestCase): + def test_basic(self): + test_listener = listener.Listener() + self.assertEqual('listener', test_listener.resource_key) + self.assertEqual('listeners', test_listener.resources_key) + self.assertEqual('/lbaas/listeners', test_listener.base_path) + self.assertTrue(test_listener.allow_create) + self.assertTrue(test_listener.allow_fetch) + self.assertTrue(test_listener.allow_commit) + self.assertTrue(test_listener.allow_delete) + self.assertTrue(test_listener.allow_list) + + def test_make_it(self): + test_listener = listener.Listener(**EXAMPLE) 
+ self.assertTrue(test_listener.is_admin_state_up) + self.assertEqual(EXAMPLE['allowed_cidrs'], test_listener.allowed_cidrs) + self.assertEqual( + EXAMPLE['connection_limit'], test_listener.connection_limit + ) + self.assertEqual( + EXAMPLE['default_pool_id'], test_listener.default_pool_id + ) + self.assertEqual(EXAMPLE['description'], test_listener.description) + self.assertEqual(EXAMPLE['id'], test_listener.id) + self.assertEqual( + EXAMPLE['insert_headers'], test_listener.insert_headers + ) + self.assertEqual(EXAMPLE['l7policies'], test_listener.l7_policies) + self.assertEqual( + EXAMPLE['loadbalancers'], test_listener.load_balancers + ) + self.assertEqual(EXAMPLE['name'], test_listener.name) + self.assertEqual(EXAMPLE['project_id'], test_listener.project_id) + self.assertEqual(EXAMPLE['protocol'], test_listener.protocol) + self.assertEqual(EXAMPLE['protocol_port'], test_listener.protocol_port) + self.assertEqual( + EXAMPLE['default_tls_container_ref'], + test_listener.default_tls_container_ref, + ) + self.assertEqual( + EXAMPLE['sni_container_refs'], test_listener.sni_container_refs + ) + self.assertEqual(EXAMPLE['created_at'], test_listener.created_at) + self.assertEqual(EXAMPLE['updated_at'], test_listener.updated_at) + self.assertTrue(test_listener.is_hsts_include_subdomains) + self.assertEqual(EXAMPLE['hsts_max_age'], test_listener.hsts_max_age) + self.assertFalse(test_listener.is_hsts_preload) + self.assertEqual( + EXAMPLE['provisioning_status'], test_listener.provisioning_status + ) + self.assertEqual( + EXAMPLE['operating_status'], test_listener.operating_status + ) + self.assertEqual( + EXAMPLE['timeout_client_data'], test_listener.timeout_client_data + ) + self.assertEqual( + EXAMPLE['timeout_member_connect'], + test_listener.timeout_member_connect, + ) + self.assertEqual( + EXAMPLE['timeout_member_data'], test_listener.timeout_member_data + ) + self.assertEqual( + EXAMPLE['timeout_tcp_inspect'], test_listener.timeout_tcp_inspect + ) + 
self.assertEqual(EXAMPLE['tls_ciphers'], test_listener.tls_ciphers) + self.assertEqual(EXAMPLE['tls_versions'], test_listener.tls_versions) + self.assertEqual( + EXAMPLE['alpn_protocols'], test_listener.alpn_protocols + ) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'created_at': 'created_at', + 'updated_at': 'updated_at', + 'description': 'description', + 'name': 'name', + 'project_id': 'project_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + 'operating_status': 'operating_status', + 'provisioning_status': 'provisioning_status', + 'is_admin_state_up': 'admin_state_up', + 'is_hsts_include_subdomains': 'hsts_include_subdomains', + 'hsts_max_age': 'hsts_max_age', + 'is_hsts_preload': 'hsts_preload', + 'allowed_cidrs': 'allowed_cidrs', + 'connection_limit': 'connection_limit', + 'default_pool_id': 'default_pool_id', + 'default_tls_container_ref': 'default_tls_container_ref', + 'sni_container_refs': 'sni_container_refs', + 'insert_headers': 'insert_headers', + 'load_balancer_id': 'load_balancer_id', + 'protocol': 'protocol', + 'protocol_port': 'protocol_port', + 'timeout_client_data': 'timeout_client_data', + 'timeout_member_connect': 'timeout_member_connect', + 'timeout_member_data': 'timeout_member_data', + 'timeout_tcp_inspect': 'timeout_tcp_inspect', + 'tls_ciphers': 'tls_ciphers', + 'tls_versions': 'tls_versions', + 'alpn_protocols': 'alpn_protocols', + }, + test_listener._query_mapping._mapping, + ) + + +class TestListenerStats(base.TestCase): + def test_basic(self): + test_listener = listener.ListenerStats() + self.assertEqual('stats', test_listener.resource_key) + self.assertEqual( + '/lbaas/listeners/%(listener_id)s/stats', test_listener.base_path + ) + self.assertFalse(test_listener.allow_create) + self.assertTrue(test_listener.allow_fetch) + self.assertFalse(test_listener.allow_delete) + self.assertFalse(test_listener.allow_list) + 
self.assertFalse(test_listener.allow_commit) + + def test_make_it(self): + test_listener = listener.ListenerStats(**EXAMPLE_STATS) + self.assertEqual( + EXAMPLE_STATS['active_connections'], + test_listener.active_connections, + ) + self.assertEqual(EXAMPLE_STATS['bytes_in'], test_listener.bytes_in) + self.assertEqual(EXAMPLE_STATS['bytes_out'], test_listener.bytes_out) + self.assertEqual( + EXAMPLE_STATS['request_errors'], test_listener.request_errors + ) + self.assertEqual( + EXAMPLE_STATS['total_connections'], test_listener.total_connections + ) diff --git a/openstack/tests/unit/load_balancer/test_load_balancer.py b/openstack/tests/unit/load_balancer/test_load_balancer.py new file mode 100644 index 0000000000..607bc97b02 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_load_balancer.py @@ -0,0 +1,222 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+
+from unittest import mock
+import uuid
+
+from openstack.load_balancer.v2 import load_balancer
+from openstack.tests.unit import base
+
+IDENTIFIER = 'IDENTIFIER'
+EXAMPLE = {
+    'admin_state_up': True,
+    'availability_zone': 'my_fake_az',
+    'created_at': '2017-07-17T12:14:57.233772',
+    'description': 'fake_description',
+    'flavor_id': uuid.uuid4(),
+    'id': IDENTIFIER,
+    'listeners': [{'id': uuid.uuid4()}],
+    'name': 'test_load_balancer',
+    'operating_status': 'ONLINE',
+    'pools': [{'id': uuid.uuid4()}],
+    'project_id': uuid.uuid4(),
+    'provider': 'fake_provider',
+    'provisioning_status': 'ACTIVE',
+    'updated_at': '2017-07-17T12:16:57.233772',
+    'vip_address': '192.0.2.5',
+    'vip_network_id': uuid.uuid4(),
+    'vip_port_id': uuid.uuid4(),
+    'vip_subnet_id': uuid.uuid4(),
+    'vip_qos_policy_id': uuid.uuid4(),
+    'additional_vips': [
+        {'subnet_id': uuid.uuid4(), 'ip_address': '192.0.2.6'},
+        {'subnet_id': uuid.uuid4(), 'ip_address': '192.0.2.7'},
+    ],
+}
+
+EXAMPLE_STATS = {
+    'active_connections': 1,
+    'bytes_in': 2,
+    'bytes_out': 3,
+    'request_errors': 4,
+    'total_connections': 5,
+}
+
+
+class TestLoadBalancer(base.TestCase):
+    def test_basic(self):
+        test_load_balancer = load_balancer.LoadBalancer()
+        self.assertEqual('loadbalancer', test_load_balancer.resource_key)
+        self.assertEqual('loadbalancers', test_load_balancer.resources_key)
+        self.assertEqual('/lbaas/loadbalancers', test_load_balancer.base_path)
+        self.assertTrue(test_load_balancer.allow_create)
+        self.assertTrue(test_load_balancer.allow_fetch)
+        self.assertTrue(test_load_balancer.allow_delete)
+        self.assertTrue(test_load_balancer.allow_list)
+        self.assertTrue(test_load_balancer.allow_commit)
+
+    def test_make_it(self):
+        test_load_balancer = load_balancer.LoadBalancer(**EXAMPLE)
+        self.assertTrue(test_load_balancer.is_admin_state_up)
+        self.assertEqual(
+            EXAMPLE['availability_zone'], test_load_balancer.availability_zone
+        )
+        self.assertEqual(EXAMPLE['created_at'], test_load_balancer.created_at)
+
self.assertEqual( + EXAMPLE['description'], test_load_balancer.description + ) + self.assertEqual(EXAMPLE['flavor_id'], test_load_balancer.flavor_id) + self.assertEqual(EXAMPLE['id'], test_load_balancer.id) + self.assertEqual(EXAMPLE['listeners'], test_load_balancer.listeners) + self.assertEqual(EXAMPLE['name'], test_load_balancer.name) + self.assertEqual( + EXAMPLE['operating_status'], test_load_balancer.operating_status + ) + self.assertEqual(EXAMPLE['pools'], test_load_balancer.pools) + self.assertEqual(EXAMPLE['project_id'], test_load_balancer.project_id) + self.assertEqual(EXAMPLE['provider'], test_load_balancer.provider) + self.assertEqual( + EXAMPLE['provisioning_status'], + test_load_balancer.provisioning_status, + ) + self.assertEqual(EXAMPLE['updated_at'], test_load_balancer.updated_at) + self.assertEqual( + EXAMPLE['vip_address'], test_load_balancer.vip_address + ) + self.assertEqual( + EXAMPLE['vip_network_id'], test_load_balancer.vip_network_id + ) + self.assertEqual( + EXAMPLE['vip_port_id'], test_load_balancer.vip_port_id + ) + self.assertEqual( + EXAMPLE['vip_subnet_id'], test_load_balancer.vip_subnet_id + ) + self.assertEqual( + EXAMPLE['vip_qos_policy_id'], test_load_balancer.vip_qos_policy_id + ) + self.assertEqual( + EXAMPLE['additional_vips'], test_load_balancer.additional_vips + ) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'availability_zone': 'availability_zone', + 'description': 'description', + 'flavor_id': 'flavor_id', + 'name': 'name', + 'project_id': 'project_id', + 'provider': 'provider', + 'operating_status': 'operating_status', + 'provisioning_status': 'provisioning_status', + 'is_admin_state_up': 'admin_state_up', + 'vip_address': 'vip_address', + 'vip_network_id': 'vip_network_id', + 'vip_port_id': 'vip_port_id', + 'vip_subnet_id': 'vip_subnet_id', + 'vip_qos_policy_id': 'vip_qos_policy_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + }, + 
test_load_balancer._query_mapping._mapping, + ) + + def test_delete_non_cascade(self): + sess = mock.Mock() + resp = mock.Mock() + sess.delete.return_value = resp + + sot = load_balancer.LoadBalancer(**EXAMPLE) + sot.cascade = False + sot._translate_response = mock.Mock() + sot.delete(sess) + + url = 'lbaas/loadbalancers/{lb}'.format(lb=EXAMPLE['id']) + params = {} + sess.delete.assert_called_with(url, params=params) + sot._translate_response.assert_called_once_with( + resp, + error_message=None, + has_body=False, + ) + + def test_delete_cascade(self): + sess = mock.Mock() + resp = mock.Mock() + sess.delete.return_value = resp + + sot = load_balancer.LoadBalancer(**EXAMPLE) + sot.cascade = True + sot._translate_response = mock.Mock() + sot.delete(sess) + + url = 'lbaas/loadbalancers/{lb}'.format(lb=EXAMPLE['id']) + params = {'cascade': True} + sess.delete.assert_called_with(url, params=params) + sot._translate_response.assert_called_once_with( + resp, + error_message=None, + has_body=False, + ) + + +class TestLoadBalancerStats(base.TestCase): + def test_basic(self): + test_load_balancer = load_balancer.LoadBalancerStats() + self.assertEqual('stats', test_load_balancer.resource_key) + self.assertEqual( + '/lbaas/loadbalancers/%(lb_id)s/stats', + test_load_balancer.base_path, + ) + self.assertFalse(test_load_balancer.allow_create) + self.assertTrue(test_load_balancer.allow_fetch) + self.assertFalse(test_load_balancer.allow_delete) + self.assertFalse(test_load_balancer.allow_list) + self.assertFalse(test_load_balancer.allow_commit) + + def test_make_it(self): + test_load_balancer = load_balancer.LoadBalancerStats(**EXAMPLE_STATS) + self.assertEqual( + EXAMPLE_STATS['active_connections'], + test_load_balancer.active_connections, + ) + self.assertEqual( + EXAMPLE_STATS['bytes_in'], test_load_balancer.bytes_in + ) + self.assertEqual( + EXAMPLE_STATS['bytes_out'], test_load_balancer.bytes_out + ) + self.assertEqual( + EXAMPLE_STATS['request_errors'], 
test_load_balancer.request_errors + ) + self.assertEqual( + EXAMPLE_STATS['total_connections'], + test_load_balancer.total_connections, + ) + + +class TestLoadBalancerFailover(base.TestCase): + def test_basic(self): + test_load_balancer = load_balancer.LoadBalancerFailover() + self.assertEqual( + '/lbaas/loadbalancers/%(lb_id)s/failover', + test_load_balancer.base_path, + ) + self.assertFalse(test_load_balancer.allow_create) + self.assertFalse(test_load_balancer.allow_fetch) + self.assertFalse(test_load_balancer.allow_delete) + self.assertFalse(test_load_balancer.allow_list) + self.assertTrue(test_load_balancer.allow_commit) diff --git a/openstack/tests/unit/load_balancer/test_member.py b/openstack/tests/unit/load_balancer/test_member.py new file mode 100644 index 0000000000..9f4ab8d142 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_member.py @@ -0,0 +1,91 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.load_balancer.v2 import member +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'address': '192.0.2.16', + 'admin_state_up': True, + 'id': IDENTIFIER, + 'monitor_address': '192.0.2.17', + 'monitor_port': 9, + 'name': 'test_member', + 'pool_id': uuid.uuid4(), + 'project_id': uuid.uuid4(), + 'protocol_port': 5, + 'subnet_id': uuid.uuid4(), + 'weight': 7, + 'backup': False, +} + + +class TestPoolMember(base.TestCase): + def test_basic(self): + test_member = member.Member() + self.assertEqual('member', test_member.resource_key) + self.assertEqual('members', test_member.resources_key) + self.assertEqual( + '/lbaas/pools/%(pool_id)s/members', test_member.base_path + ) + self.assertTrue(test_member.allow_create) + self.assertTrue(test_member.allow_fetch) + self.assertTrue(test_member.allow_commit) + self.assertTrue(test_member.allow_delete) + self.assertTrue(test_member.allow_list) + + def test_make_it(self): + test_member = member.Member(**EXAMPLE) + self.assertEqual(EXAMPLE['address'], test_member.address) + self.assertTrue(test_member.is_admin_state_up) + self.assertEqual(EXAMPLE['id'], test_member.id) + self.assertEqual( + EXAMPLE['monitor_address'], test_member.monitor_address + ) + self.assertEqual(EXAMPLE['monitor_port'], test_member.monitor_port) + self.assertEqual(EXAMPLE['name'], test_member.name) + self.assertEqual(EXAMPLE['pool_id'], test_member.pool_id) + self.assertEqual(EXAMPLE['project_id'], test_member.project_id) + self.assertEqual(EXAMPLE['protocol_port'], test_member.protocol_port) + self.assertEqual(EXAMPLE['subnet_id'], test_member.subnet_id) + self.assertEqual(EXAMPLE['weight'], test_member.weight) + self.assertFalse(test_member.backup) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'created_at': 'created_at', + 'updated_at': 'updated_at', + 'name': 'name', + 'project_id': 'project_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', 
+ 'not_any_tags': 'not-tags-any', + 'operating_status': 'operating_status', + 'provisioning_status': 'provisioning_status', + 'is_admin_state_up': 'admin_state_up', + 'address': 'address', + 'protocol_port': 'protocol_port', + 'subnet_id': 'subnet_id', + 'weight': 'weight', + 'monitor_address': 'monitor_address', + 'monitor_port': 'monitor_port', + 'backup': 'backup', + }, + test_member._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_pool.py b/openstack/tests/unit/load_balancer/test_pool.py new file mode 100644 index 0000000000..53bc70dc60 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_pool.py @@ -0,0 +1,135 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.load_balancer.v2 import pool +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'name': 'test_pool', + 'description': 'fake_description', + 'admin_state_up': True, + 'provisioning_status': 'ACTIVE', + 'operating_status': 'ONLINE', + 'protocol': 'HTTP', + 'listener_id': uuid.uuid4(), + 'loadbalancer_id': uuid.uuid4(), + 'lb_algorithm': 'ROUND_ROBIN', + 'session_persistence': {"type": "SOURCE_IP"}, + 'project_id': uuid.uuid4(), + 'loadbalancers': [{'id': uuid.uuid4()}], + 'listeners': [{'id': uuid.uuid4()}], + 'created_at': '2017-07-17T12:14:57.233772', + 'updated_at': '2017-07-17T12:16:57.233772', + 'health_monitor': 'healthmonitor', + 'health_monitor_id': uuid.uuid4(), + 'members': [{'id': uuid.uuid4()}], + 'tls_enabled': True, + 'tls_ciphers': 'ECDHE-RSA-AES256-GCM-SHA384:ECDHE-RSA-AES128-GCM-SHA256', + 'tls_versions': ['TLSv1.1', 'TLSv1.2'], + 'alpn_protocols': ['h2', 'http/1.1', 'http/1.0'], + 'ca_tls_container_ref': ( + 'http://198.51.100.10:9311/v1/containers/' + 'a570068c-d295-4780-91d4-3046a325db52' + ), + 'crl_container_ref': ( + 'http://198.51.100.10:9311/v1/containers/' + 'a570068c-d295-4780-91d4-3046a325db53' + ), +} + + +class TestPool(base.TestCase): + def test_basic(self): + test_pool = pool.Pool() + self.assertEqual('pool', test_pool.resource_key) + self.assertEqual('pools', test_pool.resources_key) + self.assertEqual('/lbaas/pools', test_pool.base_path) + self.assertTrue(test_pool.allow_create) + self.assertTrue(test_pool.allow_fetch) + self.assertTrue(test_pool.allow_delete) + self.assertTrue(test_pool.allow_list) + self.assertTrue(test_pool.allow_commit) + + def test_make_it(self): + test_pool = pool.Pool(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], test_pool.name) + self.assertEqual(EXAMPLE['description'], test_pool.description) + self.assertEqual( + EXAMPLE['admin_state_up'], test_pool.is_admin_state_up + ) + self.assertEqual( + EXAMPLE['provisioning_status'], 
test_pool.provisioning_status + ) + self.assertEqual(EXAMPLE['protocol'], test_pool.protocol) + self.assertEqual( + EXAMPLE['operating_status'], test_pool.operating_status + ) + self.assertEqual(EXAMPLE['listener_id'], test_pool.listener_id) + self.assertEqual(EXAMPLE['loadbalancer_id'], test_pool.loadbalancer_id) + self.assertEqual(EXAMPLE['lb_algorithm'], test_pool.lb_algorithm) + self.assertEqual( + EXAMPLE['session_persistence'], test_pool.session_persistence + ) + self.assertEqual(EXAMPLE['project_id'], test_pool.project_id) + self.assertEqual(EXAMPLE['loadbalancers'], test_pool.loadbalancers) + self.assertEqual(EXAMPLE['listeners'], test_pool.listeners) + self.assertEqual(EXAMPLE['created_at'], test_pool.created_at) + self.assertEqual(EXAMPLE['updated_at'], test_pool.updated_at) + self.assertEqual( + EXAMPLE['health_monitor_id'], test_pool.health_monitor_id + ) + self.assertEqual(EXAMPLE['members'], test_pool.members) + self.assertEqual(EXAMPLE['tls_enabled'], test_pool.tls_enabled) + self.assertEqual(EXAMPLE['tls_ciphers'], test_pool.tls_ciphers) + self.assertEqual(EXAMPLE['tls_versions'], test_pool.tls_versions) + self.assertEqual(EXAMPLE['alpn_protocols'], test_pool.alpn_protocols) + self.assertEqual( + EXAMPLE['ca_tls_container_ref'], test_pool.ca_tls_container_ref + ) + self.assertEqual( + EXAMPLE['crl_container_ref'], test_pool.crl_container_ref + ) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'created_at': 'created_at', + 'updated_at': 'updated_at', + 'description': 'description', + 'name': 'name', + 'project_id': 'project_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + 'operating_status': 'operating_status', + 'provisioning_status': 'provisioning_status', + 'is_admin_state_up': 'admin_state_up', + 'health_monitor_id': 'health_monitor_id', + 'lb_algorithm': 'lb_algorithm', + 'listener_id': 'listener_id', + 'loadbalancer_id': 'loadbalancer_id', + 'protocol': 
'protocol', + 'tls_enabled': 'tls_enabled', + 'tls_ciphers': 'tls_ciphers', + 'tls_versions': 'tls_versions', + 'alpn_protocols': 'alpn_protocols', + 'ca_tls_container_ref': 'ca_tls_container_ref', + 'crl_container_ref': 'crl_container_ref', + }, + test_pool._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_provider.py b/openstack/tests/unit/load_balancer/test_provider.py new file mode 100644 index 0000000000..bcbf7ed093 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_provider.py @@ -0,0 +1,76 @@ +# Copyright 2019 Rackspace, US Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.load_balancer.v2 import provider +from openstack.tests.unit import base + + +EXAMPLE = {'name': 'best', 'description': 'The best provider'} + + +class TestProvider(base.TestCase): + def test_basic(self): + test_provider = provider.Provider() + self.assertEqual('providers', test_provider.resources_key) + self.assertEqual('/lbaas/providers', test_provider.base_path) + self.assertFalse(test_provider.allow_create) + self.assertFalse(test_provider.allow_fetch) + self.assertFalse(test_provider.allow_commit) + self.assertFalse(test_provider.allow_delete) + self.assertTrue(test_provider.allow_list) + + def test_make_it(self): + test_provider = provider.Provider(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], test_provider.name) + self.assertEqual(EXAMPLE['description'], test_provider.description) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'description': 'description', + }, + test_provider._query_mapping._mapping, + ) + + +class TestProviderFlavorCapabilities(base.TestCase): + def test_basic(self): + test_flav_cap = provider.ProviderFlavorCapabilities() + self.assertEqual('flavor_capabilities', test_flav_cap.resources_key) + self.assertEqual( + '/lbaas/providers/%(provider)s/flavor_capabilities', + test_flav_cap.base_path, + ) + self.assertFalse(test_flav_cap.allow_create) + self.assertFalse(test_flav_cap.allow_fetch) + self.assertFalse(test_flav_cap.allow_commit) + self.assertFalse(test_flav_cap.allow_delete) + self.assertTrue(test_flav_cap.allow_list) + + def test_make_it(self): + test_flav_cap = provider.ProviderFlavorCapabilities(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], test_flav_cap.name) + self.assertEqual(EXAMPLE['description'], test_flav_cap.description) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'description': 'description', + }, + test_flav_cap._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/load_balancer/test_quota.py 
b/openstack/tests/unit/load_balancer/test_quota.py new file mode 100644 index 0000000000..e1ac593356 --- /dev/null +++ b/openstack/tests/unit/load_balancer/test_quota.py @@ -0,0 +1,79 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.load_balancer.v2 import quota +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'load_balancer': 1, + 'listener': 2, + 'pool': 3, + 'health_monitor': 4, + 'member': 5, + 'project_id': 6, +} + + +class TestQuota(base.TestCase): + def test_basic(self): + sot = quota.Quota() + self.assertEqual('quota', sot.resource_key) + self.assertEqual('quotas', sot.resources_key) + self.assertEqual('/lbaas/quotas', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = quota.Quota(**EXAMPLE) + self.assertEqual(EXAMPLE['load_balancer'], sot.load_balancers) + self.assertEqual(EXAMPLE['listener'], sot.listeners) + self.assertEqual(EXAMPLE['pool'], sot.pools) + self.assertEqual(EXAMPLE['health_monitor'], sot.health_monitors) + self.assertEqual(EXAMPLE['member'], sot.members) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + def test_prepare_request(self): + body = {'id': 'ABCDEFGH', 'load_balancer': '12345'} + quota_obj = quota.Quota(**body) + response 
= quota_obj._prepare_request()
+        self.assertNotIn('id', response)
+
+
+class TestQuotaDefault(base.TestCase):
+    def test_basic(self):
+        sot = quota.QuotaDefault()
+        self.assertEqual('quota', sot.resource_key)
+        self.assertEqual('quotas', sot.resources_key)
+        self.assertEqual('/lbaas/quotas/defaults', sot.base_path)
+        self.assertFalse(sot.allow_create)
+        self.assertTrue(sot.allow_fetch)
+        self.assertFalse(sot.allow_commit)
+        self.assertFalse(sot.allow_delete)
+        self.assertFalse(sot.allow_list)
+        self.assertTrue(sot.allow_retrieve)
+
+    def test_make_it(self):
+        sot = quota.QuotaDefault(**EXAMPLE)
+        self.assertEqual(EXAMPLE['load_balancer'], sot.load_balancers)
+        self.assertEqual(EXAMPLE['listener'], sot.listeners)
+        self.assertEqual(EXAMPLE['pool'], sot.pools)
+        self.assertEqual(EXAMPLE['health_monitor'], sot.health_monitors)
+        self.assertEqual(EXAMPLE['member'], sot.members)
+        self.assertEqual(EXAMPLE['project_id'], sot.project_id)
diff --git a/openstack/tests/unit/load_balancer/test_version.py b/openstack/tests/unit/load_balancer/test_version.py
new file mode 100644
index 0000000000..4cf818032a
--- /dev/null
+++ b/openstack/tests/unit/load_balancer/test_version.py
@@ -0,0 +1,41 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +from openstack.load_balancer import version +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': '2', + 'status': '3', +} + + +class TestVersion(base.TestCase): + def test_basic(self): + sot = version.Version() + self.assertEqual('version', sot.resource_key) + self.assertEqual('versions', sot.resources_key) + self.assertEqual('/', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = version.Version(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/load_balancer/v2/__init__.py b/openstack/tests/unit/load_balancer/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/load_balancer/v2/test_proxy.py b/openstack/tests/unit/load_balancer/v2/test_proxy.py new file mode 100644 index 0000000000..8cb57f7f35 --- /dev/null +++ b/openstack/tests/unit/load_balancer/v2/test_proxy.py @@ -0,0 +1,499 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock +import uuid + +from openstack.load_balancer.v2 import _proxy +from openstack.load_balancer.v2 import amphora +from openstack.load_balancer.v2 import availability_zone +from openstack.load_balancer.v2 import availability_zone_profile +from openstack.load_balancer.v2 import flavor +from openstack.load_balancer.v2 import flavor_profile +from openstack.load_balancer.v2 import health_monitor +from openstack.load_balancer.v2 import l7_policy +from openstack.load_balancer.v2 import l7_rule +from openstack.load_balancer.v2 import listener +from openstack.load_balancer.v2 import load_balancer as lb +from openstack.load_balancer.v2 import member +from openstack.load_balancer.v2 import pool +from openstack.load_balancer.v2 import provider +from openstack.load_balancer.v2 import quota +from openstack import proxy as proxy_base +from openstack.tests.unit import test_proxy_base + + +class TestLoadBalancerProxy(test_proxy_base.TestProxyBase): + LB_ID = uuid.uuid4() + LISTENER_ID = uuid.uuid4() + POOL_ID = uuid.uuid4() + L7_POLICY_ID = uuid.uuid4() + AMPHORA = 'amphora' + AMPHORA_ID = uuid.uuid4() + + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_load_balancers(self): + self.verify_list(self.proxy.load_balancers, lb.LoadBalancer) + + def test_load_balancer_get(self): + self.verify_get(self.proxy.get_load_balancer, lb.LoadBalancer) + + def test_load_balancer_stats_get(self): + self.verify_get( + self.proxy.get_load_balancer_statistics, + lb.LoadBalancerStats, + method_args=[self.LB_ID], + expected_args=[], + expected_kwargs={'lb_id': self.LB_ID, 'requires_id': False}, + ) + + def test_load_balancer_create(self): + self.verify_create(self.proxy.create_load_balancer, lb.LoadBalancer) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_load_balancer_delete_non_cascade(self, mock_get_resource): + fake_load_balancer = mock.Mock() + fake_load_balancer.id = "load_balancer_id" + 
mock_get_resource.return_value = fake_load_balancer + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_load_balancer, + method_args=["resource_or_id", True, False], + expected_args=[lb.LoadBalancer, fake_load_balancer], + expected_kwargs={"ignore_missing": True}, + ) + self.assertFalse(fake_load_balancer.cascade) + mock_get_resource.assert_called_once_with( + lb.LoadBalancer, "resource_or_id" + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + def test_load_balancer_delete_cascade(self, mock_get_resource): + fake_load_balancer = mock.Mock() + fake_load_balancer.id = "load_balancer_id" + mock_get_resource.return_value = fake_load_balancer + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_load_balancer, + method_args=["resource_or_id", True, True], + expected_args=[lb.LoadBalancer, fake_load_balancer], + expected_kwargs={"ignore_missing": True}, + ) + self.assertTrue(fake_load_balancer.cascade) + mock_get_resource.assert_called_once_with( + lb.LoadBalancer, "resource_or_id" + ) + + def test_load_balancer_find(self): + self.verify_find(self.proxy.find_load_balancer, lb.LoadBalancer) + + def test_load_balancer_update(self): + self.verify_update(self.proxy.update_load_balancer, lb.LoadBalancer) + + def test_load_balancer_failover(self): + self._verify( + "openstack.load_balancer.v2.load_balancer.LoadBalancer.failover", + self.proxy.failover_load_balancer, + method_args=[self.LB_ID], + expected_args=[self.proxy], + ) + + def test_listeners(self): + self.verify_list(self.proxy.listeners, listener.Listener) + + def test_listener_get(self): + self.verify_get(self.proxy.get_listener, listener.Listener) + + def test_listener_stats_get(self): + self.verify_get( + self.proxy.get_listener_statistics, + listener.ListenerStats, + method_args=[self.LISTENER_ID], + expected_args=[], + expected_kwargs={ + 'listener_id': self.LISTENER_ID, + 'requires_id': False, + }, + ) + + def test_listener_create(self): + 
self.verify_create(self.proxy.create_listener, listener.Listener) + + def test_listener_delete(self): + self.verify_delete(self.proxy.delete_listener, listener.Listener, True) + + def test_listener_find(self): + self.verify_find(self.proxy.find_listener, listener.Listener) + + def test_listener_update(self): + self.verify_update(self.proxy.update_listener, listener.Listener) + + def test_pools(self): + self.verify_list(self.proxy.pools, pool.Pool) + + def test_pool_get(self): + self.verify_get(self.proxy.get_pool, pool.Pool) + + def test_pool_create(self): + self.verify_create(self.proxy.create_pool, pool.Pool) + + def test_pool_delete(self): + self.verify_delete(self.proxy.delete_pool, pool.Pool, True) + + def test_pool_find(self): + self.verify_find(self.proxy.find_pool, pool.Pool) + + def test_pool_update(self): + self.verify_update(self.proxy.update_pool, pool.Pool) + + def test_members(self): + self.verify_list( + self.proxy.members, + member.Member, + method_kwargs={'pool': self.POOL_ID}, + expected_kwargs={'pool_id': self.POOL_ID}, + ) + + def test_member_get(self): + self.verify_get( + self.proxy.get_member, + member.Member, + method_kwargs={'pool': self.POOL_ID}, + expected_kwargs={'pool_id': self.POOL_ID}, + ) + + def test_member_create(self): + self.verify_create( + self.proxy.create_member, + member.Member, + method_kwargs={'pool': self.POOL_ID}, + expected_kwargs={'pool_id': self.POOL_ID}, + ) + + def test_member_delete(self): + self.verify_delete( + self.proxy.delete_member, + member.Member, + ignore_missing=True, + method_kwargs={'pool': self.POOL_ID}, + expected_kwargs={'pool_id': self.POOL_ID, 'ignore_missing': True}, + ) + + def test_member_find(self): + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_member, + method_args=["MEMBER", self.POOL_ID], + expected_args=[member.Member, "MEMBER"], + expected_kwargs={"pool_id": self.POOL_ID, "ignore_missing": True}, + ) + + def test_member_update(self): + self._verify( + 
'openstack.proxy.Proxy._update', + self.proxy.update_member, + method_args=["MEMBER", self.POOL_ID], + expected_args=[member.Member, "MEMBER"], + expected_kwargs={"pool_id": self.POOL_ID}, + ) + + def test_health_monitors(self): + self.verify_list( + self.proxy.health_monitors, health_monitor.HealthMonitor + ) + + def test_health_monitor_get(self): + self.verify_get( + self.proxy.get_health_monitor, health_monitor.HealthMonitor + ) + + def test_health_monitor_create(self): + self.verify_create( + self.proxy.create_health_monitor, health_monitor.HealthMonitor + ) + + def test_health_monitor_delete(self): + self.verify_delete( + self.proxy.delete_health_monitor, + health_monitor.HealthMonitor, + True, + ) + + def test_health_monitor_find(self): + self.verify_find( + self.proxy.find_health_monitor, health_monitor.HealthMonitor + ) + + def test_health_monitor_update(self): + self.verify_update( + self.proxy.update_health_monitor, health_monitor.HealthMonitor + ) + + def test_l7_policies(self): + self.verify_list(self.proxy.l7_policies, l7_policy.L7Policy) + + def test_l7_policy_get(self): + self.verify_get(self.proxy.get_l7_policy, l7_policy.L7Policy) + + def test_l7_policy_create(self): + self.verify_create(self.proxy.create_l7_policy, l7_policy.L7Policy) + + def test_l7_policy_delete(self): + self.verify_delete( + self.proxy.delete_l7_policy, l7_policy.L7Policy, True + ) + + def test_l7_policy_find(self): + self.verify_find(self.proxy.find_l7_policy, l7_policy.L7Policy) + + def test_l7_policy_update(self): + self.verify_update(self.proxy.update_l7_policy, l7_policy.L7Policy) + + def test_l7_rules(self): + self.verify_list( + self.proxy.l7_rules, + l7_rule.L7Rule, + method_kwargs={'l7_policy': self.L7_POLICY_ID}, + expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, + ) + + def test_l7_rule_get(self): + self.verify_get( + self.proxy.get_l7_rule, + l7_rule.L7Rule, + method_kwargs={'l7_policy': self.L7_POLICY_ID}, + expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, + 
) + + def test_l7_rule_create(self): + self.verify_create( + self.proxy.create_l7_rule, + l7_rule.L7Rule, + method_kwargs={'l7_policy': self.L7_POLICY_ID}, + expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, + ) + + def test_l7_rule_delete(self): + self.verify_delete( + self.proxy.delete_l7_rule, + l7_rule.L7Rule, + ignore_missing=True, + method_kwargs={'l7_policy': self.L7_POLICY_ID}, + expected_kwargs={'l7policy_id': self.L7_POLICY_ID}, + ) + + def test_l7_rule_find(self): + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_l7_rule, + method_args=["RULE", self.L7_POLICY_ID], + expected_args=[l7_rule.L7Rule, "RULE"], + expected_kwargs={ + "l7policy_id": self.L7_POLICY_ID, + "ignore_missing": True, + }, + ) + + def test_l7_rule_update(self): + self._verify( + 'openstack.proxy.Proxy._update', + self.proxy.update_l7_rule, + method_args=["RULE", self.L7_POLICY_ID], + expected_args=[l7_rule.L7Rule, "RULE"], + expected_kwargs={"l7policy_id": self.L7_POLICY_ID}, + ) + + def test_quotas(self): + self.verify_list(self.proxy.quotas, quota.Quota) + + def test_quota_get(self): + self.verify_get(self.proxy.get_quota, quota.Quota) + + def test_quota_update(self): + self.verify_update(self.proxy.update_quota, quota.Quota) + + def test_quota_default_get(self): + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_quota_default, + expected_args=[quota.QuotaDefault], + expected_kwargs={'requires_id': False}, + ) + + def test_quota_delete(self): + self.verify_delete(self.proxy.delete_quota, quota.Quota, False) + + def test_quota_delete_ignore(self): + self.verify_delete(self.proxy.delete_quota, quota.Quota, True) + + def test_providers(self): + self.verify_list(self.proxy.providers, provider.Provider) + + def test_provider_flavor_capabilities(self): + self.verify_list( + self.proxy.provider_flavor_capabilities, + provider.ProviderFlavorCapabilities, + method_args=[self.AMPHORA], + expected_args=[], + expected_kwargs={'provider': self.AMPHORA}, + ) + + def 
test_flavor_profiles(self): + self.verify_list( + self.proxy.flavor_profiles, flavor_profile.FlavorProfile + ) + + def test_flavor_profile_get(self): + self.verify_get( + self.proxy.get_flavor_profile, flavor_profile.FlavorProfile + ) + + def test_flavor_profile_create(self): + self.verify_create( + self.proxy.create_flavor_profile, flavor_profile.FlavorProfile + ) + + def test_flavor_profile_delete(self): + self.verify_delete( + self.proxy.delete_flavor_profile, + flavor_profile.FlavorProfile, + True, + ) + + def test_flavor_profile_find(self): + self.verify_find( + self.proxy.find_flavor_profile, flavor_profile.FlavorProfile + ) + + def test_flavor_profile_update(self): + self.verify_update( + self.proxy.update_flavor_profile, flavor_profile.FlavorProfile + ) + + def test_flavors(self): + self.verify_list(self.proxy.flavors, flavor.Flavor) + + def test_flavor_get(self): + self.verify_get(self.proxy.get_flavor, flavor.Flavor) + + def test_flavor_create(self): + self.verify_create(self.proxy.create_flavor, flavor.Flavor) + + def test_flavor_delete(self): + self.verify_delete(self.proxy.delete_flavor, flavor.Flavor, True) + + def test_flavor_find(self): + self.verify_find(self.proxy.find_flavor, flavor.Flavor) + + def test_flavor_update(self): + self.verify_update(self.proxy.update_flavor, flavor.Flavor) + + def test_amphorae(self): + self.verify_list(self.proxy.amphorae, amphora.Amphora) + + def test_amphora_get(self): + self.verify_get(self.proxy.get_amphora, amphora.Amphora) + + def test_amphora_find(self): + self.verify_find(self.proxy.find_amphora, amphora.Amphora) + + def test_amphora_configure(self): + self._verify( + "openstack.load_balancer.v2.amphora.Amphora.configure", + self.proxy.configure_amphora, + method_args=[self.AMPHORA_ID], + expected_args=[self.proxy], + ) + + def test_amphora_failover(self): + self._verify( + "openstack.load_balancer.v2.amphora.Amphora.failover", + self.proxy.failover_amphora, + method_args=[self.AMPHORA_ID], + 
expected_args=[self.proxy], + ) + + def test_availability_zone_profiles(self): + self.verify_list( + self.proxy.availability_zone_profiles, + availability_zone_profile.AvailabilityZoneProfile, + ) + + def test_availability_zone_profile_get(self): + self.verify_get( + self.proxy.get_availability_zone_profile, + availability_zone_profile.AvailabilityZoneProfile, + ) + + def test_availability_zone_profile_create(self): + self.verify_create( + self.proxy.create_availability_zone_profile, + availability_zone_profile.AvailabilityZoneProfile, + ) + + def test_availability_zone_profile_delete(self): + self.verify_delete( + self.proxy.delete_availability_zone_profile, + availability_zone_profile.AvailabilityZoneProfile, + True, + ) + + def test_availability_zone_profile_find(self): + self.verify_find( + self.proxy.find_availability_zone_profile, + availability_zone_profile.AvailabilityZoneProfile, + ) + + def test_availability_zone_profile_update(self): + self.verify_update( + self.proxy.update_availability_zone_profile, + availability_zone_profile.AvailabilityZoneProfile, + ) + + def test_availability_zones(self): + self.verify_list( + self.proxy.availability_zones, availability_zone.AvailabilityZone + ) + + def test_availability_zone_get(self): + self.verify_get( + self.proxy.get_availability_zone, + availability_zone.AvailabilityZone, + ) + + def test_availability_zone_create(self): + self.verify_create( + self.proxy.create_availability_zone, + availability_zone.AvailabilityZone, + ) + + def test_availability_zone_delete(self): + self.verify_delete( + self.proxy.delete_availability_zone, + availability_zone.AvailabilityZone, + True, + ) + + def test_availability_zone_find(self): + self.verify_find( + self.proxy.find_availability_zone, + availability_zone.AvailabilityZone, + ) + + def test_availability_zone_update(self): + self.verify_update( + self.proxy.update_availability_zone, + availability_zone.AvailabilityZone, + ) diff --git 
a/openstack/tests/unit/message/test_message_service.py b/openstack/tests/unit/message/test_message_service.py deleted file mode 100644 index 66d074fc10..0000000000 --- a/openstack/tests/unit/message/test_message_service.py +++ /dev/null @@ -1,30 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.message import message_service - - -class TestMessageService(testtools.TestCase): - - def test_service(self): - sot = message_service.MessageService() - self.assertEqual('messaging', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(2, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) - self.assertEqual('v2', sot.valid_versions[1].module) - self.assertEqual('v2', sot.valid_versions[1].path) diff --git a/openstack/tests/unit/message/test_version.py b/openstack/tests/unit/message/test_version.py index bf9662e179..3d3d03efc4 100644 --- a/openstack/tests/unit/message/test_version.py +++ b/openstack/tests/unit/message/test_version.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.message import version +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -22,22 +22,20 @@ } -class TestVersion(testtools.TestCase): - +class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) - self.assertEqual('messaging', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = version.Version(EXAMPLE) + sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/message/v1/test_claim.py b/openstack/tests/unit/message/v1/test_claim.py deleted file mode 100644 index cbfeb9f9cd..0000000000 --- a/openstack/tests/unit/message/v1/test_claim.py +++ /dev/null @@ -1,97 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json -import mock -import testtools - -from openstack import exceptions -from openstack.message.v1 import claim - -CLIENT = '3381af92-2b9e-11e3-b191-71861300734c' -QUEUE = 'test_queue' -LIMIT = 2 -FAKE = { - 'ttl': 300, - 'grace': 60 -} - - -class TestClaim(testtools.TestCase): - - def test_basic(self): - sot = claim.Claim() - self.assertEqual('claims', sot.resources_key) - self.assertEqual('/queues/%(queue_name)s/claims', sot.base_path) - self.assertEqual('messaging', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertFalse(sot.allow_list) - - def test_make_it(self): - sot = claim.Claim.new(client_id=CLIENT, - queue_name=QUEUE, - limit=LIMIT, - **FAKE) - self.assertEqual(CLIENT, sot.client_id) - self.assertEqual(QUEUE, sot.queue_name) - self.assertEqual(LIMIT, sot.limit) - self.assertEqual(FAKE['ttl'], sot.ttl) - self.assertEqual(FAKE['grace'], sot.grace) - - def test_create(self): - sess = mock.Mock() - obj = mock.Mock() - fake_attrs = [{'foo': 'bar'}, {'zoo': 'lah'}] - obj.json = mock.Mock(return_value=fake_attrs) - sess.post = mock.Mock(return_value=obj) - sot = claim.Claim() - - c = claim.Claim.new(client_id=CLIENT, queue_name=QUEUE, **FAKE) - list(sot.claim_messages(sess, c)) - - url = '/queues/%s/claims' % QUEUE - sess.post.assert_called_with( - url, endpoint_filter=sot.service, - headers={'Client-ID': CLIENT}, params=None, - data=json.dumps(FAKE, cls=claim.ClaimEncoder)) - - def test_claim_messages_no_invalid_response(self): - sess = mock.Mock() - resp = mock.Mock() - resp.status_code = 204 - sess.post = mock.Mock( - side_effect=exceptions.InvalidResponse(response=resp)) - sot = claim.Claim() - - messages = list(sot.claim_messages( - sess, claim.Claim.new(client_id=CLIENT, queue_name=QUEUE, **FAKE))) - - self.assertEqual(0, len(messages)) - - def test_claim_messages_invalid_response(self): - sess = mock.Mock() - 
resp = mock.Mock() - resp.status_code = 400 - sess.post = mock.Mock( - side_effect=exceptions.InvalidResponse(response=resp)) - sot = claim.Claim() - - try: - list(sot.claim_messages( - sess, claim.Claim.new(client_id=CLIENT, - queue_name=QUEUE, - **FAKE))) - except exceptions.InvalidResponse as e: - self.assertEqual(400, e.response.status_code) diff --git a/openstack/tests/unit/message/v1/test_message.py b/openstack/tests/unit/message/v1/test_message.py deleted file mode 100644 index 6fe5d26ad5..0000000000 --- a/openstack/tests/unit/message/v1/test_message.py +++ /dev/null @@ -1,85 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import json - -import mock -import testtools - -from openstack.message.v1 import message - -CLIENT = '3381af92-2b9e-11e3-b191-71861300734c' -QUEUE = 'test_queue' -FAKE = { - 'ttl': 300, - 'body': {'key': 'value'} -} -FAKE_HREF = { - 'href': '/v1/queues/test_queue/messages/1234', - 'ttl': 300, - 'body': {'key': 'value'} -} - - -class TestMessage(testtools.TestCase): - - def test_basic(self): - sot = message.Message() - self.assertEqual('messages', sot.resources_key) - self.assertEqual('/queues/%(queue_name)s/messages', sot.base_path) - self.assertEqual('messaging', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertFalse(sot.allow_list) - - def test_make_it(self): - sot = message.Message(FAKE) - self.assertEqual(FAKE['ttl'], sot.ttl) - self.assertEqual(FAKE['body'], sot.body) - - def test_create(self): - sess = mock.Mock() - obj = mock.Mock() - obj.json = mock.Mock(return_value={'resources': {'k': 'v'}}) - sess.post = mock.Mock(return_value=obj) - sot = message.Message() - - msg = message.Message.new(client_id=CLIENT, queue_name=QUEUE, **FAKE) - sot.create_messages(sess, [msg]) - - url = '/queues/%s/messages' % QUEUE - sess.post.assert_called_with( - url, endpoint_filter=sot.service, - headers={'Client-ID': CLIENT}, - data=mock.ANY) - - args, kwargs = sess.post.call_args - self.assertIn("data", kwargs) - self.assertDictEqual(json.loads(kwargs["data"])[0], FAKE) - - def test_delete(self): - sess = mock.Mock() - sess.delete = mock.Mock() - sess.delete.return_value = mock.Mock() - sot = message.Message() - - sot.delete_by_id( - sess, message.Message.new(client_id=CLIENT, - queue_name=QUEUE, - **FAKE_HREF)) - - url = '/queues/%s/messages/1234' % QUEUE - sess.delete.assert_called_with( - url, endpoint_filter=sot.service, - headers={'Client-ID': CLIENT, 'Accept': ''}) diff --git a/openstack/tests/unit/message/v1/test_proxy.py 
b/openstack/tests/unit/message/v1/test_proxy.py deleted file mode 100644 index 8436f024fe..0000000000 --- a/openstack/tests/unit/message/v1/test_proxy.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.message.v1 import _proxy -from openstack.message.v1 import claim -from openstack.message.v1 import message -from openstack.message.v1 import queue -from openstack.tests.unit import test_proxy_base - -CLIENT_ID = '3381af92-2b9e-11e3-b191-71861300734c' -QUEUE_NAME = 'test_queue' - - -class TestMessageProxy(test_proxy_base.TestProxyBase): - def setUp(self): - super(TestMessageProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - def test_queue_create_attrs(self): - self.verify_create(self.proxy.create_queue, queue.Queue) - - def test_queue_delete(self): - self.verify_delete(self.proxy.delete_queue, queue.Queue, False) - - def test_queue_delete_ignore(self): - self.verify_delete(self.proxy.delete_queue, queue.Queue, True) - - def test_messages_create(self): - self._verify2("openstack.message.v1.message.Message.create_messages", - self.proxy.create_messages, - expected_result="result", - method_args=[[]], - expected_args=[self.session, []]) - - def test_messages_claim(self): - self._verify2("openstack.message.v1.claim.Claim.claim_messages", - self.proxy.claim_messages, - expected_result="result", - method_args=[claim.Claim], - expected_args=[self.session, claim.Claim]) - - def test_message_delete(self): - 
self._verify2("openstack.message.v1.message.Message.delete_by_id", - self.proxy.delete_message, - method_args=[message.Message], - expected_args=[self.session, message.Message]) diff --git a/openstack/tests/unit/message/v1/test_queue.py b/openstack/tests/unit/message/v1/test_queue.py deleted file mode 100644 index 81b07df7fb..0000000000 --- a/openstack/tests/unit/message/v1/test_queue.py +++ /dev/null @@ -1,55 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import testtools - -from openstack.message.v1 import queue - - -FAKE_NAME = 'test_queue' -FAKE = { - 'name': FAKE_NAME, -} - - -class TestQueue(testtools.TestCase): - - def test_basic(self): - sot = queue.Queue() - self.assertEqual('queues', sot.resources_key) - self.assertEqual('/queues', sot.base_path) - self.assertEqual('messaging', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertFalse(sot.allow_list) - - def test_make_it(self): - sot = queue.Queue(FAKE) - self.assertEqual(FAKE['name'], sot.name) - - def test_create(self): - sess = mock.Mock() - sess.put = mock.Mock() - sess.put.return_value = mock.Mock() - sot = queue.Queue(FAKE) - - sot.create(sess) - - url = 'queues/%s' % FAKE_NAME - headers = {'Accept': ''} - sess.put.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) - self.assertEqual(FAKE_NAME, sot.id) - 
self.assertEqual(FAKE_NAME, sot.name) diff --git a/openstack/tests/unit/message/v2/test_claim.py b/openstack/tests/unit/message/v2/test_claim.py index 945829148f..2b108dcb34 100644 --- a/openstack/tests/unit/message/v2/test_claim.py +++ b/openstack/tests/unit/message/v2/test_claim.py @@ -11,12 +11,11 @@ # under the License. import copy -import mock -import testtools +from unittest import mock import uuid from openstack.message.v2 import claim - +from openstack.tests.unit import base FAKE1 = { "age": 1632, @@ -25,7 +24,7 @@ "limit": 10, "messages": [{"id": "1"}, {"id": "2"}], "ttl": 3600, - "queue_name": "queue1" + "queue_name": "queue1", } @@ -38,20 +37,19 @@ "ttl": 3600, "queue_name": "queue1", "client_id": "OLD_CLIENT_ID", - "project_id": "OLD_PROJECT_ID" + "project_id": "OLD_PROJECT_ID", } -class TestClaim(testtools.TestCase): +class TestClaim(base.TestCase): def test_basic(self): sot = claim.Claim() self.assertEqual("claims", sot.resources_key) self.assertEqual("/queues/%(queue_name)s/claims", sot.base_path) - self.assertEqual("messaging", sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_commit) def test_make_it(self): sot = claim.Claim.new(**FAKE2) @@ -78,11 +76,12 @@ def test_create_204_resp(self, mock_uuid): sot = claim.Claim(**FAKE1) res = sot.create(sess) - url = "/queues/%(queue)s/claims" % {"queue": FAKE.pop("queue_name")} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE) + url = "/queues/{queue}/claims".format(queue=FAKE.pop("queue_name")) + headers = { + "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.post.assert_called_once_with(url, headers=headers, json=FAKE) sess.get_project_id.assert_called_once_with() 
self.assertEqual(sot, res) @@ -100,11 +99,12 @@ def test_create_non_204_resp(self, mock_uuid): sot._translate_response = mock.Mock() res = sot.create(sess) - url = "/queues/%(queue)s/claims" % {"queue": FAKE.pop("queue_name")} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE) + url = "/queues/{queue}/claims".format(queue=FAKE.pop("queue_name")) + headers = { + "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.post.assert_called_once_with(url, headers=headers, json=FAKE) sess.get_project_id.assert_called_once_with() self.assertEqual(sot, res) sot._translate_response.assert_called_once_with(resp) @@ -120,11 +120,12 @@ def test_create_client_id_project_id_exist(self): sot._translate_response = mock.Mock() res = sot.create(sess) - url = "/queues/%(queue)s/claims" % {"queue": FAKE.pop("queue_name")} - headers = {"Client-ID": FAKE.pop("client_id"), - "X-PROJECT-ID": FAKE.pop("project_id")} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE) + url = "/queues/{queue}/claims".format(queue=FAKE.pop("queue_name")) + headers = { + "Client-ID": FAKE.pop("client_id"), + "X-PROJECT-ID": FAKE.pop("project_id"), + } + sess.post.assert_called_once_with(url, headers=headers, json=FAKE) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") @@ -137,14 +138,17 @@ def test_get(self, mock_uuid): sot = claim.Claim(**FAKE1) sot._translate_response = mock.Mock() - res = sot.get(sess) - - url = "queues/%(queue)s/claims/%(claim)s" % { - "queue": FAKE1["queue_name"], "claim": FAKE1["id"]} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + + url = "queues/{queue}/claims/{claim}".format( + queue=FAKE1["queue_name"], + claim=FAKE1["id"], + ) + headers = { 
+ "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -156,14 +160,17 @@ def test_get_client_id_project_id_exist(self): sot = claim.Claim(**FAKE2) sot._translate_response = mock.Mock() - res = sot.get(sess) - - url = "queues/%(queue)s/claims/%(claim)s" % { - "queue": FAKE2["queue_name"], "claim": FAKE2["id"]} - headers = {"Client-ID": "OLD_CLIENT_ID", - "X-PROJECT-ID": "OLD_PROJECT_ID"} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + + url = "queues/{queue}/claims/{claim}".format( + queue=FAKE2["queue_name"], + claim=FAKE2["id"], + ) + headers = { + "Client-ID": "OLD_CLIENT_ID", + "X-PROJECT-ID": "OLD_PROJECT_ID", + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -177,14 +184,17 @@ def test_update(self, mock_uuid): FAKE = copy.deepcopy(FAKE1) sot = claim.Claim(**FAKE1) - res = sot.update(sess) - - url = "queues/%(queue)s/claims/%(claim)s" % { - "queue": FAKE.pop("queue_name"), "claim": FAKE["id"]} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.patch.assert_called_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE) + res = sot.commit(sess) + + url = "queues/{queue}/claims/{claim}".format( + queue=FAKE.pop("queue_name"), + claim=FAKE["id"], + ) + headers = { + "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.patch.assert_called_with(url, headers=headers, json=FAKE) sess.get_project_id.assert_called_once_with() self.assertEqual(sot, res) @@ -195,14 +205,17 @@ def test_update_client_id_project_id_exist(self): FAKE = copy.deepcopy(FAKE2) sot = claim.Claim(**FAKE2) - res = sot.update(sess) - - url = 
"queues/%(queue)s/claims/%(claim)s" % { - "queue": FAKE.pop("queue_name"), "claim": FAKE["id"]} - headers = {"Client-ID": FAKE.pop("client_id"), - "X-PROJECT-ID": FAKE.pop("project_id")} - sess.patch.assert_called_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE) + res = sot.commit(sess) + + url = "queues/{queue}/claims/{claim}".format( + queue=FAKE.pop("queue_name"), + claim=FAKE["id"], + ) + headers = { + "Client-ID": FAKE.pop("client_id"), + "X-PROJECT-ID": FAKE.pop("project_id"), + } + sess.patch.assert_called_with(url, headers=headers, json=FAKE) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") @@ -217,12 +230,15 @@ def test_delete(self, mock_uuid): sot._translate_response = mock.Mock() sot.delete(sess) - url = "queues/%(queue)s/claims/%(claim)s" % { - "queue": FAKE1["queue_name"], "claim": FAKE1["id"]} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = "queues/{queue}/claims/{claim}".format( + queue=FAKE1["queue_name"], + claim=FAKE1["id"], + ) + headers = { + "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) @@ -235,10 +251,13 @@ def test_delete_client_id_project_id_exist(self): sot._translate_response = mock.Mock() sot.delete(sess) - url = "queues/%(queue)s/claims/%(claim)s" % { - "queue": FAKE2["queue_name"], "claim": FAKE2["id"]} - headers = {"Client-ID": "OLD_CLIENT_ID", - "X-PROJECT-ID": "OLD_PROJECT_ID"} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = "queues/{queue}/claims/{claim}".format( + queue=FAKE2["queue_name"], + claim=FAKE2["id"], + ) + headers = { + "Client-ID": "OLD_CLIENT_ID", + "X-PROJECT-ID": "OLD_PROJECT_ID", + } + sess.delete.assert_called_with(url, 
headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) diff --git a/openstack/tests/unit/message/v2/test_message.py b/openstack/tests/unit/message/v2/test_message.py index d7541f5bc2..d7aa7de492 100644 --- a/openstack/tests/unit/message/v2/test_message.py +++ b/openstack/tests/unit/message/v2/test_message.py @@ -10,24 +10,23 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools +from unittest import mock import uuid from openstack.message.v2 import message - +from openstack.tests.unit import base FAKE1 = { 'age': 456, 'body': { 'current_bytes': '0', 'event': 'BackupProgress', - 'total_bytes': '99614720' + 'total_bytes': '99614720', }, 'id': '578ee000508f153f256f717d', 'href': '/v2/queues/queue1/messages/578ee000508f153f256f717d', 'ttl': 3600, - 'queue_name': 'queue1' + 'queue_name': 'queue1', } @@ -36,25 +35,24 @@ 'body': { 'current_bytes': '0', 'event': 'BackupProgress', - 'total_bytes': '99614720' + 'total_bytes': '99614720', }, 'id': '578ee000508f153f256f717d', 'href': '/v2/queues/queue1/messages/578ee000508f153f256f717d', 'ttl': 3600, 'queue_name': 'queue1', 'client_id': 'OLD_CLIENT_ID', - 'project_id': 'OLD_PROJECT_ID' + 'project_id': 'OLD_PROJECT_ID', } -class TestMessage(testtools.TestCase): +class TestMessage(base.TestCase): def test_basic(self): sot = message.Message() self.assertEqual('messages', sot.resources_key) self.assertEqual('/queues/%(queue_name)s/messages', sot.base_path) - self.assertEqual('messaging', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -82,25 +80,21 @@ def test_post(self, mock_uuid): sess.get_project_id.return_value = 'NEW_PROJECT_ID' mock_uuid.return_value = 'NEW_CLIENT_ID' messages = [ - { - 'body': {'key': 'value1'}, - 'ttl': 3600 - }, - { - 'body': {'key': 'value2'}, - 'ttl': 
1800 - } + {'body': {'key': 'value1'}, 'ttl': 3600}, + {'body': {'key': 'value2'}, 'ttl': 1800}, ] sot = message.Message(**FAKE1) res = sot.post(sess, messages) - url = '/queues/%(queue)s/messages' % {'queue': FAKE1['queue_name']} - headers = {'Client-ID': 'NEW_CLIENT_ID', - 'X-PROJECT-ID': 'NEW_PROJECT_ID'} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - headers=headers, - json={'messages': messages}) + url = '/queues/{queue}/messages'.format(queue=FAKE1['queue_name']) + headers = { + 'Client-ID': 'NEW_CLIENT_ID', + 'X-PROJECT-ID': 'NEW_PROJECT_ID', + } + sess.post.assert_called_once_with( + url, headers=headers, json={'messages': messages} + ) sess.get_project_id.assert_called_once_with() resp.json.assert_called_once_with() self.assertEqual(resources, res) @@ -115,25 +109,21 @@ def test_post_client_id_project_id_exist(self): ] resp.json.return_value = {'resources': resources} messages = [ - { - 'body': {'key': 'value1'}, - 'ttl': 3600 - }, - { - 'body': {'key': 'value2'}, - 'ttl': 1800 - } + {'body': {'key': 'value1'}, 'ttl': 3600}, + {'body': {'key': 'value2'}, 'ttl': 1800}, ] sot = message.Message(**FAKE2) res = sot.post(sess, messages) - url = '/queues/%(queue)s/messages' % {'queue': FAKE2['queue_name']} - headers = {'Client-ID': 'OLD_CLIENT_ID', - 'X-PROJECT-ID': 'OLD_PROJECT_ID'} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - headers=headers, - json={'messages': messages}) + url = '/queues/{queue}/messages'.format(queue=FAKE2['queue_name']) + headers = { + 'Client-ID': 'OLD_CLIENT_ID', + 'X-PROJECT-ID': 'OLD_PROJECT_ID', + } + sess.post.assert_called_once_with( + url, headers=headers, json={'messages': messages} + ) resp.json.assert_called_once_with() self.assertEqual(resources, res) @@ -147,14 +137,17 @@ def test_get(self, mock_uuid): sot = message.Message(**FAKE1) sot._translate_response = mock.Mock() - res = sot.get(sess) - - url = 'queues/%(queue)s/messages/%(message)s' % { - 'queue': FAKE1['queue_name'], 
'message': FAKE1['id']} - headers = {'Client-ID': 'NEW_CLIENT_ID', - 'X-PROJECT-ID': 'NEW_PROJECT_ID'} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + + url = 'queues/{queue}/messages/{message}'.format( + queue=FAKE1['queue_name'], + message=FAKE1['id'], + ) + headers = { + 'Client-ID': 'NEW_CLIENT_ID', + 'X-PROJECT-ID': 'NEW_PROJECT_ID', + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -166,17 +159,20 @@ def test_get_client_id_project_id_exist(self): sot = message.Message(**FAKE1) sot._translate_response = mock.Mock() - res = sot.get(sess) + res = sot.fetch(sess) - url = 'queues/%(queue)s/messages/%(message)s' % { - 'queue': FAKE2['queue_name'], 'message': FAKE2['id']} + url = 'queues/{queue}/messages/{message}'.format( + queue=FAKE2['queue_name'], + message=FAKE2['id'], + ) sot = message.Message(**FAKE2) sot._translate_response = mock.Mock() - res = sot.get(sess) - headers = {'Client-ID': 'OLD_CLIENT_ID', - 'X-PROJECT-ID': 'OLD_PROJECT_ID'} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + headers = { + 'Client-ID': 'OLD_CLIENT_ID', + 'X-PROJECT-ID': 'OLD_PROJECT_ID', + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -193,12 +189,15 @@ def test_delete_unclaimed(self, mock_uuid): sot._translate_response = mock.Mock() sot.delete(sess) - url = 'queues/%(queue)s/messages/%(message)s' % { - 'queue': FAKE1['queue_name'], 'message': FAKE1['id']} - headers = {'Client-ID': 'NEW_CLIENT_ID', - 'X-PROJECT-ID': 'NEW_PROJECT_ID'} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = 'queues/{queue}/messages/{message}'.format( + queue=FAKE1['queue_name'], + 
message=FAKE1['id'], + ) + headers = { + 'Client-ID': 'NEW_CLIENT_ID', + 'X-PROJECT-ID': 'NEW_PROJECT_ID', + } + sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) @@ -215,13 +214,16 @@ def test_delete_claimed(self, mock_uuid): sot._translate_response = mock.Mock() sot.delete(sess) - url = 'queues/%(queue)s/messages/%(message)s?claim_id=%(cid)s' % { - 'queue': FAKE1['queue_name'], 'message': FAKE1['id'], - 'cid': 'CLAIM_ID'} - headers = {'Client-ID': 'NEW_CLIENT_ID', - 'X-PROJECT-ID': 'NEW_PROJECT_ID'} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = 'queues/{queue}/messages/{message}?claim_id={cid}'.format( + queue=FAKE1['queue_name'], + message=FAKE1['id'], + cid='CLAIM_ID', + ) + headers = { + 'Client-ID': 'NEW_CLIENT_ID', + 'X-PROJECT-ID': 'NEW_PROJECT_ID', + } + sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) @@ -235,10 +237,13 @@ def test_delete_client_id_project_id_exist(self): sot._translate_response = mock.Mock() sot.delete(sess) - url = 'queues/%(queue)s/messages/%(message)s' % { - 'queue': FAKE2['queue_name'], 'message': FAKE2['id']} - headers = {'Client-ID': 'OLD_CLIENT_ID', - 'X-PROJECT-ID': 'OLD_PROJECT_ID'} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = 'queues/{queue}/messages/{message}'.format( + queue=FAKE2['queue_name'], + message=FAKE2['id'], + ) + headers = { + 'Client-ID': 'OLD_CLIENT_ID', + 'X-PROJECT-ID': 'OLD_PROJECT_ID', + } + sess.delete.assert_called_with(url, headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) diff --git a/openstack/tests/unit/message/v2/test_proxy.py b/openstack/tests/unit/message/v2/test_proxy.py index 75a602c56a..9569b018c1 100644 --- 
a/openstack/tests/unit/message/v2/test_proxy.py +++ b/openstack/tests/unit/message/v2/test_proxy.py @@ -10,32 +10,37 @@ # License for the specific language governing permissions and limitations # under the License. -import mock +from unittest import mock from openstack.message.v2 import _proxy from openstack.message.v2 import claim from openstack.message.v2 import message from openstack.message.v2 import queue from openstack.message.v2 import subscription -from openstack import proxy2 as proxy_base -from openstack.tests.unit import test_proxy_base2 +from openstack import proxy as proxy_base +from openstack.tests.unit import test_proxy_base QUEUE_NAME = 'test_queue' -class TestMessageProxy(test_proxy_base2.TestProxyBase): +class TestMessageProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestMessageProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) + +class TestMessageQueue(TestMessageProxy): def test_queue_create(self): self.verify_create(self.proxy.create_queue, queue.Queue) def test_queue_get(self): self.verify_get(self.proxy.get_queue, queue.Queue) + self.verify_get_overrided( + self.proxy, queue.Queue, 'openstack.message.v2.queue.Queue' + ) def test_queues(self): - self.verify_list(self.proxy.queues, queue.Queue, paginated=True) + self.verify_list(self.proxy.queues, queue.Queue) def test_queue_delete(self): self.verify_delete(self.proxy.delete_queue, queue.Queue, False) @@ -43,157 +48,223 @@ def test_queue_delete(self): def test_queue_delete_ignore(self): self.verify_delete(self.proxy.delete_queue, queue.Queue, True) - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + +class TestMessageMessage(TestMessageProxy): + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_post(self, mock_get_resource): message_obj = message.Message(queue_name="test_queue") mock_get_resource.return_value = message_obj - self._verify("openstack.message.v2.message.Message.post", - self.proxy.post_message, - 
method_args=["test_queue", ["msg1", "msg2"]], - expected_args=[["msg1", "msg2"]]) - mock_get_resource.assert_called_once_with(message.Message, None, - queue_name="test_queue") - - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + self._verify( + "openstack.message.v2.message.Message.post", + self.proxy.post_message, + method_args=["test_queue", ["msg1", "msg2"]], + expected_args=[self.proxy, ["msg1", "msg2"]], + ) + mock_get_resource.assert_called_once_with( + message.Message, None, queue_name="test_queue" + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_get(self, mock_get_resource): mock_get_resource.return_value = "resource_or_id" - self._verify2("openstack.proxy2.BaseProxy._get", - self.proxy.get_message, - method_args=["test_queue", "resource_or_id"], - expected_args=[message.Message, "resource_or_id"]) - mock_get_resource.assert_called_once_with(message.Message, - "resource_or_id", - queue_name="test_queue") + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_message, + method_args=["test_queue", "resource_or_id"], + expected_args=[message.Message, "resource_or_id"], + ) + mock_get_resource.assert_called_once_with( + message.Message, "resource_or_id", queue_name="test_queue" + ) + self.verify_get_overrided( + self.proxy, message.Message, 'openstack.message.v2.message.Message' + ) def test_messages(self): - self.verify_list(self.proxy.messages, message.Message, - paginated=True, method_args=["test_queue"], - expected_kwargs={"queue_name": "test_queue"}) + self.verify_list( + self.proxy.messages, + message.Message, + method_kwargs={"queue_name": "test_queue"}, + expected_kwargs={"queue_name": "test_queue"}, + ) - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_delete(self, mock_get_resource): fake_message = mock.Mock() fake_message.id = "message_id" mock_get_resource.return_value = fake_message - 
self._verify2("openstack.proxy2.BaseProxy._delete", - self.proxy.delete_message, - method_args=["test_queue", "resource_or_id", None, - False], - expected_args=[message.Message, - fake_message], - expected_kwargs={"ignore_missing": False}) + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_message, + method_args=["test_queue", "resource_or_id", None, False], + expected_args=[message.Message, fake_message], + expected_kwargs={"ignore_missing": False}, + ) self.assertIsNone(fake_message.claim_id) - mock_get_resource.assert_called_once_with(message.Message, - "resource_or_id", - queue_name="test_queue") + mock_get_resource.assert_called_once_with( + message.Message, "resource_or_id", queue_name="test_queue" + ) - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_delete_claimed(self, mock_get_resource): fake_message = mock.Mock() fake_message.id = "message_id" mock_get_resource.return_value = fake_message - self._verify2("openstack.proxy2.BaseProxy._delete", - self.proxy.delete_message, - method_args=["test_queue", "resource_or_id", "claim_id", - False], - expected_args=[message.Message, - fake_message], - expected_kwargs={"ignore_missing": False}) + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_message, + method_args=["test_queue", "resource_or_id", "claim_id", False], + expected_args=[message.Message, fake_message], + expected_kwargs={"ignore_missing": False}, + ) self.assertEqual("claim_id", fake_message.claim_id) - mock_get_resource.assert_called_once_with(message.Message, - "resource_or_id", - queue_name="test_queue") + mock_get_resource.assert_called_once_with( + message.Message, "resource_or_id", queue_name="test_queue" + ) - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_message_delete_ignore(self, mock_get_resource): fake_message = mock.Mock() fake_message.id = 
"message_id" mock_get_resource.return_value = fake_message - self._verify2("openstack.proxy2.BaseProxy._delete", - self.proxy.delete_message, - method_args=["test_queue", "resource_or_id", None, - True], - expected_args=[message.Message, - fake_message], - expected_kwargs={"ignore_missing": True}) + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_message, + method_args=["test_queue", "resource_or_id", None, True], + expected_args=[message.Message, fake_message], + expected_kwargs={"ignore_missing": True}, + ) self.assertIsNone(fake_message.claim_id) - mock_get_resource.assert_called_once_with(message.Message, - "resource_or_id", - queue_name="test_queue") + mock_get_resource.assert_called_once_with( + message.Message, "resource_or_id", queue_name="test_queue" + ) + +class TestMessageSubscription(TestMessageProxy): def test_subscription_create(self): - self._verify("openstack.message.v2.subscription.Subscription.create", - self.proxy.create_subscription, - method_args=["test_queue"]) + self._verify( + "openstack.message.v2.subscription.Subscription.create", + self.proxy.create_subscription, + method_args=["test_queue"], + expected_args=[self.proxy], + expected_kwargs={"base_path": None}, + ) - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_subscription_get(self, mock_get_resource): mock_get_resource.return_value = "resource_or_id" - self._verify2("openstack.proxy2.BaseProxy._get", - self.proxy.get_subscription, - method_args=["test_queue", "resource_or_id"], - expected_args=[subscription.Subscription, - "resource_or_id"]) + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_subscription, + method_args=["test_queue", "resource_or_id"], + expected_args=[subscription.Subscription, "resource_or_id"], + ) mock_get_resource.assert_called_once_with( - subscription.Subscription, "resource_or_id", - queue_name="test_queue") + subscription.Subscription, + 
"resource_or_id", + queue_name="test_queue", + ) + self.verify_get_overrided( + self.proxy, + subscription.Subscription, + 'openstack.message.v2.subscription.Subscription', + ) def test_subscriptions(self): - self.verify_list(self.proxy.subscriptions, subscription.Subscription, - paginated=True, method_args=["test_queue"], - expected_kwargs={"queue_name": "test_queue"}) + self.verify_list( + self.proxy.subscriptions, + subscription.Subscription, + method_kwargs={"queue_name": "test_queue"}, + expected_kwargs={"queue_name": "test_queue"}, + ) - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_subscription_delete(self, mock_get_resource): - mock_get_resource.return_value = "resource_or_id" - self.verify_delete(self.proxy.delete_subscription, - subscription.Subscription, False, - ["test_queue", "resource_or_id"]) + mock_get_resource.return_value = "test_subscription" + self.verify_delete( + self.proxy.delete_subscription, + subscription.Subscription, + ignore_missing=False, + method_args=["test_queue", "resource_or_id"], + expected_args=["test_subscription"], + ) mock_get_resource.assert_called_once_with( - subscription.Subscription, "resource_or_id", - queue_name="test_queue") + subscription.Subscription, + "resource_or_id", + queue_name="test_queue", + ) - @mock.patch.object(proxy_base.BaseProxy, '_get_resource') + @mock.patch.object(proxy_base.Proxy, '_get_resource') def test_subscription_delete_ignore(self, mock_get_resource): - mock_get_resource.return_value = "resource_or_id" - self.verify_delete(self.proxy.delete_subscription, - subscription.Subscription, True, - ["test_queue", "resource_or_id"]) + mock_get_resource.return_value = "test_subscription" + self.verify_delete( + self.proxy.delete_subscription, + subscription.Subscription, + ignore_missing=True, + method_args=["test_queue", "resource_or_id"], + expected_args=["test_subscription"], + ) mock_get_resource.assert_called_once_with( 
- subscription.Subscription, "resource_or_id", - queue_name="test_queue") + subscription.Subscription, + "resource_or_id", + queue_name="test_queue", + ) + +class TestMessageClaim(TestMessageProxy): def test_claim_create(self): - self._verify("openstack.message.v2.claim.Claim.create", - self.proxy.create_claim, - method_args=["test_queue"]) + self._verify( + "openstack.message.v2.claim.Claim.create", + self.proxy.create_claim, + method_args=["test_queue"], + expected_args=[self.proxy], + expected_kwargs={"base_path": None}, + ) def test_claim_get(self): - self._verify2("openstack.proxy2.BaseProxy._get", - self.proxy.get_claim, - method_args=["test_queue", "resource_or_id"], - expected_args=[claim.Claim, - "resource_or_id"], - expected_kwargs={"queue_name": "test_queue"}) + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_claim, + method_args=["test_queue", "resource_or_id"], + expected_args=[claim.Claim, "resource_or_id"], + expected_kwargs={"queue_name": "test_queue"}, + ) + self.verify_get_overrided( + self.proxy, claim.Claim, 'openstack.message.v2.claim.Claim' + ) def test_claim_update(self): - self._verify2("openstack.proxy2.BaseProxy._update", - self.proxy.update_claim, - method_args=["test_queue", "resource_or_id"], - method_kwargs={"k1": "v1"}, - expected_args=[claim.Claim, - "resource_or_id"], - expected_kwargs={"queue_name": "test_queue", - "k1": "v1"}) + self._verify( + "openstack.proxy.Proxy._update", + self.proxy.update_claim, + method_args=["test_queue", "resource_or_id"], + method_kwargs={"k1": "v1"}, + expected_args=[claim.Claim, "resource_or_id"], + expected_kwargs={"queue_name": "test_queue", "k1": "v1"}, + ) def test_claim_delete(self): - self.verify_delete(self.proxy.delete_claim, - claim.Claim, False, - ["test_queue", "resource_or_id"], - expected_kwargs={"queue_name": "test_queue"}) + self.verify_delete( + self.proxy.delete_claim, + claim.Claim, + ignore_missing=False, + method_args=["test_queue", "test_claim"], + 
expected_args=["test_claim"], + expected_kwargs={ + "queue_name": "test_queue", + "ignore_missing": False, + }, + ) def test_claim_delete_ignore(self): - self.verify_delete(self.proxy.delete_claim, - claim.Claim, True, - ["test_queue", "resource_or_id"], - expected_kwargs={"queue_name": "test_queue"}) + self.verify_delete( + self.proxy.delete_claim, + claim.Claim, + ignore_missing=True, + method_args=["test_queue", "test_claim"], + expected_args=["test_claim"], + expected_kwargs={ + "queue_name": "test_queue", + "ignore_missing": True, + }, + ) diff --git a/openstack/tests/unit/message/v2/test_queue.py b/openstack/tests/unit/message/v2/test_queue.py index 566510c67c..03932d99fb 100644 --- a/openstack/tests/unit/message/v2/test_queue.py +++ b/openstack/tests/unit/message/v2/test_queue.py @@ -10,18 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools +from unittest import mock import uuid from openstack.message.v2 import queue - +from openstack.tests.unit import base FAKE1 = { 'name': 'test_queue', 'description': 'Queue used for test.', '_default_message_ttl': 3600, - '_max_messages_post_size': 262144 + '_max_messages_post_size': 262144, } @@ -31,18 +30,17 @@ '_default_message_ttl': 3600, '_max_messages_post_size': 262144, 'client_id': 'OLD_CLIENT_ID', - 'project_id': 'OLD_PROJECT_ID' + 'project_id': 'OLD_PROJECT_ID', } -class TestQueue(testtools.TestCase): +class TestQueue(base.TestCase): def test_basic(self): sot = queue.Queue() self.assertEqual('queues', sot.resources_key) self.assertEqual('/queues', sot.base_path) - self.assertEqual('messaging', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -51,10 +49,12 @@ def test_make_it(self): self.assertEqual(FAKE1['description'], sot.description) self.assertEqual(FAKE1['name'], sot.name) 
self.assertEqual(FAKE1['name'], sot.id) - self.assertEqual(FAKE1['_default_message_ttl'], - sot.default_message_ttl) - self.assertEqual(FAKE1['_max_messages_post_size'], - sot.max_messages_post_size) + self.assertEqual( + FAKE1['_default_message_ttl'], sot.default_message_ttl + ) + self.assertEqual( + FAKE1['_max_messages_post_size'], sot.max_messages_post_size + ) self.assertEqual(FAKE2['client_id'], sot.client_id) self.assertEqual(FAKE2['project_id'], sot.project_id) @@ -70,11 +70,12 @@ def test_create(self, mock_uuid): sot._translate_response = mock.Mock() res = sot.create(sess) - url = 'queues/%s' % FAKE1['name'] - headers = {'Client-ID': 'NEW_CLIENT_ID', - 'X-PROJECT-ID': 'NEW_PROJECT_ID'} - sess.put.assert_called_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE1) + url = 'queues/{}'.format(FAKE1['name']) + headers = { + 'Client-ID': 'NEW_CLIENT_ID', + 'X-PROJECT-ID': 'NEW_PROJECT_ID', + } + sess.put.assert_called_with(url, headers=headers, json=FAKE1) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) self.assertEqual(sot, res) @@ -88,11 +89,12 @@ def test_create_client_id_project_id_exist(self): sot._translate_response = mock.Mock() res = sot.create(sess) - url = 'queues/%s' % FAKE2['name'] - headers = {'Client-ID': 'OLD_CLIENT_ID', - 'X-PROJECT-ID': 'OLD_PROJECT_ID'} - sess.put.assert_called_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE1) + url = 'queues/{}'.format(FAKE2['name']) + headers = { + 'Client-ID': 'OLD_CLIENT_ID', + 'X-PROJECT-ID': 'OLD_PROJECT_ID', + } + sess.put.assert_called_with(url, headers=headers, json=FAKE1) sot._translate_response.assert_called_once_with(resp, has_body=False) self.assertEqual(sot, res) @@ -106,13 +108,14 @@ def test_get(self, mock_uuid): sot = queue.Queue(**FAKE1) sot._translate_response = mock.Mock() - res = sot.get(sess) - - url = 'queues/%s' % FAKE1['name'] - headers = {'Client-ID': 'NEW_CLIENT_ID', - 
'X-PROJECT-ID': 'NEW_PROJECT_ID'} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + + url = 'queues/{}'.format(FAKE1['name']) + headers = { + 'Client-ID': 'NEW_CLIENT_ID', + 'X-PROJECT-ID': 'NEW_PROJECT_ID', + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -124,13 +127,14 @@ def test_get_client_id_project_id_exist(self): sot = queue.Queue(**FAKE2) sot._translate_response = mock.Mock() - res = sot.get(sess) - - url = 'queues/%s' % FAKE2['name'] - headers = {'Client-ID': 'OLD_CLIENT_ID', - 'X-PROJECT-ID': 'OLD_PROJECT_ID'} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + + url = 'queues/{}'.format(FAKE2['name']) + headers = { + 'Client-ID': 'OLD_CLIENT_ID', + 'X-PROJECT-ID': 'OLD_PROJECT_ID', + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -146,11 +150,12 @@ def test_delete(self, mock_uuid): sot._translate_response = mock.Mock() sot.delete(sess) - url = 'queues/%s' % FAKE1['name'] - headers = {'Client-ID': 'NEW_CLIENT_ID', - 'X-PROJECT-ID': 'NEW_PROJECT_ID'} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = 'queues/{}'.format(FAKE1['name']) + headers = { + 'Client-ID': 'NEW_CLIENT_ID', + 'X-PROJECT-ID': 'NEW_PROJECT_ID', + } + sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) @@ -163,9 +168,10 @@ def test_delete_client_id_project_id_exist(self): sot._translate_response = mock.Mock() sot.delete(sess) - url = 'queues/%s' % FAKE2['name'] - headers = {'Client-ID': 'OLD_CLIENT_ID', - 'X-PROJECT-ID': 'OLD_PROJECT_ID'} - 
sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = 'queues/{}'.format(FAKE2['name']) + headers = { + 'Client-ID': 'OLD_CLIENT_ID', + 'X-PROJECT-ID': 'OLD_PROJECT_ID', + } + sess.delete.assert_called_with(url, headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) diff --git a/openstack/tests/unit/message/v2/test_subscription.py b/openstack/tests/unit/message/v2/test_subscription.py index 47c165fc90..57686f8c43 100644 --- a/openstack/tests/unit/message/v2/test_subscription.py +++ b/openstack/tests/unit/message/v2/test_subscription.py @@ -11,11 +11,11 @@ # under the License. import copy -import mock -import testtools +from unittest import mock import uuid from openstack.message.v2 import subscription +from openstack.tests.unit import base FAKE1 = { @@ -25,10 +25,8 @@ "subscription_id": "576b54963990b48c644bb7e7", "source": "test", "ttl": 3600, - "options": { - "name": "test" - }, - "queue_name": "queue1" + "options": {"name": "test"}, + "queue_name": "queue1", } @@ -39,23 +37,20 @@ "subscription_id": "576b54963990b48c644bb7e7", "source": "test", "ttl": 3600, - "options": { - "name": "test" - }, + "options": {"name": "test"}, "queue_name": "queue1", "client_id": "OLD_CLIENT_ID", - "project_id": "OLD_PROJECT_ID" + "project_id": "OLD_PROJECT_ID", } -class TestSubscription(testtools.TestCase): +class TestSubscription(base.TestCase): def test_basic(self): sot = subscription.Subscription() self.assertEqual("subscriptions", sot.resources_key) self.assertEqual("/queues/%(queue_name)s/subscriptions", sot.base_path) - self.assertEqual("messaging", sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -85,12 +80,14 @@ def test_create(self, mock_uuid): sot._translate_response = mock.Mock() res = sot.create(sess) - url = "/queues/%(queue)s/subscriptions" % { - 
"queue": FAKE.pop("queue_name")} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE) + url = "/queues/{queue}/subscriptions".format( + queue=FAKE.pop("queue_name") + ) + headers = { + "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.post.assert_called_once_with(url, headers=headers, json=FAKE) sess.get_project_id.assert_called_once_with() self.assertEqual(sot, res) @@ -104,12 +101,14 @@ def test_create_client_id_project_id_exist(self): sot._translate_response = mock.Mock() res = sot.create(sess) - url = "/queues/%(queue)s/subscriptions" % { - "queue": FAKE.pop("queue_name")} - headers = {"Client-ID": FAKE.pop("client_id"), - "X-PROJECT-ID": FAKE.pop("project_id")} - sess.post.assert_called_once_with(url, endpoint_filter=sot.service, - headers=headers, json=FAKE) + url = "/queues/{queue}/subscriptions".format( + queue=FAKE.pop("queue_name") + ) + headers = { + "Client-ID": FAKE.pop("client_id"), + "X-PROJECT-ID": FAKE.pop("project_id"), + } + sess.post.assert_called_once_with(url, headers=headers, json=FAKE) self.assertEqual(sot, res) @mock.patch.object(uuid, "uuid4") @@ -122,14 +121,17 @@ def test_get(self, mock_uuid): sot = subscription.Subscription(**FAKE1) sot._translate_response = mock.Mock() - res = sot.get(sess) - - url = "queues/%(queue)s/subscriptions/%(subscription)s" % { - "queue": FAKE1["queue_name"], "subscription": FAKE1["id"]} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + + url = "queues/{queue}/subscriptions/{subscription}".format( + queue=FAKE1["queue_name"], + subscription=FAKE1["id"], + ) + headers = { + "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) 
sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -141,14 +143,17 @@ def test_get_client_id_project_id_exist(self): sot = subscription.Subscription(**FAKE2) sot._translate_response = mock.Mock() - res = sot.get(sess) - - url = "queues/%(queue)s/subscriptions/%(subscription)s" % { - "queue": FAKE2["queue_name"], "subscription": FAKE2["id"]} - headers = {"Client-ID": "OLD_CLIENT_ID", - "X-PROJECT-ID": "OLD_PROJECT_ID"} - sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + res = sot.fetch(sess) + + url = "queues/{queue}/subscriptions/{subscription}".format( + queue=FAKE2["queue_name"], + subscription=FAKE2["id"], + ) + headers = { + "Client-ID": "OLD_CLIENT_ID", + "X-PROJECT-ID": "OLD_PROJECT_ID", + } + sess.get.assert_called_with(url, headers=headers, skip_cache=False) sot._translate_response.assert_called_once_with(resp) self.assertEqual(sot, res) @@ -164,12 +169,15 @@ def test_delete(self, mock_uuid): sot._translate_response = mock.Mock() sot.delete(sess) - url = "queues/%(queue)s/subscriptions/%(subscription)s" % { - "queue": FAKE1["queue_name"], "subscription": FAKE1["id"]} - headers = {"Client-ID": "NEW_CLIENT_ID", - "X-PROJECT-ID": "NEW_PROJECT_ID"} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = "queues/{queue}/subscriptions/{subscription}".format( + queue=FAKE1["queue_name"], + subscription=FAKE1["id"], + ) + headers = { + "Client-ID": "NEW_CLIENT_ID", + "X-PROJECT-ID": "NEW_PROJECT_ID", + } + sess.delete.assert_called_with(url, headers=headers) sess.get_project_id.assert_called_once_with() sot._translate_response.assert_called_once_with(resp, has_body=False) @@ -182,10 +190,13 @@ def test_delete_client_id_project_id_exist(self): sot._translate_response = mock.Mock() sot.delete(sess) - url = "queues/%(queue)s/subscriptions/%(subscription)s" % { - "queue": FAKE2["queue_name"], "subscription": FAKE2["id"]} 
- headers = {"Client-ID": "OLD_CLIENT_ID", - "X-PROJECT-ID": "OLD_PROJECT_ID"} - sess.delete.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + url = "queues/{queue}/subscriptions/{subscription}".format( + queue=FAKE2["queue_name"], + subscription=FAKE2["id"], + ) + headers = { + "Client-ID": "OLD_CLIENT_ID", + "X-PROJECT-ID": "OLD_PROJECT_ID", + } + sess.delete.assert_called_with(url, headers=headers) sot._translate_response.assert_called_once_with(resp, has_body=False) diff --git a/openstack/tests/unit/metric/test_metric_service.py b/openstack/tests/unit/metric/test_metric_service.py deleted file mode 100644 index acb846b3ac..0000000000 --- a/openstack/tests/unit/metric/test_metric_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.metric import metric_service - - -class TestMetricService(testtools.TestCase): - - def test_service(self): - sot = metric_service.MetricService() - self.assertEqual('metric', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/metric/v1/test_archive_policy.py b/openstack/tests/unit/metric/v1/test_archive_policy.py deleted file mode 100644 index ef0affca72..0000000000 --- a/openstack/tests/unit/metric/v1/test_archive_policy.py +++ /dev/null @@ -1,60 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from openstack.metric.v1 import archive_policy - -EXAMPLE = { - 'definition': - [ - {u'points': 12, u'timespan': u'1:00:00', - u'granularity': u'0:05:00'}, - {u'points': 24, u'timespan': u'1 day, 0:00:00', - u'granularity': u'1:00:00'}, - {u'points': 30, u'timespan': u'30 days, 0:00:00', - u'granularity': u'1 day, 0:00:00'}, - ], - u'back_window': 0, - u'name': u'low', - u'aggregation_methods': [u'sum', u'max'] -} - - -class TestArchivePolicy(testtools.TestCase): - - def setUp(self): - super(TestArchivePolicy, self).setUp() - self.resp = mock.Mock() - self.resp.body = '' - self.sess = mock.Mock() - self.sess.put = mock.Mock(return_value=self.resp) - - def test_basic(self): - m = archive_policy.ArchivePolicy() - self.assertIsNone(m.resource_key) - self.assertIsNone(m.resources_key) - self.assertEqual('/archive_policy', m.base_path) - self.assertEqual('metric', m.service.service_type) - self.assertTrue(m.allow_create) - self.assertTrue(m.allow_retrieve) - self.assertFalse(m.allow_update) - self.assertTrue(m.allow_delete) - self.assertTrue(m.allow_list) - - def test_make_it(self): - m = archive_policy.ArchivePolicy(EXAMPLE) - self.assertEqual(EXAMPLE['name'], m.name) - self.assertEqual(EXAMPLE['definition'], m.definition) - self.assertEqual(EXAMPLE['back_window'], m.back_window) - self.assertEqual(EXAMPLE['aggregation_methods'], m.aggregation_methods) diff --git a/openstack/tests/unit/metric/v1/test_capabilities.py b/openstack/tests/unit/metric/v1/test_capabilities.py deleted file mode 100644 index 0f2ee38cec..0000000000 --- a/openstack/tests/unit/metric/v1/test_capabilities.py +++ /dev/null @@ -1,36 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.metric.v1 import capabilities - -BODY = { - 'aggregation_methods': ['mean', 'max', 'avg'], -} - - -class TestCapabilites(testtools.TestCase): - def test_basic(self): - sot = capabilities.Capabilities() - self.assertEqual('/capabilities', sot.base_path) - self.assertEqual('metric', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertFalse(sot.allow_list) - - def test_make_it(self): - sot = capabilities.Capabilities(BODY) - self.assertEqual(BODY['aggregation_methods'], - sot.aggregation_methods) diff --git a/openstack/tests/unit/metric/v1/test_metric.py b/openstack/tests/unit/metric/v1/test_metric.py deleted file mode 100644 index 24a9aac4c5..0000000000 --- a/openstack/tests/unit/metric/v1/test_metric.py +++ /dev/null @@ -1,78 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from openstack.metric.v1 import metric - -EXAMPLE = { - 'id': '31bbd62e-b144-11e4-983c-bf9dbe7e25e6', - 'archive_policy_name': 'low', - 'created_by_user_id': '41bbd62e-b144-11e4-983c-bf9dbe7e25e6', - 'created_by_project_id': '51bbd62e-b144-11e4-983c-bf9dbe7e25e6', - 'resource_id': None, - 'name': None, -} - -EXAMPLE_AP = { - 'id': '31bbd62e-b144-11e4-983c-bf9dbe7e25e6', - 'archive_policy': { - 'name': "foobar", - }, - 'created_by_user_id': '41bbd62e-b144-11e4-983c-bf9dbe7e25e6', - 'created_by_project_id': '51bbd62e-b144-11e4-983c-bf9dbe7e25e6', - 'resource_id': "61bbd62e-b144-11e4-983c-bf9dbe7e25e6", - 'name': "foobaz", -} - - -class TestMetric(testtools.TestCase): - - def setUp(self): - super(TestMetric, self).setUp() - self.resp = mock.Mock() - self.resp.body = '' - self.sess = mock.Mock() - self.sess.put = mock.Mock(return_value=self.resp) - - def test_basic(self): - m = metric.Metric() - self.assertIsNone(m.resource_key) - self.assertIsNone(m.resources_key) - self.assertEqual('/metric', m.base_path) - self.assertEqual('metric', m.service.service_type) - self.assertTrue(m.allow_create) - self.assertTrue(m.allow_retrieve) - self.assertFalse(m.allow_update) - self.assertTrue(m.allow_delete) - self.assertTrue(m.allow_list) - - def test_make_it(self): - m = metric.Metric(EXAMPLE) - self.assertEqual(EXAMPLE['id'], m.id) - self.assertEqual(EXAMPLE['archive_policy_name'], m.archive_policy_name) - self.assertEqual(EXAMPLE['created_by_user_id'], m.created_by_user_id) - self.assertEqual(EXAMPLE['created_by_project_id'], - m.created_by_project_id) - self.assertEqual(EXAMPLE['resource_id'], m.resource_id) - self.assertEqual(EXAMPLE['name'], m.name) - - m = metric.Metric(EXAMPLE_AP) - self.assertEqual(EXAMPLE_AP['id'], m.id) - self.assertEqual(EXAMPLE_AP['archive_policy'], m.archive_policy) - self.assertEqual(EXAMPLE_AP['created_by_user_id'], - m.created_by_user_id) - self.assertEqual(EXAMPLE_AP['created_by_project_id'], - 
m.created_by_project_id) - self.assertEqual(EXAMPLE_AP['resource_id'], m.resource_id) - self.assertEqual(EXAMPLE_AP['name'], m.name) diff --git a/openstack/tests/unit/metric/v1/test_proxy.py b/openstack/tests/unit/metric/v1/test_proxy.py deleted file mode 100644 index ceb900149c..0000000000 --- a/openstack/tests/unit/metric/v1/test_proxy.py +++ /dev/null @@ -1,25 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.metric.v1 import _proxy -from openstack.metric.v1 import capabilities -from openstack.tests.unit import test_proxy_base - - -class TestMetricProxy(test_proxy_base.TestProxyBase): - def setUp(self): - super(TestMetricProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - def test_capabilities(self): - self.verify_list(self.proxy.capabilities, capabilities.Capabilities, - paginated=False) diff --git a/openstack/tests/unit/metric/v1/test_resource.py b/openstack/tests/unit/metric/v1/test_resource.py deleted file mode 100644 index 0a8617c0d2..0000000000 --- a/openstack/tests/unit/metric/v1/test_resource.py +++ /dev/null @@ -1,56 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.metric.v1 import resource - - -EXAMPLE_GENERIC = { - "created_by_user_id": "5521eab6-a3bc-4841-b253-d62871b65e76", - "started_at": "2015-03-09T12:14:57.233772", - "user_id": None, - "created_by_project_id": "41649c3e-5f7a-41d1-81fb-2efa76c09e6c", - "metrics": {}, - "ended_at": None, - "project_id": None, - "type": "generic", - "id": "a8d5e83b-0320-45ce-8282-7c8ad8fb8bf6", -} - - -class TestResource(testtools.TestCase): - def test_generic(self): - m = resource.Generic() - self.assertIsNone(m.resource_key) - self.assertIsNone(m.resources_key) - self.assertEqual('/resource/generic', m.base_path) - self.assertEqual('metric', m.service.service_type) - self.assertTrue(m.allow_create) - self.assertTrue(m.allow_retrieve) - self.assertTrue(m.allow_update) - self.assertTrue(m.allow_delete) - self.assertTrue(m.allow_list) - - def test_make_generic(self): - r = resource.Generic(EXAMPLE_GENERIC) - self.assertEqual(EXAMPLE_GENERIC['created_by_user_id'], - r.created_by_user_id) - self.assertEqual(EXAMPLE_GENERIC['created_by_project_id'], - r.created_by_project_id) - self.assertEqual(EXAMPLE_GENERIC['user_id'], r.user_id) - self.assertEqual(EXAMPLE_GENERIC['project_id'], r.project_id) - self.assertEqual(EXAMPLE_GENERIC['type'], r.type) - self.assertEqual(EXAMPLE_GENERIC['id'], r.id) - self.assertEqual(EXAMPLE_GENERIC['metrics'], r.metrics) - self.assertEqual(EXAMPLE_GENERIC['started_at'], r.started_at) - self.assertEqual(EXAMPLE_GENERIC['ended_at'], r.ended_at) diff --git a/openstack/tests/unit/network/test_network_service.py 
b/openstack/tests/unit/network/test_network_service.py deleted file mode 100644 index 22980fe36a..0000000000 --- a/openstack/tests/unit/network/test_network_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.network import network_service - - -class TestNetworkService(testtools.TestCase): - - def test_service(self): - sot = network_service.NetworkService() - self.assertEqual('network', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v2', sot.valid_versions[0].module) - self.assertEqual('v2.0', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/network/test_version.py b/openstack/tests/unit/network/test_version.py index cfd1951a7e..64724160cf 100644 --- a/openstack/tests/unit/network/test_version.py +++ b/openstack/tests/unit/network/test_version.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network import version +from openstack.tests.unit import base + IDENTIFIER = 'v2.0' EXAMPLE = { @@ -22,22 +22,20 @@ } -class TestVersion(testtools.TestCase): - +class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = version.Version(EXAMPLE) + sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/network/v2/test_address_group.py b/openstack/tests/unit/network/v2/test_address_group.py new file mode 100644 index 0000000000..b9f9d68034 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_address_group.py @@ -0,0 +1,58 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import address_group +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'name': '1', + 'description': '2', + 'project_id': '3', + 'addresses': ['10.0.0.1/32'], +} + + +class TestAddressGroup(base.TestCase): + def test_basic(self): + sot = address_group.AddressGroup() + self.assertEqual('address_group', sot.resource_key) + self.assertEqual('address_groups', sot.resources_key) + self.assertEqual('/address-groups', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + "name": "name", + "description": "description", + "project_id": "project_id", + "sort_key": "sort_key", + "sort_dir": "sort_dir", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = address_group.AddressGroup(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertCountEqual(EXAMPLE['addresses'], sot.addresses) diff --git a/openstack/tests/unit/network/v2/test_address_scope.py b/openstack/tests/unit/network/v2/test_address_scope.py index ae9cbe85b4..a00e91456d 100644 --- a/openstack/tests/unit/network/v2/test_address_scope.py +++ b/openstack/tests/unit/network/v2/test_address_scope.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import address_scope +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -20,21 +20,19 @@ 'ip_version': 4, 'name': '1', 'shared': True, - 'tenant_id': '2', + 'project_id': '2', } -class TestAddressScope(testtools.TestCase): - +class TestAddressScope(base.TestCase): def test_basic(self): sot = address_scope.AddressScope() self.assertEqual('address_scope', sot.resource_key) self.assertEqual('address_scopes', sot.resources_key) self.assertEqual('/address-scopes', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -44,4 +42,4 @@ def test_make_it(self): self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) self.assertEqual(EXAMPLE['name'], sot.name) self.assertTrue(sot.is_shared) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) diff --git a/openstack/tests/unit/network/v2/test_agent.py b/openstack/tests/unit/network/v2/test_agent.py index 841af05d7f..16ad82bbd1 100644 --- a/openstack/tests/unit/network/v2/test_agent.py +++ b/openstack/tests/unit/network/v2/test_agent.py @@ -10,10 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +from unittest import mock from openstack.network.v2 import agent +from openstack.network.v2 import bgp_speaker +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -28,22 +29,22 @@ 'heartbeat_timestamp': '2016-08-09T12:14:57.233772', 'host': 'test-host', 'id': IDENTIFIER, + 'resources_synced': False, 'started_at': '2016-07-09T12:14:57.233772', - 'topic': 'test-topic' + 'topic': 'test-topic', + 'ha_state': 'active', } -class TestAgent(testtools.TestCase): - +class TestAgent(base.TestCase): def test_basic(self): sot = agent.Agent() self.assertEqual('agent', sot.resource_key) self.assertEqual('agents', sot.resources_key) self.assertEqual('/agents', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -52,8 +53,7 @@ def test_make_it(self): self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['agent_type'], sot.agent_type) self.assertTrue(sot.is_alive) - self.assertEqual(EXAMPLE['availability_zone'], - sot.availability_zone) + self.assertEqual(EXAMPLE['availability_zone'], sot.availability_zone) self.assertEqual(EXAMPLE['binary'], sot.binary) self.assertEqual(EXAMPLE['configurations'], sot.configuration) self.assertEqual(EXAMPLE['created_at'], sot.created_at) @@ -61,8 +61,10 @@ def test_make_it(self): self.assertEqual(EXAMPLE['heartbeat_timestamp'], sot.last_heartbeat_at) self.assertEqual(EXAMPLE['host'], sot.host) self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['resources_synced'], sot.resources_synced) self.assertEqual(EXAMPLE['started_at'], sot.started_at) self.assertEqual(EXAMPLE['topic'], sot.topic) + self.assertEqual(EXAMPLE['ha_state'], sot.ha_state) def test_add_agent_to_network(self): # Add agent to network @@ -76,31 
+78,96 @@ def test_add_agent_to_network(self): self.assertEqual(response.body, net.add_agent_to_network(sess, **body)) url = 'agents/IDENTIFIER/dhcp-networks' - sess.post.assert_called_with(url, endpoint_filter=net.service, - json=body) + sess.post.assert_called_with(url, json=body) def test_remove_agent_from_network(self): # Remove agent from agent net = agent.Agent(**EXAMPLE) sess = mock.Mock() - self.assertIsNone(net.remove_agent_from_network(sess)) - body = {} + network_id = {} + self.assertIsNone(net.remove_agent_from_network(sess, network_id)) + body = {'network_id': {}} + + sess.delete.assert_called_with( + 'agents/IDENTIFIER/dhcp-networks/', json=body + ) - sess.delete.assert_called_with('agents/IDENTIFIER/dhcp-networks/', - endpoint_filter=net.service, json=body) + def test_add_router_to_agent(self): + # Add router to agent + sot = agent.Agent(**EXAMPLE) + response = mock.Mock() + response.body = {'router_id': '1'} + response.json = mock.Mock(return_value=response.body) + sess = mock.Mock() + sess.post = mock.Mock(return_value=response) + router_id = '1' + self.assertEqual( + response.body, sot.add_router_to_agent(sess, router_id) + ) + body = {'router_id': router_id} + url = 'agents/IDENTIFIER/l3-routers' + sess.post.assert_called_with(url, json=body) + def test_remove_router_from_agent(self): + # Remove router from agent + sot = agent.Agent(**EXAMPLE) + sess = mock.Mock() + router_id = {} + self.assertIsNone(sot.remove_router_from_agent(sess, router_id)) + body = {'router_id': {}} -class TestDHCPAgentHostingNetwork(testtools.TestCase): + sess.delete.assert_called_with( + 'agents/IDENTIFIER/l3-routers/', json=body + ) + @mock.patch.object(bgp_speaker.BgpSpeaker, 'list') + def test_get_bgp_speakers_hosted_by_dragent(self, mock_list): + sot = agent.Agent(**EXAMPLE) + sess = mock.Mock() + response = mock.Mock() + speaker_body = { + 'bgp_speakers': [ + {'name': 'bgp_speaker_1', 'ip_version': 4, 'id': IDENTIFIER} + ] + } + response.body = speaker_body + 
mock_list.return_value = [ + bgp_speaker.BgpSpeaker(**speaker_body['bgp_speakers'][0]) + ] + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess.get = mock.Mock(return_value=response) + resp = sot.get_bgp_speakers_hosted_by_dragent(sess) + + self.assertEqual( + resp, [bgp_speaker.BgpSpeaker(**response.body['bgp_speakers'][0])] + ) + sess.get.assert_called_with('agents/IDENTIFIER/bgp-drinstances') + + +class TestNetworkHostingDHCPAgent(base.TestCase): def test_basic(self): - net = agent.DHCPAgentHostingNetwork() - self.assertEqual('network', net.resource_key) - self.assertEqual('networks', net.resources_key) - self.assertEqual('/agents/%(agent_id)s/dhcp-networks', net.base_path) - self.assertEqual('dhcp-network', net.resource_name) - self.assertEqual('network', net.service.service_type) + net = agent.NetworkHostingDHCPAgent() + self.assertEqual('agent', net.resource_key) + self.assertEqual('agents', net.resources_key) + self.assertEqual('/networks/%(network_id)s/dhcp-agents', net.base_path) + self.assertEqual('dhcp-agent', net.resource_name) self.assertFalse(net.allow_create) - self.assertTrue(net.allow_get) - self.assertFalse(net.allow_update) + self.assertTrue(net.allow_fetch) + self.assertFalse(net.allow_commit) self.assertFalse(net.allow_delete) self.assertTrue(net.allow_list) + + +class TestRouterL3Agent(base.TestCase): + def test_basic(self): + sot = agent.RouterL3Agent() + self.assertEqual('agent', sot.resource_key) + self.assertEqual('agents', sot.resources_key) + self.assertEqual('/routers/%(router_id)s/l3-agents', sot.base_path) + self.assertEqual('l3-agent', sot.resource_name) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_retrieve) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_auto_allocated_topology.py b/openstack/tests/unit/network/v2/test_auto_allocated_topology.py index 
941660a1f8..acb3bccc82 100644 --- a/openstack/tests/unit/network/v2/test_auto_allocated_topology.py +++ b/openstack/tests/unit/network/v2/test_auto_allocated_topology.py @@ -10,28 +10,27 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import auto_allocated_topology +from openstack.tests.unit import base + EXAMPLE = { - 'tenant_id': '1', + 'project_id': '1', 'dry_run': False, } -class TestAutoAllocatedTopology(testtools.TestCase): - +class TestAutoAllocatedTopology(base.TestCase): def test_basic(self): topo = auto_allocated_topology.AutoAllocatedTopology self.assertEqual('auto_allocated_topology', topo.resource_key) self.assertEqual('/auto-allocated-topology', topo.base_path) self.assertFalse(topo.allow_create) - self.assertTrue(topo.allow_get) - self.assertFalse(topo.allow_update) + self.assertTrue(topo.allow_fetch) + self.assertFalse(topo.allow_commit) self.assertTrue(topo.allow_delete) self.assertFalse(topo.allow_list) def test_make_it(self): topo = auto_allocated_topology.AutoAllocatedTopology(**EXAMPLE) - self.assertEqual(EXAMPLE['tenant_id'], topo.project_id) + self.assertEqual(EXAMPLE['project_id'], topo.project_id) diff --git a/openstack/tests/unit/network/v2/test_availability_zone.py b/openstack/tests/unit/network/v2/test_availability_zone.py index 7ab1031e86..2961d6c146 100644 --- a/openstack/tests/unit/network/v2/test_availability_zone.py +++ b/openstack/tests/unit/network/v2/test_availability_zone.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import availability_zone +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,17 +23,15 @@ } -class TestAvailabilityZone(testtools.TestCase): - +class TestAvailabilityZone(base.TestCase): def test_basic(self): sot = availability_zone.AvailabilityZone() self.assertEqual('availability_zone', sot.resource_key) self.assertEqual('availability_zones', sot.resources_key) self.assertEqual('/availability_zones', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_bgp_peer.py b/openstack/tests/unit/network/v2/test_bgp_peer.py new file mode 100644 index 0000000000..7842725adc --- /dev/null +++ b/openstack/tests/unit/network/v2/test_bgp_peer.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import bgp_peer +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'auth_type': 'none', + 'remote_as': '1001', + 'name': 'bgp-peer', + 'peer_ip': '10.0.0.3', + 'id': IDENTIFIER, + 'project_id': '42', +} + + +class TestBgpPeer(base.TestCase): + def test_basic(self): + sot = bgp_peer.BgpPeer() + self.assertEqual('bgp_peer', sot.resource_key) + self.assertEqual('bgp_peers', sot.resources_key) + self.assertEqual('/bgp-peers', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = bgp_peer.BgpPeer(**EXAMPLE) + self.assertEqual(EXAMPLE['auth_type'], sot.auth_type) + self.assertEqual(EXAMPLE['remote_as'], sot.remote_as) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['peer_ip'], sot.peer_ip) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_bgp_speaker.py b/openstack/tests/unit/network/v2/test_bgp_speaker.py new file mode 100644 index 0000000000..4c07c189c7 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_bgp_speaker.py @@ -0,0 +1,195 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack.network.v2 import agent +from openstack.network.v2 import bgp_speaker +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'name': 'bgp-speaker', + 'peers': [], + 'ip_version': 4, + 'advertise_floating_ip_host_routes': 'true', + 'advertise_tenant_networks': 'true', + 'local_as': 1000, + 'networks': [], + 'project_id': '42', +} + + +class TestBgpSpeaker(base.TestCase): + def test_basic(self): + sot = bgp_speaker.BgpSpeaker() + self.assertEqual('bgp_speaker', sot.resource_key) + self.assertEqual('bgp_speakers', sot.resources_key) + self.assertEqual('/bgp-speakers', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) + self.assertEqual( + EXAMPLE['advertise_floating_ip_host_routes'], + sot.advertise_floating_ip_host_routes, + ) + self.assertEqual(EXAMPLE['local_as'], sot.local_as) + self.assertEqual(EXAMPLE['networks'], sot.networks) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + + def test_add_bgp_peer(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + response = mock.Mock() + response.body = {'bgp_peer_id': '101'} + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + ret = sot.add_bgp_peer(sess, '101') + self.assertIsInstance(ret, dict) + self.assertEqual(ret, {'bgp_peer_id': '101'}) + + body = {'bgp_peer_id': '101'} + url = 'bgp-speakers/IDENTIFIER/add_bgp_peer' + sess.put.assert_called_with(url, 
json=body) + + def test_remove_bgp_peer(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + response = mock.Mock() + response.body = {'bgp_peer_id': '102'} + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + ret = sot.remove_bgp_peer(sess, '102') + self.assertIsNone(ret) + + body = {'bgp_peer_id': '102'} + url = 'bgp-speakers/IDENTIFIER/remove_bgp_peer' + sess.put.assert_called_with(url, json=body) + + def test_add_gateway_network(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + response = mock.Mock() + response.body = {'network_id': 'net_id'} + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + ret = sot.add_gateway_network(sess, 'net_id') + self.assertIsInstance(ret, dict) + self.assertEqual(ret, {'network_id': 'net_id'}) + + body = {'network_id': 'net_id'} + url = 'bgp-speakers/IDENTIFIER/add_gateway_network' + sess.put.assert_called_with(url, json=body) + + def test_remove_gateway_network(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + response = mock.Mock() + response.body = {'network_id': 'net_id42'} + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + ret = sot.remove_gateway_network(sess, 'net_id42') + self.assertIsNone(ret) + + body = {'network_id': 'net_id42'} + url = 'bgp-speakers/IDENTIFIER/remove_gateway_network' + sess.put.assert_called_with(url, json=body) + + def test_get_advertised_routes(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + response = mock.Mock() + response.body = { + 'advertised_routes': [ + {'cidr': '192.168.10.0/24', 'nexthop': '10.0.0.1'} + ] + } + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.get = mock.Mock(return_value=response) + ret = 
sot.get_advertised_routes(sess) + + url = 'bgp-speakers/IDENTIFIER/get_advertised_routes' + sess.get.assert_called_with(url) + self.assertEqual(ret, response.body) + + @mock.patch.object(agent.Agent, 'list') + def test_get_bgp_dragents(self, mock_list): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + response = mock.Mock() + agent_body = { + 'agents': [ + { + 'binary': 'neutron-bgp-dragent', + 'alive': True, + 'id': IDENTIFIER, + } + ] + } + response.body = agent_body + mock_list.return_value = [agent.Agent(**agent_body['agents'][0])] + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.get = mock.Mock(return_value=response) + ret = sot.get_bgp_dragents(sess) + + url = 'bgp-speakers/IDENTIFIER/bgp-dragents' + sess.get.assert_called_with(url) + self.assertEqual(ret, [agent.Agent(**response.body['agents'][0])]) + + def test_add_bgp_speaker_to_dragent(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + agent_id = '123-42' + response = mock.Mock() + response.status_code = 201 + sess = mock.Mock() + sess.post = mock.Mock(return_value=response) + self.assertIsNone(sot.add_bgp_speaker_to_dragent(sess, agent_id)) + + body = {'bgp_speaker_id': sot.id} + url = f'agents/{agent_id}/bgp-drinstances' + sess.post.assert_called_with(url, json=body) + + def test_remove_bgp_speaker_from_dragent(self): + sot = bgp_speaker.BgpSpeaker(**EXAMPLE) + agent_id = '123-42' + response = mock.Mock() + response.status_code = 204 + sess = mock.Mock() + sess.delete = mock.Mock(return_value=response) + self.assertIsNone(sot.remove_bgp_speaker_from_dragent(sess, agent_id)) + + url = f'agents/{agent_id}/bgp-drinstances/{IDENTIFIER}' + sess.delete.assert_called_with(url) diff --git a/openstack/tests/unit/network/v2/test_bgpvpn.py b/openstack/tests/unit/network/v2/test_bgpvpn.py new file mode 100644 index 0000000000..0aba1fefbb --- /dev/null +++ b/openstack/tests/unit/network/v2/test_bgpvpn.py @@ -0,0 +1,104 @@ +# Licensed under the Apache 
License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import bgpvpn +from openstack.network.v2 import bgpvpn_network_association +from openstack.network.v2 import bgpvpn_port_association +from openstack.network.v2 import bgpvpn_router_association +from openstack.network.v2 import network +from openstack.network.v2 import port +from openstack.network.v2 import router +from openstack.tests.unit import base + +IDENTIFIER = 'IDENTIFIER' +NET_ID = 'NET_ID' +PORT_ID = 'PORT_ID' +ROUTER_ID = 'ROUTER_ID' +EXAMPLE = { + 'id': IDENTIFIER, + 'name': 'bgpvpn', + 'project_id': '42', + 'route_distinguishers': ['64512:1777', '64512:1888', '64512:1999'], + 'route_targets': '64512:1444', + 'import_targets': '64512:1555', + 'export_targets': '64512:1666', +} + + +class TestBgpVpn(base.TestCase): + def test_basic(self): + sot = bgpvpn.BgpVpn() + self.assertEqual('bgpvpn', sot.resource_key) + self.assertEqual('bgpvpns', sot.resources_key) + self.assertEqual('/bgpvpn/bgpvpns', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = bgpvpn.BgpVpn(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual( + EXAMPLE['route_distinguishers'], sot.route_distinguishers + ) + self.assertEqual(EXAMPLE['route_targets'], 
sot.route_targets) + self.assertEqual(EXAMPLE['import_targets'], sot.import_targets) + self.assertEqual(EXAMPLE['export_targets'], sot.export_targets) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'local_pref': 'local_pref', + 'name': 'name', + 'networks': 'networks', + 'routers': 'routers', + 'ports': 'ports', + 'project_id': 'project_id', + 'type': 'type', + 'vni': 'vni', + }, + sot._query_mapping._mapping, + ) + + def test_create_bgpvpn_network_association(self): + test_bpgvpn = bgpvpn.BgpVpn(**EXAMPLE) + test_net = network.Network(**{'name': 'foo_net', 'id': NET_ID}) + sot = bgpvpn_network_association.BgpVpnNetworkAssociation( + bgpvn_id=test_bpgvpn.id, network_id=test_net.id + ) + self.assertEqual(test_net.id, sot.network_id) + self.assertEqual(test_bpgvpn.id, sot.bgpvn_id) + + def test_create_bgpvpn_port_association(self): + test_bpgvpn = bgpvpn.BgpVpn(**EXAMPLE) + test_port = port.Port( + **{'name': 'foo_port', 'id': PORT_ID, 'network_id': NET_ID} + ) + sot = bgpvpn_port_association.BgpVpnPortAssociation( + bgpvn_id=test_bpgvpn.id, port_id=test_port.id + ) + self.assertEqual(test_port.id, sot.port_id) + self.assertEqual(test_bpgvpn.id, sot.bgpvn_id) + + def test_create_bgpvpn_router_association(self): + test_bpgvpn = bgpvpn.BgpVpn(**EXAMPLE) + test_router = router.Router(**{'name': 'foo_port'}) + sot = bgpvpn_router_association.BgpVpnRouterAssociation( + bgpvn_id=test_bpgvpn.id, router_id=test_router.id + ) + self.assertEqual(test_router.id, sot.router_id) + self.assertEqual(test_bpgvpn.id, sot.bgpvn_id) diff --git a/openstack/tests/unit/network/v2/test_default_security_group_rule.py b/openstack/tests/unit/network/v2/test_default_security_group_rule.py new file mode 100644 index 0000000000..f4b0e27d7b --- /dev/null +++ b/openstack/tests/unit/network/v2/test_default_security_group_rule.py @@ -0,0 +1,85 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the 
License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import default_security_group_rule +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'description': '1', + 'direction': '2', + 'ethertype': '3', + 'id': IDENTIFIER, + 'port_range_max': 4, + 'port_range_min': 5, + 'protocol': '6', + 'remote_group_id': '7', + 'remote_ip_prefix': '8', + 'remote_address_group_id': '13', + 'used_in_default_sg': True, + 'used_in_non_default_sg': True, +} + + +class TestDefaultSecurityGroupRule(base.TestCase): + def test_basic(self): + sot = default_security_group_rule.DefaultSecurityGroupRule() + self.assertEqual('default_security_group_rule', sot.resource_key) + self.assertEqual('default_security_group_rules', sot.resources_key) + self.assertEqual('/default-security-group-rules', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + 'description': 'description', + 'direction': 'direction', + 'id': 'id', + 'ether_type': 'ethertype', + 'limit': 'limit', + 'marker': 'marker', + 'port_range_max': 'port_range_max', + 'port_range_min': 'port_range_min', + 'protocol': 'protocol', + 'remote_group_id': 'remote_group_id', + 'remote_address_group_id': 'remote_address_group_id', + 'remote_ip_prefix': 'remote_ip_prefix', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + 'used_in_default_sg': 'used_in_default_sg', + 'used_in_non_default_sg': 'used_in_non_default_sg', + }, + 
sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = default_security_group_rule.DefaultSecurityGroupRule(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['direction'], sot.direction) + self.assertEqual(EXAMPLE['ethertype'], sot.ether_type) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['port_range_max'], sot.port_range_max) + self.assertEqual(EXAMPLE['port_range_min'], sot.port_range_min) + self.assertEqual(EXAMPLE['protocol'], sot.protocol) + self.assertEqual(EXAMPLE['remote_group_id'], sot.remote_group_id) + self.assertEqual( + EXAMPLE['remote_address_group_id'], sot.remote_address_group_id + ) + self.assertEqual(EXAMPLE['remote_ip_prefix'], sot.remote_ip_prefix) + self.assertEqual(EXAMPLE['used_in_default_sg'], sot.used_in_default_sg) + self.assertEqual( + EXAMPLE['used_in_non_default_sg'], sot.used_in_non_default_sg + ) diff --git a/openstack/tests/unit/network/v2/test_extension.py b/openstack/tests/unit/network/v2/test_extension.py index fd2511b735..1f0d1468f6 100644 --- a/openstack/tests/unit/network/v2/test_extension.py +++ b/openstack/tests/unit/network/v2/test_extension.py @@ -10,31 +10,29 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import extension +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'alias': '1', 'description': '2', - 'links': '3', + 'links': [], 'name': '4', 'updated': '2016-03-09T12:14:57.233772', } -class TestExtension(testtools.TestCase): - +class TestExtension(base.TestCase): def test_basic(self): sot = extension.Extension() self.assertEqual('extension', sot.resource_key) self.assertEqual('extensions', sot.resources_key) self.assertEqual('/extensions', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_firewall_group.py b/openstack/tests/unit/network/v2/test_firewall_group.py new file mode 100644 index 0000000000..9eb3bb4c98 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_firewall_group.py @@ -0,0 +1,61 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import testtools + +from openstack.network.v2 import firewall_group + +IDENTIFIER = 'IDENTIFIER' + +EXAMPLE = { + 'description': '1', + 'name': '2', + 'egress_firewall_policy_id': '3', + 'ingress_firewall_policy_id': '4', + 'shared': True, + 'status': 'ACTIVE', + 'ports': ['5', '6'], + 'project_id': '7', +} + + +class TestFirewallGroup(testtools.TestCase): + def test_basic(self): + sot = firewall_group.FirewallGroup() + self.assertEqual('firewall_group', sot.resource_key) + self.assertEqual('firewall_groups', sot.resources_key) + self.assertEqual('/fwaas/firewall_groups', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = firewall_group.FirewallGroup(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual( + EXAMPLE['egress_firewall_policy_id'], sot.egress_firewall_policy_id + ) + self.assertEqual( + EXAMPLE['ingress_firewall_policy_id'], + sot.ingress_firewall_policy_id, + ) + self.assertEqual(EXAMPLE['shared'], sot.shared) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(list, type(sot.ports)) + self.assertEqual(EXAMPLE['ports'], sot.ports) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) diff --git a/openstack/tests/unit/network/v2/test_firewall_policy.py b/openstack/tests/unit/network/v2/test_firewall_policy.py new file mode 100644 index 0000000000..2d2312515e --- /dev/null +++ b/openstack/tests/unit/network/v2/test_firewall_policy.py @@ -0,0 +1,52 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +from openstack.network.v2 import firewall_policy + + +EXAMPLE = { + 'description': '1', + 'name': '2', + 'firewall_rules': [ + 'a30b0ec2-a468-4b1c-8dbf-928ded2a57a8', + '8d562e98-24f3-46e1-bbf3-d9347c0a67ee', + ], + 'shared': True, + 'project_id': '4', +} + + +class TestFirewallPolicy(testtools.TestCase): + def test_basic(self): + sot = firewall_policy.FirewallPolicy() + self.assertEqual('firewall_policy', sot.resource_key) + self.assertEqual('firewall_policies', sot.resources_key) + self.assertEqual('/fwaas/firewall_policies', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = firewall_policy.FirewallPolicy(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['firewall_rules'], sot.firewall_rules) + self.assertEqual(EXAMPLE['shared'], sot.shared) + self.assertEqual(list, type(sot.firewall_rules)) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) diff --git a/openstack/tests/unit/network/v2/test_firewall_rule.py b/openstack/tests/unit/network/v2/test_firewall_rule.py new file mode 100644 index 0000000000..63c764d830 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_firewall_rule.py @@ -0,0 +1,63 @@ +# Copyright (c) 2018 China Telecom Corporation +# All Rights Reserved. 
+# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import testtools + +from openstack.network.v2 import firewall_rule + +EXAMPLE = { + 'action': 'allow', + 'description': '1', + 'destination_ip_address': '10.0.0.2/24', + 'destination_port': '2', + 'name': '3', + 'enabled': True, + 'ip_version': 4, + 'protocol': 'tcp', + 'shared': True, + 'source_ip_address': '10.0.1.2/24', + 'source_port': '5', + 'project_id': '6', +} + + +class TestFirewallRule(testtools.TestCase): + def test_basic(self): + sot = firewall_rule.FirewallRule() + self.assertEqual('firewall_rule', sot.resource_key) + self.assertEqual('firewall_rules', sot.resources_key) + self.assertEqual('/fwaas/firewall_rules', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = firewall_rule.FirewallRule(**EXAMPLE) + self.assertEqual(EXAMPLE['action'], sot.action) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual( + EXAMPLE['destination_ip_address'], sot.destination_ip_address + ) + self.assertEqual(EXAMPLE['destination_port'], sot.destination_port) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['enabled'], sot.enabled) + self.assertEqual(EXAMPLE['ip_version'], sot.ip_version) + self.assertEqual(EXAMPLE['protocol'], sot.protocol) + self.assertEqual(EXAMPLE['shared'], sot.shared) + 
self.assertEqual(EXAMPLE['source_ip_address'], sot.source_ip_address) + self.assertEqual(EXAMPLE['source_port'], sot.source_port) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) diff --git a/openstack/tests/unit/network/v2/test_flavor.py b/openstack/tests/unit/network/v2/test_flavor.py index ffce616946..27246d0b7e 100644 --- a/openstack/tests/unit/network/v2/test_flavor.py +++ b/openstack/tests/unit/network/v2/test_flavor.py @@ -10,12 +10,14 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools +from unittest import mock from openstack.network.v2 import flavor +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE_WITH_OPTIONAL = { + 'id': IDENTIFIER, 'name': 'test-flavor', 'service_type': 'VPN', 'description': 'VPN flavor', @@ -24,21 +26,21 @@ } EXAMPLE = { + 'id': IDENTIFIER, 'name': 'test-flavor', 'service_type': 'VPN', } -class TestFlavor(testtools.TestCase): +class TestFlavor(base.TestCase): def test_basic(self): flavors = flavor.Flavor() self.assertEqual('flavor', flavors.resource_key) self.assertEqual('flavors', flavors.resources_key) self.assertEqual('/flavors', flavors.base_path) - self.assertEqual('network', flavors.service.service_type) self.assertTrue(flavors.allow_create) - self.assertTrue(flavors.allow_get) - self.assertTrue(flavors.allow_update) + self.assertTrue(flavors.allow_fetch) + self.assertTrue(flavors.allow_commit) self.assertTrue(flavors.allow_delete) self.assertTrue(flavors.allow_list) @@ -50,10 +52,48 @@ def test_make_it(self): def test_make_it_with_optional(self): flavors = flavor.Flavor(**EXAMPLE_WITH_OPTIONAL) self.assertEqual(EXAMPLE_WITH_OPTIONAL['name'], flavors.name) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['service_type'], - flavors.service_type) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['description'], - flavors.description) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['service_type'], flavors.service_type + ) + self.assertEqual( + 
EXAMPLE_WITH_OPTIONAL['description'], flavors.description + ) self.assertEqual(EXAMPLE_WITH_OPTIONAL['enabled'], flavors.is_enabled) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['service_profiles'], - flavors.service_profile_ids) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['service_profiles'], + flavors.service_profile_ids, + ) + + def test_associate_flavor_with_service_profile(self): + flav = flavor.Flavor(EXAMPLE) + response = mock.Mock() + response.body = { + 'service_profile': {'id': '1'}, + } + response.json = mock.Mock(return_value=response.body) + sess = mock.Mock() + sess.post = mock.Mock(return_value=response) + flav.id = 'IDENTIFIER' + self.assertEqual( + response.body, + flav.associate_flavor_with_service_profile(sess, '1'), + ) + + url = 'flavors/IDENTIFIER/service_profiles' + sess.post.assert_called_with(url, json=response.body) + + def test_disassociate_flavor_from_service_profile(self): + flav = flavor.Flavor(EXAMPLE) + response = mock.Mock() + response.json = mock.Mock(return_value=response.body) + sess = mock.Mock() + sess.post = mock.Mock(return_value=response) + flav.id = 'IDENTIFIER' + self.assertEqual( + None, flav.disassociate_flavor_from_service_profile(sess, '1') + ) + + url = 'flavors/IDENTIFIER/service_profiles/1' + sess.delete.assert_called_with( + url, + ) diff --git a/openstack/tests/unit/network/v2/test_floating_ip.py b/openstack/tests/unit/network/v2/test_floating_ip.py index 4673233a03..c753090996 100644 --- a/openstack/tests/unit/network/v2/test_floating_ip.py +++ b/openstack/tests/unit/network/v2/test_floating_ip.py @@ -10,10 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +from unittest import mock from openstack.network.v2 import floating_ip +from openstack import proxy +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,26 +24,29 @@ 'floating_network_id': '3', 'id': IDENTIFIER, 'port_id': '5', - 'tenant_id': '6', + 'qos_policy_id': '51', + 'project_id': '6', 'router_id': '7', 'description': '8', + 'dns_domain': '9', + 'dns_name': '10', 'status': 'ACTIVE', 'revision_number': 12, 'updated_at': '13', + 'subnet_id': '14', + 'tags': ['15', '16'], } -class TestFloatingIP(testtools.TestCase): - +class TestFloatingIP(base.TestCase): def test_basic(self): sot = floating_ip.FloatingIP() self.assertEqual('floatingip', sot.resource_key) self.assertEqual('floatingips', sot.resources_key) self.assertEqual('/floatingips', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -50,26 +54,60 @@ def test_make_it(self): sot = floating_ip.FloatingIP(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['fixed_ip_address'], sot.fixed_ip_address) - self.assertEqual(EXAMPLE['floating_ip_address'], - sot.floating_ip_address) - self.assertEqual(EXAMPLE['floating_network_id'], - sot.floating_network_id) + self.assertEqual( + EXAMPLE['floating_ip_address'], sot.floating_ip_address + ) + self.assertEqual( + EXAMPLE['floating_network_id'], sot.floating_network_id + ) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['port_id'], sot.port_id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['router_id'], sot.router_id) self.assertEqual(EXAMPLE['description'], sot.description) + 
self.assertEqual(EXAMPLE['dns_domain'], sot.dns_domain) + self.assertEqual(EXAMPLE['dns_name'], sot.dns_name) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) + self.assertEqual(EXAMPLE['tags'], sot.tags) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'description': 'description', + 'project_id': 'project_id', + 'tenant_id': 'project_id', + 'status': 'status', + 'port_id': 'port_id', + 'subnet_id': 'subnet_id', + 'router_id': 'router_id', + 'fixed_ip_address': 'fixed_ip_address', + 'floating_ip_address': 'floating_ip_address', + 'floating_network_id': 'floating_network_id', + 'id': 'id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + }, + sot._query_mapping._mapping, + ) def test_find_available(self): - mock_session = mock.Mock() + mock_session = mock.Mock(spec=proxy.Proxy) mock_session.get_filter = mock.Mock(return_value={}) + mock_session.default_microversion = None + mock_session.session = self.cloud.session data = {'id': 'one', 'floating_ip_address': '10.0.0.1'} fake_response = mock.Mock() body = {floating_ip.FloatingIP.resources_key: [data]} fake_response.json = mock.Mock(return_value=body) + fake_response.status_code = 200 mock_session.get = mock.Mock(return_value=fake_response) result = floating_ip.FloatingIP.find_available(mock_session) @@ -77,15 +115,18 @@ def test_find_available(self): self.assertEqual('one', result.id) mock_session.get.assert_called_with( floating_ip.FloatingIP.base_path, - endpoint_filter=floating_ip.FloatingIP.service, headers={'Accept': 'application/json'}, - params={'port_id': ''}) + params={}, + microversion=None, + ) def test_find_available_nada(self): - mock_session = mock.Mock() + mock_session = 
mock.Mock(spec=proxy.Proxy) + mock_session.default_microversion = None fake_response = mock.Mock() body = {floating_ip.FloatingIP.resources_key: []} fake_response.json = mock.Mock(return_value=body) + fake_response.status_code = 200 mock_session.get = mock.Mock(return_value=fake_response) self.assertIsNone(floating_ip.FloatingIP.find_available(mock_session)) diff --git a/openstack/tests/unit/network/v2/test_health_monitor.py b/openstack/tests/unit/network/v2/test_health_monitor.py index b5d89fae46..fd857717ab 100644 --- a/openstack/tests/unit/network/v2/test_health_monitor.py +++ b/openstack/tests/unit/network/v2/test_health_monitor.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import health_monitor +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,7 +23,8 @@ 'id': IDENTIFIER, 'max_retries': '6', 'pools': [{'id': '7'}], - 'tenant_id': '8', + 'pool_id': '7', + 'project_id': '8', 'timeout': '9', 'type': '10', 'url_path': '11', @@ -31,17 +32,15 @@ } -class TestHealthMonitor(testtools.TestCase): - +class TestHealthMonitor(base.TestCase): def test_basic(self): sot = health_monitor.HealthMonitor() self.assertEqual('healthmonitor', sot.resource_key) self.assertEqual('healthmonitors', sot.resources_key) self.assertEqual('/lbaas/healthmonitors', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -54,7 +53,8 @@ def test_make_it(self): self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['max_retries'], sot.max_retries) self.assertEqual(EXAMPLE['pools'], sot.pool_ids) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + 
self.assertEqual(EXAMPLE['pool_id'], sot.pool_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['timeout'], sot.timeout) self.assertEqual(EXAMPLE['type'], sot.type) self.assertEqual(EXAMPLE['url_path'], sot.url_path) diff --git a/openstack/tests/unit/network/v2/test_l3_conntrack_helper.py b/openstack/tests/unit/network/v2/test_l3_conntrack_helper.py new file mode 100644 index 0000000000..4136475e9b --- /dev/null +++ b/openstack/tests/unit/network/v2/test_l3_conntrack_helper.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import l3_conntrack_helper +from openstack.tests.unit import base + + +EXAMPLE = { + 'id': 'ct_helper_id', + 'protocol': 'udp', + 'port': 69, + 'helper': 'tftp', +} + + +class TestL3ConntrackHelper(base.TestCase): + def test_basic(self): + sot = l3_conntrack_helper.ConntrackHelper() + self.assertEqual('conntrack_helper', sot.resource_key) + self.assertEqual('conntrack_helpers', sot.resources_key) + self.assertEqual( + '/routers/%(router_id)s/conntrack_helpers', sot.base_path + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = l3_conntrack_helper.ConntrackHelper(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['protocol'], sot.protocol) + self.assertEqual(EXAMPLE['port'], sot.port) + self.assertEqual(EXAMPLE['helper'], sot.helper) diff --git a/openstack/tests/unit/network/v2/test_listener.py b/openstack/tests/unit/network/v2/test_listener.py index 34c91d824a..fea5259f4a 100644 --- a/openstack/tests/unit/network/v2/test_listener.py +++ b/openstack/tests/unit/network/v2/test_listener.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import listener +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -22,6 +22,7 @@ 'description': '4', 'id': IDENTIFIER, 'loadbalancers': [{'id': '6'}], + 'loadbalancer_id': '6', 'name': '7', 'project_id': '8', 'protocol': '9', @@ -31,17 +32,15 @@ } -class TestListener(testtools.TestCase): - +class TestListener(base.TestCase): def test_basic(self): sot = listener.Listener() self.assertEqual('listener', sot.resource_key) self.assertEqual('listeners', sot.resources_key) self.assertEqual('/lbaas/listeners', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -53,11 +52,12 @@ def test_make_it(self): self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['loadbalancers'], sot.load_balancer_ids) + self.assertEqual(EXAMPLE['loadbalancer_id'], sot.load_balancer_id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['protocol_port'], sot.protocol_port) - self.assertEqual(EXAMPLE['default_tls_container_ref'], - sot.default_tls_container_ref) - self.assertEqual(EXAMPLE['sni_container_refs'], - sot.sni_container_refs) + self.assertEqual( + EXAMPLE['default_tls_container_ref'], sot.default_tls_container_ref + ) + self.assertEqual(EXAMPLE['sni_container_refs'], sot.sni_container_refs) diff --git a/openstack/tests/unit/network/v2/test_load_balancer.py b/openstack/tests/unit/network/v2/test_load_balancer.py index 9a657c4b97..6ff4725b2a 100644 --- a/openstack/tests/unit/network/v2/test_load_balancer.py +++ 
b/openstack/tests/unit/network/v2/test_load_balancer.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import load_balancer +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,7 +23,7 @@ 'name': '5', 'operating_status': '6', 'provisioning_status': '7', - 'tenant_id': '8', + 'project_id': '8', 'vip_address': '9', 'vip_subnet_id': '10', 'vip_port_id': '11', @@ -32,17 +32,15 @@ } -class TestLoadBalancer(testtools.TestCase): - +class TestLoadBalancer(base.TestCase): def test_basic(self): sot = load_balancer.LoadBalancer() self.assertEqual('loadbalancer', sot.resource_key) self.assertEqual('loadbalancers', sot.resources_key) self.assertEqual('/lbaas/loadbalancers', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -54,9 +52,10 @@ def test_make_it(self): self.assertEqual(EXAMPLE['listeners'], sot.listener_ids) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['operating_status'], sot.operating_status) - self.assertEqual(EXAMPLE['provisioning_status'], - sot.provisioning_status) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual( + EXAMPLE['provisioning_status'], sot.provisioning_status + ) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['vip_address'], sot.vip_address) self.assertEqual(EXAMPLE['vip_subnet_id'], sot.vip_subnet_id) self.assertEqual(EXAMPLE['vip_port_id'], sot.vip_port_id) diff --git a/openstack/tests/unit/network/v2/test_local_ip.py b/openstack/tests/unit/network/v2/test_local_ip.py new file mode 100644 index 0000000000..29ab1dacc7 --- /dev/null +++ 
b/openstack/tests/unit/network/v2/test_local_ip.py @@ -0,0 +1,76 @@ +# Copyright 2021 Huawei, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. +# + +from openstack.network.v2 import local_ip +from openstack.tests.unit import base + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'created_at': '0', + 'id': IDENTIFIER, + 'name': '1', + 'description': '2', + 'project_id': '3', + 'local_port_id': '4', + 'network_id': '5', + 'local_ip_address': '127.0.0.1', + 'ip_mode': 'translate', + 'revision_number': '6', + 'updated_at': '7', +} + + +class TestLocalIP(base.TestCase): + def test_basic(self): + sot = local_ip.LocalIP() + self.assertEqual('local_ip', sot.resource_key) + self.assertEqual('local_ips', sot.resources_key) + self.assertEqual('/local_ips', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + "name": "name", + "description": "description", + "project_id": "project_id", + "network_id": "network_id", + "local_port_id": "local_port_id", + "local_ip_address": "local_ip_address", + "ip_mode": "ip_mode", + "sort_key": "sort_key", + "sort_dir": "sort_dir", + "limit": "limit", + "marker": "marker", + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = local_ip.LocalIP(**EXAMPLE) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + 
self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['local_port_id'], sot.local_port_id) + self.assertEqual(EXAMPLE['network_id'], sot.network_id) + self.assertEqual(EXAMPLE['local_ip_address'], sot.local_ip_address) + self.assertEqual(EXAMPLE['ip_mode'], sot.ip_mode) + self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/network/v2/test_local_ip_association.py b/openstack/tests/unit/network/v2/test_local_ip_association.py new file mode 100644 index 0000000000..973105b7a8 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_local_ip_association.py @@ -0,0 +1,61 @@ +# Copyright 2021 Huawei, Inc. All rights reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+# + +from openstack.network.v2 import local_ip_association +from openstack.tests.unit import base + +EXAMPLE = { + 'local_ip_id': '0', + 'local_ip_address': '127.0.0.1', + 'fixed_port_id': '1', + 'fixed_ip': '127.0.0.2', + 'host': '2', +} + + +class TestLocalIP(base.TestCase): + def test_basic(self): + sot = local_ip_association.LocalIPAssociation() + self.assertEqual('port_association', sot.resource_key) + self.assertEqual('port_associations', sot.resources_key) + self.assertEqual( + '/local_ips/%(local_ip_id)s/port_associations', sot.base_path + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + 'fixed_port_id': 'fixed_port_id', + 'fixed_ip': 'fixed_ip', + 'host': 'host', + 'limit': 'limit', + 'marker': 'marker', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = local_ip_association.LocalIPAssociation(**EXAMPLE) + self.assertEqual(EXAMPLE['local_ip_id'], sot.local_ip_id) + self.assertEqual(EXAMPLE['local_ip_address'], sot.local_ip_address) + self.assertEqual(EXAMPLE['fixed_port_id'], sot.fixed_port_id) + self.assertEqual(EXAMPLE['fixed_ip'], sot.fixed_ip) + self.assertEqual(EXAMPLE['host'], sot.host) diff --git a/openstack/tests/unit/network/v2/test_metering_label.py b/openstack/tests/unit/network/v2/test_metering_label.py index 4d1e57a98f..be177d6f94 100644 --- a/openstack/tests/unit/network/v2/test_metering_label.py +++ b/openstack/tests/unit/network/v2/test_metering_label.py @@ -10,31 +10,29 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import metering_label +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'description': '1', 'id': IDENTIFIER, 'name': '3', - 'tenant_id': '4', + 'project_id': '4', 'shared': False, } -class TestMeteringLabel(testtools.TestCase): - +class TestMeteringLabel(base.TestCase): def test_basic(self): sot = metering_label.MeteringLabel() self.assertEqual('metering_label', sot.resource_key) self.assertEqual('metering_labels', sot.resources_key) self.assertEqual('/metering/metering-labels', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -43,5 +41,5 @@ def test_make_it(self): self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['shared'], sot.is_shared) diff --git a/openstack/tests/unit/network/v2/test_metering_label_rule.py b/openstack/tests/unit/network/v2/test_metering_label_rule.py index 03c134b42b..fa63407083 100644 --- a/openstack/tests/unit/network/v2/test_metering_label_rule.py +++ b/openstack/tests/unit/network/v2/test_metering_label_rule.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import metering_label_rule +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -20,22 +20,20 @@ 'excluded': False, 'id': IDENTIFIER, 'metering_label_id': '4', - 'tenant_id': '5', + 'project_id': '5', 'remote_ip_prefix': '6', } -class TestMeteringLabelRule(testtools.TestCase): - +class TestMeteringLabelRule(base.TestCase): def test_basic(self): sot = metering_label_rule.MeteringLabelRule() self.assertEqual('metering_label_rule', sot.resource_key) self.assertEqual('metering_label_rules', sot.resources_key) self.assertEqual('/metering/metering-label-rules', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -45,5 +43,29 @@ def test_make_it(self): self.assertFalse(sot.is_excluded) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['metering_label_id'], sot.metering_label_id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['remote_ip_prefix'], sot.remote_ip_prefix) + + def test_make_it_source_and_destination(self): + custom_example = EXAMPLE.copy() + custom_example["source_ip_prefix"] = "192.168.0.11/32" + custom_example["destination_ip_prefix"] = "0.0.0.0/0" + + sot = metering_label_rule.MeteringLabelRule(**custom_example) + self.assertEqual(custom_example['direction'], sot.direction) + self.assertFalse(sot.is_excluded) + self.assertEqual(custom_example['id'], sot.id) + self.assertEqual( + custom_example['metering_label_id'], sot.metering_label_id + ) + self.assertEqual(custom_example['project_id'], sot.project_id) + self.assertEqual( + custom_example['remote_ip_prefix'], sot.remote_ip_prefix + ) + + self.assertEqual( + 
custom_example['source_ip_prefix'], sot.source_ip_prefix + ) + self.assertEqual( + custom_example['destination_ip_prefix'], sot.destination_ip_prefix + ) diff --git a/openstack/tests/unit/network/v2/test_ndp_proxy.py b/openstack/tests/unit/network/v2/test_ndp_proxy.py new file mode 100644 index 0000000000..56ac1aa846 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_ndp_proxy.py @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import ndp_proxy +from openstack.tests.unit import base + +EXAMPLE = { + 'id': 'np_id', + 'name': 'np_name', + 'router_id': 'router-uuid', + 'port_id': 'port-uuid', + 'project_id': 'project-uuid', + 'description': 'fake-desc', + 'created_at': '2021-12-21T19:14:57.233772', + 'updated_at': '2021-12-21T19:14:57.233772', +} + + +class TestNDPProxy(base.TestCase): + def test_basic(self): + sot = ndp_proxy.NDPProxy() + self.assertEqual('ndp_proxy', sot.resource_key) + self.assertEqual('ndp_proxies', sot.resources_key) + self.assertEqual('/ndp_proxies', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = ndp_proxy.NDPProxy(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['router_id'], sot.router_id) + self.assertEqual(EXAMPLE['port_id'], sot.port_id) + 
self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/network/v2/test_network.py b/openstack/tests/unit/network/v2/test_network.py index 9007c94511..e7e964bf1f 100644 --- a/openstack/tests/unit/network/v2/test_network.py +++ b/openstack/tests/unit/network/v2/test_network.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import network +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -41,49 +41,54 @@ 'status': '17', 'subnets': ['18', '19'], 'updated_at': '2016-07-09T12:14:57.233772', + 'vlan_transparent': False, + 'vlan_qinq': False, } -class TestNetwork(testtools.TestCase): - +class TestNetwork(base.TestCase): def test_basic(self): sot = network.Network() self.assertEqual('network', sot.resource_key) self.assertEqual('networks', sot.resources_key) self.assertEqual('/networks', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): sot = network.Network(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) - self.assertEqual(EXAMPLE['availability_zone_hints'], - sot.availability_zone_hints) - self.assertEqual(EXAMPLE['availability_zones'], - sot.availability_zones) + self.assertEqual( + EXAMPLE['availability_zone_hints'], sot.availability_zone_hints + ) + self.assertEqual(EXAMPLE['availability_zones'], sot.availability_zones) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) 
self.assertEqual(EXAMPLE['dns_domain'], sot.dns_domain) self.assertEqual(EXAMPLE['id'], sot.id) - self.assertEqual(EXAMPLE['ipv4_address_scope'], - sot.ipv4_address_scope_id) - self.assertEqual(EXAMPLE['ipv6_address_scope'], - sot.ipv6_address_scope_id) + self.assertEqual( + EXAMPLE['ipv4_address_scope'], sot.ipv4_address_scope_id + ) + self.assertEqual( + EXAMPLE['ipv6_address_scope'], sot.ipv6_address_scope_id + ) self.assertFalse(sot.is_default) self.assertEqual(EXAMPLE['mtu'], sot.mtu) self.assertEqual(EXAMPLE['name'], sot.name) self.assertTrue(sot.is_port_security_enabled) self.assertEqual(EXAMPLE['project_id'], sot.project_id) - self.assertEqual(EXAMPLE['provider:network_type'], - sot.provider_network_type) - self.assertEqual(EXAMPLE['provider:physical_network'], - sot.provider_physical_network) - self.assertEqual(EXAMPLE['provider:segmentation_id'], - sot.provider_segmentation_id) + self.assertEqual( + EXAMPLE['provider:network_type'], sot.provider_network_type + ) + self.assertEqual( + EXAMPLE['provider:physical_network'], sot.provider_physical_network + ) + self.assertEqual( + EXAMPLE['provider:segmentation_id'], sot.provider_segmentation_id + ) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertTrue(sot.is_router_external) @@ -92,19 +97,47 @@ def test_make_it(self): self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['subnets'], sot.subnet_ids) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['vlan_transparent'], sot.is_vlan_transparent) + self.assertEqual(EXAMPLE['vlan_qinq'], sot.is_vlan_qinq) + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'description': 'description', + 'id': 'id', + 'name': 'name', + 'project_id': 'project_id', + 'status': 'status', + 'ipv4_address_scope_id': 'ipv4_address_scope', + 'ipv6_address_scope_id': 'ipv6_address_scope', + 'is_admin_state_up': 'admin_state_up', 
+ 'is_port_security_enabled': 'port_security_enabled', + 'is_router_external': 'router:external', + 'is_shared': 'shared', + 'provider_network_type': 'provider:network_type', + 'provider_physical_network': 'provider:physical_network', + 'provider_segmentation_id': 'provider:segmentation_id', + 'tags': 'tags', + 'any_tags': 'tags-any', + 'not_tags': 'not-tags', + 'not_any_tags': 'not-tags-any', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + }, + sot._query_mapping._mapping, + ) -class TestNetworkHostingDHCPAgent(testtools.TestCase): +class TestDHCPAgentHostingNetwork(base.TestCase): def test_basic(self): - net = network.NetworkHostingDHCPAgent() - self.assertEqual('agent', net.resource_key) - self.assertEqual('agents', net.resources_key) - self.assertEqual('/networks/%(network_id)s/dhcp-agents', net.base_path) - self.assertEqual('dhcp-agent', net.resource_name) - self.assertEqual('network', net.service.service_type) + net = network.DHCPAgentHostingNetwork() + self.assertEqual('network', net.resource_key) + self.assertEqual('networks', net.resources_key) + self.assertEqual('/agents/%(agent_id)s/dhcp-networks', net.base_path) + self.assertEqual('dhcp-network', net.resource_name) self.assertFalse(net.allow_create) - self.assertTrue(net.allow_get) - self.assertFalse(net.allow_update) + self.assertTrue(net.allow_fetch) + self.assertFalse(net.allow_commit) self.assertFalse(net.allow_delete) self.assertTrue(net.allow_list) diff --git a/openstack/tests/unit/network/v2/test_network_ip_availability.py b/openstack/tests/unit/network/v2/test_network_ip_availability.py index 3d0246b199..0d033ab365 100644 --- a/openstack/tests/unit/network/v2/test_network_ip_availability.py +++ b/openstack/tests/unit/network/v2/test_network_ip_availability.py @@ -10,16 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import network_ip_availability +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'network_id': IDENTIFIER, 'network_name': 'private', 'subnet_ip_availability': [], - 'tenant_id': '5', + 'project_id': '5', 'total_ips': 6, 'used_ips': 10, } @@ -27,29 +27,32 @@ EXAMPLE_WITH_OPTIONAL = { 'network_id': IDENTIFIER, 'network_name': 'private', - 'subnet_ip_availability': [{"used_ips": 3, "subnet_id": - "2e4db1d6-ab2d-4bb1-93bb-a003fdbc9b39", - "subnet_name": "private-subnet", - "ip_version": 6, "cidr": "fd91:c3ba:e818::/64", - "total_ips": 18446744073709551614}], - 'tenant_id': '2', + 'subnet_ip_availability': [ + { + "used_ips": 3, + "subnet_id": "2e4db1d6-ab2d-4bb1-93bb-a003fdbc9b39", + "subnet_name": "private-subnet", + "ip_version": 6, + "cidr": "fd91:c3ba:e818::/64", + "total_ips": 18446744073709551614, + } + ], + 'project_id': '2', 'total_ips': 1844, 'used_ips': 6, } -class TestNetworkIPAvailability(testtools.TestCase): - +class TestNetworkIPAvailability(base.TestCase): def test_basic(self): sot = network_ip_availability.NetworkIPAvailability() self.assertEqual('network_ip_availability', sot.resource_key) self.assertEqual('network_ip_availabilities', sot.resources_key) self.assertEqual('/network-ip-availabilities', sot.base_path) self.assertEqual('network_name', sot.name_attribute) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -57,20 +60,25 @@ def test_make_it(self): sot = network_ip_availability.NetworkIPAvailability(**EXAMPLE) self.assertEqual(EXAMPLE['network_id'], sot.network_id) self.assertEqual(EXAMPLE['network_name'], sot.network_name) - self.assertEqual(EXAMPLE['subnet_ip_availability'], - sot.subnet_ip_availability) - 
self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual( + EXAMPLE['subnet_ip_availability'], sot.subnet_ip_availability + ) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['total_ips'], sot.total_ips) self.assertEqual(EXAMPLE['used_ips'], sot.used_ips) def test_make_it_with_optional(self): sot = network_ip_availability.NetworkIPAvailability( - **EXAMPLE_WITH_OPTIONAL) + **EXAMPLE_WITH_OPTIONAL + ) self.assertEqual(EXAMPLE_WITH_OPTIONAL['network_id'], sot.network_id) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['network_name'], - sot.network_name) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['subnet_ip_availability'], - sot.subnet_ip_availability) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['tenant_id'], sot.project_id) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['network_name'], sot.network_name + ) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['subnet_ip_availability'], + sot.subnet_ip_availability, + ) + self.assertEqual(EXAMPLE_WITH_OPTIONAL['project_id'], sot.project_id) self.assertEqual(EXAMPLE_WITH_OPTIONAL['total_ips'], sot.total_ips) self.assertEqual(EXAMPLE_WITH_OPTIONAL['used_ips'], sot.used_ips) diff --git a/openstack/tests/unit/network/v2/test_network_segment_range.py b/openstack/tests/unit/network/v2/test_network_segment_range.py new file mode 100644 index 0000000000..636ab59950 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_network_segment_range.py @@ -0,0 +1,65 @@ +# Copyright (c) 2018, Intel Corporation. +# All Rights Reserved. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import network_segment_range +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'name': '1', + 'default': False, + 'shared': False, + 'project_id': '2', + 'network_type': '3', + 'physical_network': '4', + 'minimum': 5, + 'maximum': 6, + 'used': {}, + 'available': [], +} + + +class TestNetworkSegmentRange(base.TestCase): + def test_basic(self): + test_seg_range = network_segment_range.NetworkSegmentRange() + self.assertEqual('network_segment_range', test_seg_range.resource_key) + self.assertEqual( + 'network_segment_ranges', test_seg_range.resources_key + ) + self.assertEqual('/network_segment_ranges', test_seg_range.base_path) + + self.assertTrue(test_seg_range.allow_create) + self.assertTrue(test_seg_range.allow_fetch) + self.assertTrue(test_seg_range.allow_commit) + self.assertTrue(test_seg_range.allow_delete) + self.assertTrue(test_seg_range.allow_list) + + def test_make_it(self): + test_seg_range = network_segment_range.NetworkSegmentRange(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], test_seg_range.id) + self.assertEqual(EXAMPLE['name'], test_seg_range.name) + self.assertEqual(EXAMPLE['default'], test_seg_range.default) + self.assertEqual(EXAMPLE['shared'], test_seg_range.shared) + self.assertEqual(EXAMPLE['project_id'], test_seg_range.project_id) + self.assertEqual(EXAMPLE['network_type'], test_seg_range.network_type) + self.assertEqual( + EXAMPLE['physical_network'], test_seg_range.physical_network + ) + self.assertEqual(EXAMPLE['minimum'], test_seg_range.minimum) + self.assertEqual(EXAMPLE['maximum'], test_seg_range.maximum) + self.assertEqual(EXAMPLE['used'], test_seg_range.used) + self.assertEqual(EXAMPLE['available'], test_seg_range.available) diff --git a/openstack/tests/unit/network/v2/test_pool.py b/openstack/tests/unit/network/v2/test_pool.py index 
e8e3c8849a..ab9509cd80 100644 --- a/openstack/tests/unit/network/v2/test_pool.py +++ b/openstack/tests/unit/network/v2/test_pool.py @@ -10,22 +10,24 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import pool +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'admin_state_up': True, 'description': '2', + 'healthmonitor_id': '3-1', 'health_monitors': ['3'], 'health_monitor_status': ['4'], 'id': IDENTIFIER, 'lb_algorithm': '5', 'listeners': [{'id': '6'}], + 'listener_id': '6', 'members': [{'id': '7'}], 'name': '8', - 'tenant_id': '9', + 'project_id': '9', 'protocol': '10', 'provider': '11', 'session_persistence': '12', @@ -33,21 +35,20 @@ 'status_description': '14', 'subnet_id': '15', 'loadbalancers': [{'id': '16'}], + 'loadbalancer_id': '16', 'vip_id': '17', } -class TestPool(testtools.TestCase): - +class TestPool(base.TestCase): def test_basic(self): sot = pool.Pool() self.assertEqual('pool', sot.resource_key) self.assertEqual('pools', sot.resources_key) self.assertEqual('/lbaas/pools', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -55,21 +56,26 @@ def test_make_it(self): sot = pool.Pool(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['healthmonitor_id'], sot.health_monitor_id) self.assertEqual(EXAMPLE['health_monitors'], sot.health_monitor_ids) - self.assertEqual(EXAMPLE['health_monitor_status'], - sot.health_monitor_status) + self.assertEqual( + EXAMPLE['health_monitor_status'], sot.health_monitor_status + ) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['lb_algorithm'], 
sot.lb_algorithm) self.assertEqual(EXAMPLE['listeners'], sot.listener_ids) + self.assertEqual(EXAMPLE['listener_id'], sot.listener_id) self.assertEqual(EXAMPLE['members'], sot.member_ids) self.assertEqual(EXAMPLE['name'], sot.name) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['provider'], sot.provider) - self.assertEqual(EXAMPLE['session_persistence'], - sot.session_persistence) + self.assertEqual( + EXAMPLE['session_persistence'], sot.session_persistence + ) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['status_description'], sot.status_description) self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) self.assertEqual(EXAMPLE['loadbalancers'], sot.load_balancer_ids) + self.assertEqual(EXAMPLE['loadbalancer_id'], sot.load_balancer_id) self.assertEqual(EXAMPLE['vip_id'], sot.virtual_ip_id) diff --git a/openstack/tests/unit/network/v2/test_pool_member.py b/openstack/tests/unit/network/v2/test_pool_member.py index f175cb35b3..7e836f1acf 100644 --- a/openstack/tests/unit/network/v2/test_pool_member.py +++ b/openstack/tests/unit/network/v2/test_pool_member.py @@ -10,16 +10,16 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import pool_member +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'address': '1', 'admin_state_up': True, 'id': IDENTIFIER, - 'tenant_id': '4', + 'project_id': '4', 'protocol_port': 5, 'subnet_id': '6', 'weight': 7, @@ -28,17 +28,15 @@ } -class TestPoolMember(testtools.TestCase): - +class TestPoolMember(base.TestCase): def test_basic(self): sot = pool_member.PoolMember() self.assertEqual('member', sot.resource_key) self.assertEqual('members', sot.resources_key) self.assertEqual('/lbaas/pools/%(pool_id)s/members', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -47,7 +45,7 @@ def test_make_it(self): self.assertEqual(EXAMPLE['address'], sot.address) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['id'], sot.id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['protocol_port'], sot.protocol_port) self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) self.assertEqual(EXAMPLE['weight'], sot.weight) diff --git a/openstack/tests/unit/network/v2/test_port.py b/openstack/tests/unit/network/v2/test_port.py index 160546545c..6376ae487e 100644 --- a/openstack/tests/unit/network/v2/test_port.py +++ b/openstack/tests/unit/network/v2/test_port.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import port +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -24,76 +24,148 @@ 'binding:vif_type': '6', 'binding:vnic_type': '7', 'created_at': '2016-03-09T12:14:57.233772', + 'data_plane_status': '32', 'description': '8', 'device_id': '9', 'device_owner': '10', + 'device_profile': 'cyborg_device_profile_1', 'dns_assignment': [{'11': 11}], + 'dns_domain': 'a11', 'dns_name': '12', 'extra_dhcp_opts': [{'13': 13}], 'fixed_ips': [{'14': '14'}], + 'hardware_offload_type': None, 'id': IDENTIFIER, - 'ip_address': '15', + 'ip_allocation': 'immediate', 'mac_address': '16', 'name': '17', 'network_id': '18', - 'opt_name': '19', - 'opt_value': '20', + 'numa_affinity_policy': False, 'port_security_enabled': True, + 'qos_network_policy_id': '32', 'qos_policy_id': '21', + 'propagate_uplink_status': False, + 'resource_request': { + 'required': ['CUSTOM_PHYSNET_PUBLIC', 'CUSTOM_VNIC_TYPE_NORMAL'], + 'resources': { + 'NET_BW_EGR_KILOBIT_PER_SEC': 1, + 'NET_BW_IGR_KILOBIT_PER_SEC': 2, + }, + }, 'revision_number': 22, 'security_groups': ['23'], - 'subnet_id': '24', 'status': '25', - 'tenant_id': '26', + 'project_id': '26', + 'trunk_details': { + 'trunk_id': '27', + 'sub_ports': [ + { + 'port_id': '28', + 'segmentation_id': 29, + 'segmentation_type': '30', + 'mac_address': '31', + } + ], + }, + 'trusted': True, 'updated_at': '2016-07-09T12:14:57.233772', } -class TestPort(testtools.TestCase): - +class TestPort(base.TestCase): def test_basic(self): sot = port.Port() self.assertEqual('port', sot.resource_key) self.assertEqual('ports', sot.resources_key) self.assertEqual('/ports', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertDictEqual( + 
{ + "binding:host_id": "binding:host_id", + "binding:profile": "binding:profile", + "binding:vif_details": "binding:vif_details", + "binding:vif_type": "binding:vif_type", + "binding:vnic_type": "binding:vnic_type", + "description": "description", + "device_id": "device_id", + "device_owner": "device_owner", + "fields": "fields", + "fixed_ips": "fixed_ips", + "id": "id", + "ip_address": "ip_address", + "mac_address": "mac_address", + "name": "name", + "network_id": "network_id", + "security_groups": "security_groups", + "status": "status", + "subnet_id": "subnet_id", + "is_admin_state_up": "admin_state_up", + "is_port_security_enabled": "port_security_enabled", + "project_id": "project_id", + "tenant_id": "project_id", + "security_group_ids": "security_groups", + "limit": "limit", + "marker": "marker", + "any_tags": "tags-any", + "not_any_tags": "not-tags-any", + "not_tags": "not-tags", + "tags": "tags", + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + }, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = port.Port(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) - self.assertEqual(EXAMPLE['allowed_address_pairs'], - sot.allowed_address_pairs) + self.assertEqual( + EXAMPLE['allowed_address_pairs'], sot.allowed_address_pairs + ) self.assertEqual(EXAMPLE['binding:host_id'], sot.binding_host_id) self.assertEqual(EXAMPLE['binding:profile'], sot.binding_profile) - self.assertEqual(EXAMPLE['binding:vif_details'], - sot.binding_vif_details) + self.assertEqual( + EXAMPLE['binding:vif_details'], sot.binding_vif_details + ) self.assertEqual(EXAMPLE['binding:vif_type'], sot.binding_vif_type) self.assertEqual(EXAMPLE['binding:vnic_type'], sot.binding_vnic_type) self.assertEqual(EXAMPLE['created_at'], sot.created_at) + self.assertEqual(EXAMPLE['data_plane_status'], sot.data_plane_status) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['device_id'], sot.device_id) self.assertEqual(EXAMPLE['device_owner'], sot.device_owner) 
+ self.assertEqual(EXAMPLE['device_profile'], sot.device_profile) self.assertEqual(EXAMPLE['dns_assignment'], sot.dns_assignment) + self.assertEqual(EXAMPLE['dns_domain'], sot.dns_domain) self.assertEqual(EXAMPLE['dns_name'], sot.dns_name) self.assertEqual(EXAMPLE['extra_dhcp_opts'], sot.extra_dhcp_opts) self.assertEqual(EXAMPLE['fixed_ips'], sot.fixed_ips) self.assertEqual(EXAMPLE['id'], sot.id) - self.assertEqual(EXAMPLE['ip_address'], sot.ip_address) + self.assertEqual(EXAMPLE['ip_allocation'], sot.ip_allocation) self.assertEqual(EXAMPLE['mac_address'], sot.mac_address) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['network_id'], sot.network_id) - self.assertEqual(EXAMPLE['opt_name'], sot.option_name) - self.assertEqual(EXAMPLE['opt_value'], sot.option_value) + self.assertEqual( + EXAMPLE['numa_affinity_policy'], sot.numa_affinity_policy + ) self.assertTrue(sot.is_port_security_enabled) + self.assertEqual( + EXAMPLE['qos_network_policy_id'], sot.qos_network_policy_id + ) self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) + self.assertEqual( + EXAMPLE['propagate_uplink_status'], sot.propagate_uplink_status + ) + self.assertEqual(EXAMPLE['resource_request'], sot.resource_request) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['security_groups'], sot.security_group_ids) self.assertEqual(EXAMPLE['status'], sot.status) - self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['trunk_details'], sot.trunk_details) + self.assertEqual(EXAMPLE['trusted'], sot.trusted) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/network/v2/test_port_binding.py b/openstack/tests/unit/network/v2/test_port_binding.py new file mode 100644 index 0000000000..79e162e2e5 --- /dev/null +++ 
b/openstack/tests/unit/network/v2/test_port_binding.py @@ -0,0 +1,45 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import port_binding +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'host': 'host1', + 'profile': {}, + 'vif_details': {'bridge_name': 'br-int'}, + 'vif_type': 'ovs', + 'vnic_type': 'normal', +} + + +class TestPortBinding(base.TestCase): + def test_basic(self): + sot = port_binding.PortBinding() + self.assertEqual('binding', sot.resource_key) + self.assertEqual('bindings', sot.resources_key) + self.assertEqual('/ports/%(port_id)s/bindings', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = port_binding.PortBinding(**EXAMPLE) + self.assertEqual(EXAMPLE['host'], sot.host) + self.assertEqual(EXAMPLE['profile'], sot.profile) + self.assertEqual(EXAMPLE['vif_details'], sot.vif_details) + self.assertEqual(EXAMPLE['vif_type'], sot.vif_type) + self.assertCountEqual(EXAMPLE['vnic_type'], sot.vnic_type) diff --git a/openstack/tests/unit/network/v2/test_port_forwarding.py b/openstack/tests/unit/network/v2/test_port_forwarding.py new file mode 100644 index 0000000000..15bb9fcb67 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_port_forwarding.py @@ -0,0 +1,67 @@ +# Licensed under the Apache License, Version 2.0 (the 
"License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import port_forwarding +from openstack.tests.unit import base + + +EXAMPLE = { + 'id': 'pf_id', + 'protocol': 'tcp', + 'internal_ip_address': '1.2.3.4', + 'floatingip_id': 'floating-ip-uuid', + 'internal_port': 80, + 'internal_port_id': 'internal-port-uuid', + 'external_port': 8080, + 'description': 'description', +} + + +class TestFloatingIP(base.TestCase): + def test_basic(self): + sot = port_forwarding.PortForwarding() + self.assertEqual('port_forwarding', sot.resource_key) + self.assertEqual('port_forwardings', sot.resources_key) + self.assertEqual( + '/floatingips/%(floatingip_id)s/port_forwardings', sot.base_path + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + self.assertDictEqual( + { + 'internal_port_id': 'internal_port_id', + 'external_port': 'external_port', + 'limit': 'limit', + 'marker': 'marker', + 'protocol': 'protocol', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = port_forwarding.PortForwarding(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['floatingip_id'], sot.floatingip_id) + self.assertEqual(EXAMPLE['protocol'], sot.protocol) + self.assertEqual( + EXAMPLE['internal_ip_address'], sot.internal_ip_address + ) + self.assertEqual(EXAMPLE['internal_port'], sot.internal_port) + 
self.assertEqual(EXAMPLE['internal_port_id'], sot.internal_port_id) + self.assertEqual(EXAMPLE['external_port'], sot.external_port) + self.assertEqual(EXAMPLE['description'], sot.description) diff --git a/openstack/tests/unit/network/v2/test_proxy.py b/openstack/tests/unit/network/v2/test_proxy.py index 41dbdd636b..00e4fb09ae 100644 --- a/openstack/tests/unit/network/v2/test_proxy.py +++ b/openstack/tests/unit/network/v2/test_proxy.py @@ -10,30 +10,50 @@ # License for the specific language governing permissions and limitations # under the License. -import mock +from unittest import mock import uuid +from openstack import exceptions from openstack.network.v2 import _proxy +from openstack.network.v2 import address_group from openstack.network.v2 import address_scope from openstack.network.v2 import agent from openstack.network.v2 import auto_allocated_topology from openstack.network.v2 import availability_zone +from openstack.network.v2 import bgp_peer +from openstack.network.v2 import bgp_speaker +from openstack.network.v2 import bgpvpn +from openstack.network.v2 import bgpvpn_network_association +from openstack.network.v2 import bgpvpn_port_association +from openstack.network.v2 import bgpvpn_router_association from openstack.network.v2 import extension +from openstack.network.v2 import firewall_group +from openstack.network.v2 import firewall_policy +from openstack.network.v2 import firewall_rule from openstack.network.v2 import flavor from openstack.network.v2 import floating_ip from openstack.network.v2 import health_monitor +from openstack.network.v2 import l3_conntrack_helper from openstack.network.v2 import listener from openstack.network.v2 import load_balancer +from openstack.network.v2 import local_ip +from openstack.network.v2 import local_ip_association from openstack.network.v2 import metering_label from openstack.network.v2 import metering_label_rule +from openstack.network.v2 import ndp_proxy from openstack.network.v2 import network from 
openstack.network.v2 import network_ip_availability +from openstack.network.v2 import network_segment_range from openstack.network.v2 import pool from openstack.network.v2 import pool_member from openstack.network.v2 import port +from openstack.network.v2 import port_binding +from openstack.network.v2 import port_forwarding from openstack.network.v2 import qos_bandwidth_limit_rule from openstack.network.v2 import qos_dscp_marking_rule from openstack.network.v2 import qos_minimum_bandwidth_rule +from openstack.network.v2 import qos_minimum_packet_rate_rule +from openstack.network.v2 import qos_packet_rate_limit_rule from openstack.network.v2 import qos_policy from openstack.network.v2 import qos_rule_type from openstack.network.v2 import quota @@ -46,53 +66,179 @@ from openstack.network.v2 import service_provider from openstack.network.v2 import subnet from openstack.network.v2 import subnet_pool +from openstack.network.v2 import tap_mirror +from openstack.network.v2 import vpn_endpoint_group +from openstack.network.v2 import vpn_ike_policy +from openstack.network.v2 import vpn_ipsec_policy +from openstack.network.v2 import vpn_ipsec_site_connection from openstack.network.v2 import vpn_service -from openstack import proxy2 as proxy_base2 -from openstack.tests.unit import test_proxy_base2 +from openstack import proxy as proxy_base +from openstack.tests.unit import test_proxy_base QOS_POLICY_ID = 'qos-policy-id-' + uuid.uuid4().hex QOS_RULE_ID = 'qos-rule-id-' + uuid.uuid4().hex NETWORK_ID = 'network-id-' + uuid.uuid4().hex AGENT_ID = 'agent-id-' + uuid.uuid4().hex +ROUTER_ID = 'router-id-' + uuid.uuid4().hex +FIP_ID = 'fip-id-' + uuid.uuid4().hex +CT_HELPER_ID = 'ct-helper-id-' + uuid.uuid4().hex +LOCAL_IP_ID = 'lip-id-' + uuid.uuid4().hex +BGPVPN_ID = 'bgpvpn-id-' + uuid.uuid4().hex +PORT_ID = 'port-id-' + uuid.uuid4().hex -class TestNetworkProxy(test_proxy_base2.TestProxyBase): +class TestNetworkProxy(test_proxy_base.TestProxyBase): def setUp(self): - 
super(TestNetworkProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) + def verify_update( + self, + test_method, + resource_type, + base_path=None, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + expected_result="result", + mock_method="openstack.network.v2._proxy.Proxy._update", + ): + super().verify_update( + test_method, + resource_type, + base_path=base_path, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=expected_args, + expected_kwargs=expected_kwargs, + expected_result=expected_result, + mock_method=mock_method, + ) + + def verify_delete( + self, + test_method, + resource_type, + ignore_missing=True, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + mock_method="openstack.network.v2._proxy.Proxy._delete", + ): + super().verify_delete( + test_method, + resource_type, + ignore_missing=ignore_missing, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=expected_args, + expected_kwargs=expected_kwargs, + mock_method=mock_method, + ) + + +class TestNetworkAddressGroup(TestNetworkProxy): + def test_address_group_create_attrs(self): + self.verify_create( + self.proxy.create_address_group, address_group.AddressGroup + ) + + def test_address_group_delete(self): + self.verify_delete( + self.proxy.delete_address_group, address_group.AddressGroup, False + ) + + def test_address_group_delete_ignore(self): + self.verify_delete( + self.proxy.delete_address_group, address_group.AddressGroup, True + ) + + def test_address_group_find(self): + self.verify_find( + self.proxy.find_address_group, address_group.AddressGroup + ) + + def test_address_group_get(self): + self.verify_get( + self.proxy.get_address_group, address_group.AddressGroup + ) + + def test_address_groups(self): + self.verify_list(self.proxy.address_groups, address_group.AddressGroup) + + def test_address_group_update(self): + self.verify_update( + 
self.proxy.update_address_group, address_group.AddressGroup + ) + + @mock.patch( + 'openstack.network.v2._proxy.Proxy.add_addresses_to_address_group' + ) + def test_add_addresses_to_address_group(self, add_addresses): + data = mock.sentinel + + self.proxy.add_addresses_to_address_group( + address_group.AddressGroup, data + ) + + add_addresses.assert_called_once_with(address_group.AddressGroup, data) + + @mock.patch( + 'openstack.network.v2._proxy.Proxy.remove_addresses_from_address_group' + ) + def test_remove_addresses_from_address_group(self, remove_addresses): + data = mock.sentinel + + self.proxy.remove_addresses_from_address_group( + address_group.AddressGroup, data + ) + + remove_addresses.assert_called_once_with( + address_group.AddressGroup, data + ) + + +class TestNetworkAddressScope(TestNetworkProxy): def test_address_scope_create_attrs(self): - self.verify_create(self.proxy.create_address_scope, - address_scope.AddressScope) + self.verify_create( + self.proxy.create_address_scope, address_scope.AddressScope + ) def test_address_scope_delete(self): - self.verify_delete(self.proxy.delete_address_scope, - address_scope.AddressScope, - False) + self.verify_delete( + self.proxy.delete_address_scope, address_scope.AddressScope, False + ) def test_address_scope_delete_ignore(self): - self.verify_delete(self.proxy.delete_address_scope, - address_scope.AddressScope, - True) + self.verify_delete( + self.proxy.delete_address_scope, address_scope.AddressScope, True + ) def test_address_scope_find(self): - self.verify_find(self.proxy.find_address_scope, - address_scope.AddressScope) + self.verify_find( + self.proxy.find_address_scope, address_scope.AddressScope + ) def test_address_scope_get(self): - self.verify_get(self.proxy.get_address_scope, - address_scope.AddressScope) + self.verify_get( + self.proxy.get_address_scope, address_scope.AddressScope + ) def test_address_scopes(self): - self.verify_list(self.proxy.address_scopes, - address_scope.AddressScope, - 
paginated=False) + self.verify_list(self.proxy.address_scopes, address_scope.AddressScope) def test_address_scope_update(self): - self.verify_update(self.proxy.update_address_scope, - address_scope.AddressScope) + self.verify_update( + self.proxy.update_address_scope, address_scope.AddressScope + ) + +class TestNetworkAgent(TestNetworkProxy): def test_agent_delete(self): self.verify_delete(self.proxy.delete_agent, agent.Agent, True) @@ -100,52 +246,69 @@ def test_agent_get(self): self.verify_get(self.proxy.get_agent, agent.Agent) def test_agents(self): - self.verify_list(self.proxy.agents, agent.Agent, - paginated=False) + self.verify_list(self.proxy.agents, agent.Agent) def test_agent_update(self): self.verify_update(self.proxy.update_agent, agent.Agent) + +class TestNetworkAvailability(TestNetworkProxy): def test_availability_zones(self): - self.verify_list_no_kwargs(self.proxy.availability_zones, - availability_zone.AvailabilityZone, - paginated=False) + self.verify_list( + self.proxy.availability_zones, availability_zone.AvailabilityZone + ) def test_dhcp_agent_hosting_networks(self): self.verify_list( self.proxy.dhcp_agent_hosting_networks, - agent.DHCPAgentHostingNetwork, - paginated=False, + network.DHCPAgentHostingNetwork, method_kwargs={'agent': AGENT_ID}, - expected_kwargs={'agent_id': AGENT_ID} + expected_kwargs={'agent_id': AGENT_ID}, ) def test_network_hosting_dhcp_agents(self): self.verify_list( self.proxy.network_hosting_dhcp_agents, - network.NetworkHostingDHCPAgent, - paginated=False, + agent.NetworkHostingDHCPAgent, method_kwargs={'network': NETWORK_ID}, - expected_kwargs={'network_id': NETWORK_ID} + expected_kwargs={'network_id': NETWORK_ID}, ) + +class TestNetworkExtension(TestNetworkProxy): def test_extension_find(self): self.verify_find(self.proxy.find_extension, extension.Extension) def test_extensions(self): - self.verify_list(self.proxy.extensions, extension.Extension, - paginated=False) + self.verify_list(self.proxy.extensions, 
extension.Extension) def test_floating_ip_create_attrs(self): self.verify_create(self.proxy.create_ip, floating_ip.FloatingIP) def test_floating_ip_delete(self): - self.verify_delete(self.proxy.delete_ip, floating_ip.FloatingIP, - False) + self.verify_delete( + self.proxy.delete_ip, + floating_ip.FloatingIP, + False, + expected_kwargs={'if_revision': None}, + ) def test_floating_ip_delete_ignore(self): - self.verify_delete(self.proxy.delete_ip, floating_ip.FloatingIP, - True) + self.verify_delete( + self.proxy.delete_ip, + floating_ip.FloatingIP, + True, + expected_kwargs={'if_revision': None}, + ) + + def test_floating_ip_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_ip, + floating_ip.FloatingIP, + True, + method_kwargs={'if_revision': 42}, + expected_kwargs={'if_revision': 42}, + ) def test_floating_ip_find(self): self.verify_find(self.proxy.find_ip, floating_ip.FloatingIP) @@ -154,51 +317,76 @@ def test_floating_ip_get(self): self.verify_get(self.proxy.get_ip, floating_ip.FloatingIP) def test_ips(self): - self.verify_list(self.proxy.ips, floating_ip.FloatingIP, - paginated=False) + self.verify_list(self.proxy.ips, floating_ip.FloatingIP) def test_floating_ip_update(self): - self.verify_update(self.proxy.update_ip, floating_ip.FloatingIP) + self.verify_update( + self.proxy.update_ip, + floating_ip.FloatingIP, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, + ) + + def test_floating_ip_update_if_revision(self): + self.verify_update( + self.proxy.update_ip, + floating_ip.FloatingIP, + method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + ) + +class TestNetworkHealthMonitor(TestNetworkProxy): def test_health_monitor_create_attrs(self): - self.verify_create(self.proxy.create_health_monitor, - health_monitor.HealthMonitor) + self.verify_create( + self.proxy.create_health_monitor, health_monitor.HealthMonitor + ) def test_health_monitor_delete(self): - 
self.verify_delete(self.proxy.delete_health_monitor, - health_monitor.HealthMonitor, False) + self.verify_delete( + self.proxy.delete_health_monitor, + health_monitor.HealthMonitor, + False, + ) def test_health_monitor_delete_ignore(self): - self.verify_delete(self.proxy.delete_health_monitor, - health_monitor.HealthMonitor, True) + self.verify_delete( + self.proxy.delete_health_monitor, + health_monitor.HealthMonitor, + True, + ) def test_health_monitor_find(self): - self.verify_find(self.proxy.find_health_monitor, - health_monitor.HealthMonitor) + self.verify_find( + self.proxy.find_health_monitor, health_monitor.HealthMonitor + ) def test_health_monitor_get(self): - self.verify_get(self.proxy.get_health_monitor, - health_monitor.HealthMonitor) + self.verify_get( + self.proxy.get_health_monitor, health_monitor.HealthMonitor + ) def test_health_monitors(self): - self.verify_list(self.proxy.health_monitors, - health_monitor.HealthMonitor, - paginated=False) + self.verify_list( + self.proxy.health_monitors, health_monitor.HealthMonitor + ) def test_health_monitor_update(self): - self.verify_update(self.proxy.update_health_monitor, - health_monitor.HealthMonitor) + self.verify_update( + self.proxy.update_health_monitor, health_monitor.HealthMonitor + ) + +class TestNetworkListener(TestNetworkProxy): def test_listener_create_attrs(self): self.verify_create(self.proxy.create_listener, listener.Listener) def test_listener_delete(self): - self.verify_delete(self.proxy.delete_listener, - listener.Listener, False) + self.verify_delete( + self.proxy.delete_listener, listener.Listener, False + ) def test_listener_delete_ignore(self): - self.verify_delete(self.proxy.delete_listener, - listener.Listener, True) + self.verify_delete(self.proxy.delete_listener, listener.Listener, True) def test_listener_find(self): self.verify_find(self.proxy.find_listener, listener.Listener) @@ -207,121 +395,197 @@ def test_listener_get(self): self.verify_get(self.proxy.get_listener, 
listener.Listener) def test_listeners(self): - self.verify_list(self.proxy.listeners, listener.Listener, - paginated=False) + self.verify_list(self.proxy.listeners, listener.Listener) def test_listener_update(self): self.verify_update(self.proxy.update_listener, listener.Listener) + +class TestNetworkLoadBalancer(TestNetworkProxy): def test_load_balancer_create_attrs(self): - self.verify_create(self.proxy.create_load_balancer, - load_balancer.LoadBalancer) + self.verify_create( + self.proxy.create_load_balancer, load_balancer.LoadBalancer + ) def test_load_balancer_delete(self): - self.verify_delete(self.proxy.delete_load_balancer, - load_balancer.LoadBalancer, False) + self.verify_delete( + self.proxy.delete_load_balancer, load_balancer.LoadBalancer, False + ) def test_load_balancer_delete_ignore(self): - self.verify_delete(self.proxy.delete_load_balancer, - load_balancer.LoadBalancer, True) + self.verify_delete( + self.proxy.delete_load_balancer, load_balancer.LoadBalancer, True + ) def test_load_balancer_find(self): - self.verify_find(self.proxy.find_load_balancer, - load_balancer.LoadBalancer) + self.verify_find( + self.proxy.find_load_balancer, load_balancer.LoadBalancer + ) def test_load_balancer_get(self): - self.verify_get(self.proxy.get_load_balancer, - load_balancer.LoadBalancer) + self.verify_get( + self.proxy.get_load_balancer, load_balancer.LoadBalancer + ) def test_load_balancers(self): - self.verify_list(self.proxy.load_balancers, - load_balancer.LoadBalancer, - paginated=False) + self.verify_list(self.proxy.load_balancers, load_balancer.LoadBalancer) def test_load_balancer_update(self): - self.verify_update(self.proxy.update_load_balancer, - load_balancer.LoadBalancer) + self.verify_update( + self.proxy.update_load_balancer, load_balancer.LoadBalancer + ) + +class TestNetworkMeteringLabel(TestNetworkProxy): def test_metering_label_create_attrs(self): - self.verify_create(self.proxy.create_metering_label, - metering_label.MeteringLabel) + 
self.verify_create( + self.proxy.create_metering_label, metering_label.MeteringLabel + ) def test_metering_label_delete(self): - self.verify_delete(self.proxy.delete_metering_label, - metering_label.MeteringLabel, False) + self.verify_delete( + self.proxy.delete_metering_label, + metering_label.MeteringLabel, + False, + ) def test_metering_label_delete_ignore(self): - self.verify_delete(self.proxy.delete_metering_label, - metering_label.MeteringLabel, True) + self.verify_delete( + self.proxy.delete_metering_label, + metering_label.MeteringLabel, + True, + ) def test_metering_label_find(self): - self.verify_find(self.proxy.find_metering_label, - metering_label.MeteringLabel) + self.verify_find( + self.proxy.find_metering_label, metering_label.MeteringLabel + ) def test_metering_label_get(self): - self.verify_get(self.proxy.get_metering_label, - metering_label.MeteringLabel) + self.verify_get( + self.proxy.get_metering_label, metering_label.MeteringLabel + ) def test_metering_labels(self): - self.verify_list(self.proxy.metering_labels, - metering_label.MeteringLabel, - paginated=False) + self.verify_list( + self.proxy.metering_labels, metering_label.MeteringLabel + ) def test_metering_label_update(self): - self.verify_update(self.proxy.update_metering_label, - metering_label.MeteringLabel) + self.verify_update( + self.proxy.update_metering_label, metering_label.MeteringLabel + ) def test_metering_label_rule_create_attrs(self): - self.verify_create(self.proxy.create_metering_label_rule, - metering_label_rule.MeteringLabelRule) + self.verify_create( + self.proxy.create_metering_label_rule, + metering_label_rule.MeteringLabelRule, + ) def test_metering_label_rule_delete(self): - self.verify_delete(self.proxy.delete_metering_label_rule, - metering_label_rule.MeteringLabelRule, False) + self.verify_delete( + self.proxy.delete_metering_label_rule, + metering_label_rule.MeteringLabelRule, + False, + ) def test_metering_label_rule_delete_ignore(self): - 
self.verify_delete(self.proxy.delete_metering_label_rule, - metering_label_rule.MeteringLabelRule, True) + self.verify_delete( + self.proxy.delete_metering_label_rule, + metering_label_rule.MeteringLabelRule, + True, + ) def test_metering_label_rule_find(self): - self.verify_find(self.proxy.find_metering_label_rule, - metering_label_rule.MeteringLabelRule) + self.verify_find( + self.proxy.find_metering_label_rule, + metering_label_rule.MeteringLabelRule, + ) def test_metering_label_rule_get(self): - self.verify_get(self.proxy.get_metering_label_rule, - metering_label_rule.MeteringLabelRule) + self.verify_get( + self.proxy.get_metering_label_rule, + metering_label_rule.MeteringLabelRule, + ) def test_metering_label_rules(self): - self.verify_list(self.proxy.metering_label_rules, - metering_label_rule.MeteringLabelRule, - paginated=False) + self.verify_list( + self.proxy.metering_label_rules, + metering_label_rule.MeteringLabelRule, + ) def test_metering_label_rule_update(self): - self.verify_update(self.proxy.update_metering_label_rule, - metering_label_rule.MeteringLabelRule) + self.verify_update( + self.proxy.update_metering_label_rule, + metering_label_rule.MeteringLabelRule, + ) + +class TestNetworkNetwork(TestNetworkProxy): def test_network_create_attrs(self): self.verify_create(self.proxy.create_network, network.Network) def test_network_delete(self): - self.verify_delete(self.proxy.delete_network, network.Network, False) + self.verify_delete( + self.proxy.delete_network, + network.Network, + False, + expected_kwargs={'if_revision': None}, + ) def test_network_delete_ignore(self): - self.verify_delete(self.proxy.delete_network, network.Network, True) + self.verify_delete( + self.proxy.delete_network, + network.Network, + True, + expected_kwargs={'if_revision': None}, + ) + + def test_network_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_network, + network.Network, + True, + method_kwargs={'if_revision': 42}, + 
expected_kwargs={'if_revision': 42}, + ) def test_network_find(self): self.verify_find(self.proxy.find_network, network.Network) + def test_network_find_with_filter(self): + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_network, + method_args=["net1"], + method_kwargs={"project_id": "1"}, + expected_args=[network.Network, "net1"], + expected_kwargs={"project_id": "1", "ignore_missing": True}, + ) + def test_network_get(self): self.verify_get(self.proxy.get_network, network.Network) def test_networks(self): - self.verify_list(self.proxy.networks, network.Network, - paginated=False) + self.verify_list(self.proxy.networks, network.Network) def test_network_update(self): - self.verify_update(self.proxy.update_network, network.Network) + self.verify_update( + self.proxy.update_network, + network.Network, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, + ) + def test_network_update_if_revision(self): + self.verify_update( + self.proxy.update_network, + network.Network, + method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + ) + + +class TestNetworkFlavor(TestNetworkProxy): def test_flavor_create_attrs(self): self.verify_create(self.proxy.create_flavor, flavor.Flavor) @@ -338,88 +602,251 @@ def test_flavor_update(self): self.verify_update(self.proxy.update_flavor, flavor.Flavor) def test_flavors(self): - self.verify_list(self.proxy.flavors, flavor.Flavor, - paginated=True) + self.verify_list(self.proxy.flavors, flavor.Flavor) + + +class TestNetworkLocalIp(TestNetworkProxy): + def test_local_ip_create_attrs(self): + self.verify_create(self.proxy.create_local_ip, local_ip.LocalIP) + + def test_local_ip_delete(self): + self.verify_delete( + self.proxy.delete_local_ip, + local_ip.LocalIP, + False, + expected_kwargs={'if_revision': None}, + ) + def test_local_ip_delete_ignore(self): + self.verify_delete( + self.proxy.delete_local_ip, + local_ip.LocalIP, + True, + 
expected_kwargs={'if_revision': None}, + ) + + def test_local_ip_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_local_ip, + local_ip.LocalIP, + True, + method_kwargs={'if_revision': 42}, + expected_kwargs={'if_revision': 42}, + ) + + def test_local_ip_find(self): + self.verify_find(self.proxy.find_local_ip, local_ip.LocalIP) + + def test_local_ip_get(self): + self.verify_get(self.proxy.get_local_ip, local_ip.LocalIP) + + def test_local_ips(self): + self.verify_list(self.proxy.local_ips, local_ip.LocalIP) + + def test_local_ip_update(self): + self.verify_update( + self.proxy.update_local_ip, + local_ip.LocalIP, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, + ) + + def test_local_ip_update_if_revision(self): + self.verify_update( + self.proxy.update_local_ip, + local_ip.LocalIP, + method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + ) + + +class TestNetworkLocalIpAssociation(TestNetworkProxy): + def test_local_ip_association_create_attrs(self): + self.verify_create( + self.proxy.create_local_ip_association, + local_ip_association.LocalIPAssociation, + method_kwargs={'local_ip': LOCAL_IP_ID}, + expected_kwargs={'local_ip_id': LOCAL_IP_ID}, + ) + + def test_local_ip_association_delete(self): + self.verify_delete( + self.proxy.delete_local_ip_association, + local_ip_association.LocalIPAssociation, + ignore_missing=False, + method_args=[LOCAL_IP_ID, "resource_or_id"], + expected_args=["resource_or_id"], + expected_kwargs={'if_revision': None, 'local_ip_id': LOCAL_IP_ID}, + ) + + def test_local_ip_association_delete_ignore(self): + self.verify_delete( + self.proxy.delete_local_ip_association, + local_ip_association.LocalIPAssociation, + ignore_missing=True, + method_args=[LOCAL_IP_ID, "resource_or_id"], + expected_args=["resource_or_id"], + expected_kwargs={'if_revision': None, 'local_ip_id': LOCAL_IP_ID}, + ) + + def test_local_ip_association_find(self): + lip = 
local_ip.LocalIP.new(id=LOCAL_IP_ID) + + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_local_ip_association, + method_args=['local_ip_association_id', lip], + expected_args=[ + local_ip_association.LocalIPAssociation, + 'local_ip_association_id', + ], + expected_kwargs={ + 'ignore_missing': True, + 'local_ip_id': LOCAL_IP_ID, + }, + ) + + def test_local_ip_association_get(self): + lip = local_ip.LocalIP.new(id=LOCAL_IP_ID) + + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_local_ip_association, + method_args=['local_ip_association_id', lip], + expected_args=[ + local_ip_association.LocalIPAssociation, + 'local_ip_association_id', + ], + expected_kwargs={'local_ip_id': LOCAL_IP_ID}, + ) + + def test_local_ip_associations(self): + self.verify_list( + self.proxy.local_ip_associations, + local_ip_association.LocalIPAssociation, + method_kwargs={'local_ip': LOCAL_IP_ID}, + expected_kwargs={'local_ip_id': LOCAL_IP_ID}, + ) + + +class TestNetworkServiceProfile(TestNetworkProxy): def test_service_profile_create_attrs(self): - self.verify_create(self.proxy.create_service_profile, - service_profile.ServiceProfile) + self.verify_create( + self.proxy.create_service_profile, service_profile.ServiceProfile + ) def test_service_profile_delete(self): - self.verify_delete(self.proxy.delete_service_profile, - service_profile.ServiceProfile, True) + self.verify_delete( + self.proxy.delete_service_profile, + service_profile.ServiceProfile, + True, + ) def test_service_profile_find(self): - self.verify_find(self.proxy.find_service_profile, - service_profile.ServiceProfile) + self.verify_find( + self.proxy.find_service_profile, service_profile.ServiceProfile + ) def test_service_profile_get(self): - self.verify_get(self.proxy.get_service_profile, - service_profile.ServiceProfile) + self.verify_get( + self.proxy.get_service_profile, service_profile.ServiceProfile + ) def test_service_profiles(self): - self.verify_list(self.proxy.service_profiles, - 
service_profile.ServiceProfile, paginated=True) + self.verify_list( + self.proxy.service_profiles, service_profile.ServiceProfile + ) def test_service_profile_update(self): - self.verify_update(self.proxy.update_service_profile, - service_profile.ServiceProfile) + self.verify_update( + self.proxy.update_service_profile, service_profile.ServiceProfile + ) + +class TestNetworkIpAvailability(TestNetworkProxy): def test_network_ip_availability_find(self): - self.verify_find(self.proxy.find_network_ip_availability, - network_ip_availability.NetworkIPAvailability) + self.verify_find( + self.proxy.find_network_ip_availability, + network_ip_availability.NetworkIPAvailability, + ) def test_network_ip_availability_get(self): - self.verify_get(self.proxy.get_network_ip_availability, - network_ip_availability.NetworkIPAvailability) + self.verify_get( + self.proxy.get_network_ip_availability, + network_ip_availability.NetworkIPAvailability, + ) def test_network_ip_availabilities(self): - self.verify_list(self.proxy.network_ip_availabilities, - network_ip_availability.NetworkIPAvailability) + self.verify_list( + self.proxy.network_ip_availabilities, + network_ip_availability.NetworkIPAvailability, + ) def test_pool_member_create_attrs(self): - self.verify_create(self.proxy.create_pool_member, - pool_member.PoolMember, - method_kwargs={"pool": "test_id"}, - expected_kwargs={"pool_id": "test_id"}) + self.verify_create( + self.proxy.create_pool_member, + pool_member.PoolMember, + method_kwargs={"pool": "test_id"}, + expected_kwargs={"pool_id": "test_id"}, + ) + +class TestNetworkPoolMember(TestNetworkProxy): def test_pool_member_delete(self): - self.verify_delete(self.proxy.delete_pool_member, - pool_member.PoolMember, False, - {"pool": "test_id"}, {"pool_id": "test_id"}) + self.verify_delete( + self.proxy.delete_pool_member, + pool_member.PoolMember, + ignore_missing=False, + method_kwargs={"pool": "test_id"}, + expected_kwargs={"pool_id": "test_id"}, + ) def 
test_pool_member_delete_ignore(self): - self.verify_delete(self.proxy.delete_pool_member, - pool_member.PoolMember, True, - {"pool": "test_id"}, {"pool_id": "test_id"}) + self.verify_delete( + self.proxy.delete_pool_member, + pool_member.PoolMember, + ignore_missing=True, + method_kwargs={"pool": "test_id"}, + expected_kwargs={"pool_id": "test_id"}, + ) def test_pool_member_find(self): - self._verify2('openstack.proxy2.BaseProxy._find', - self.proxy.find_pool_member, - method_args=["MEMBER", "POOL"], - expected_args=[pool_member.PoolMember, "MEMBER"], - expected_kwargs={"pool_id": "POOL", - "ignore_missing": True}) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_pool_member, + method_args=["MEMBER", "POOL"], + expected_args=[pool_member.PoolMember, "MEMBER"], + expected_kwargs={"pool_id": "POOL", "ignore_missing": True}, + ) def test_pool_member_get(self): - self._verify2('openstack.proxy2.BaseProxy._get', - self.proxy.get_pool_member, - method_args=["MEMBER", "POOL"], - expected_args=[pool_member.PoolMember, "MEMBER"], - expected_kwargs={"pool_id": "POOL"}) + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_pool_member, + method_args=["MEMBER", "POOL"], + expected_args=[pool_member.PoolMember, "MEMBER"], + expected_kwargs={"pool_id": "POOL"}, + ) def test_pool_members(self): - self.verify_list(self.proxy.pool_members, pool_member.PoolMember, - paginated=False, method_args=["test_id"], - expected_kwargs={"pool_id": "test_id"}) + self.verify_list( + self.proxy.pool_members, + pool_member.PoolMember, + method_args=["test_id"], + expected_args=[], + expected_kwargs={"pool_id": "test_id"}, + ) def test_pool_member_update(self): - self._verify2("openstack.proxy2.BaseProxy._update", - self.proxy.update_pool_member, - method_args=["MEMBER", "POOL"], - expected_args=[pool_member.PoolMember, "MEMBER"], - expected_kwargs={"pool_id": "POOL"}) + self._verify( + "openstack.network.v2._proxy.Proxy._update", + self.proxy.update_pool_member, + 
method_args=["MEMBER", "POOL"], + expected_args=[pool_member.PoolMember, "MEMBER"], + expected_kwargs={"pool_id": "POOL"}, + ) + +class TestNetworkPool(TestNetworkProxy): def test_pool_create_attrs(self): self.verify_create(self.proxy.create_pool, pool.Pool) @@ -436,7 +863,7 @@ def test_pool_get(self): self.verify_get(self.proxy.get_pool, pool.Pool) def test_pools(self): - self.verify_list(self.proxy.pools, pool.Pool, paginated=False) + self.verify_list(self.proxy.pools, pool.Pool) def test_pool_update(self): self.verify_update(self.proxy.update_pool, pool.Pool) @@ -445,10 +872,29 @@ def test_port_create_attrs(self): self.verify_create(self.proxy.create_port, port.Port) def test_port_delete(self): - self.verify_delete(self.proxy.delete_port, port.Port, False) + self.verify_delete( + self.proxy.delete_port, + port.Port, + False, + expected_kwargs={'if_revision': None}, + ) def test_port_delete_ignore(self): - self.verify_delete(self.proxy.delete_port, port.Port, True) + self.verify_delete( + self.proxy.delete_port, + port.Port, + True, + expected_kwargs={'if_revision': None}, + ) + + def test_port_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_port, + port.Port, + True, + method_kwargs={'if_revision': 42}, + expected_kwargs={'if_revision': 42}, + ) def test_port_find(self): self.verify_find(self.proxy.find_port, port.Port) @@ -457,252 +903,486 @@ def test_port_get(self): self.verify_get(self.proxy.get_port, port.Port) def test_ports(self): - self.verify_list(self.proxy.ports, port.Port, paginated=False) + self.verify_list(self.proxy.ports, port.Port) def test_port_update(self): - self.verify_update(self.proxy.update_port, port.Port) + self.verify_update( + self.proxy.update_port, + port.Port, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, + ) + + def test_port_update_if_revision(self): + self.verify_update( + self.proxy.update_port, + port.Port, + method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + 
expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + ) + + @mock.patch('openstack.network.v2._proxy.Proxy._bulk_create') + def test_ports_create(self, bc): + data = mock.sentinel + + self.proxy.create_ports(data) + bc.assert_called_once_with(port.Port, data) + + +class TestNetworkQosBandwidth(TestNetworkProxy): def test_qos_bandwidth_limit_rule_create_attrs(self): self.verify_create( self.proxy.create_qos_bandwidth_limit_rule, qos_bandwidth_limit_rule.QoSBandwidthLimitRule, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_bandwidth_limit_rule_delete(self): self.verify_delete( self.proxy.delete_qos_bandwidth_limit_rule, qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - False, input_path_args=["resource_or_id", QOS_POLICY_ID], - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + ignore_missing=False, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_bandwidth_limit_rule_delete_ignore(self): self.verify_delete( self.proxy.delete_qos_bandwidth_limit_rule, qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - True, input_path_args=["resource_or_id", QOS_POLICY_ID], - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + ignore_missing=True, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_bandwidth_limit_rule_find(self): policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) - self._verify2('openstack.proxy2.BaseProxy._find', - self.proxy.find_qos_bandwidth_limit_rule, - method_args=['rule_id', policy], - expected_args=[ - qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - 'rule_id'], - expected_kwargs={'ignore_missing': True, - 'qos_policy_id': QOS_POLICY_ID}) + self._verify( + 'openstack.proxy.Proxy._find', + 
self.proxy.find_qos_bandwidth_limit_rule, + method_args=['rule_id', policy], + expected_args=[ + qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + 'rule_id', + ], + expected_kwargs={ + 'ignore_missing': True, + 'qos_policy_id': QOS_POLICY_ID, + }, + ) def test_qos_bandwidth_limit_rule_get(self): self.verify_get( self.proxy.get_qos_bandwidth_limit_rule, qos_bandwidth_limit_rule.QoSBandwidthLimitRule, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_bandwidth_limit_rules(self): self.verify_list( self.proxy.qos_bandwidth_limit_rules, qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - paginated=False, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_bandwidth_limit_rule_update(self): policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) - self._verify2('openstack.proxy2.BaseProxy._update', - self.proxy.update_qos_bandwidth_limit_rule, - method_args=['rule_id', policy], - method_kwargs={'foo': 'bar'}, - expected_args=[ - qos_bandwidth_limit_rule.QoSBandwidthLimitRule, - 'rule_id'], - expected_kwargs={'qos_policy_id': QOS_POLICY_ID, - 'foo': 'bar'}) + self._verify( + 'openstack.network.v2._proxy.Proxy._update', + self.proxy.update_qos_bandwidth_limit_rule, + method_args=['rule_id', policy], + method_kwargs={'foo': 'bar'}, + expected_args=[ + qos_bandwidth_limit_rule.QoSBandwidthLimitRule, + 'rule_id', + ], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'}, + ) + +class TestNetworkQosDscpMarking(TestNetworkProxy): def test_qos_dscp_marking_rule_create_attrs(self): self.verify_create( self.proxy.create_qos_dscp_marking_rule, qos_dscp_marking_rule.QoSDSCPMarkingRule, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def 
test_qos_dscp_marking_rule_delete(self): self.verify_delete( self.proxy.delete_qos_dscp_marking_rule, qos_dscp_marking_rule.QoSDSCPMarkingRule, - False, input_path_args=["resource_or_id", QOS_POLICY_ID], - expected_path_args={'qos_policy_id': QOS_POLICY_ID},) + ignore_missing=False, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_dscp_marking_rule_delete_ignore(self): self.verify_delete( self.proxy.delete_qos_dscp_marking_rule, qos_dscp_marking_rule.QoSDSCPMarkingRule, - True, input_path_args=["resource_or_id", QOS_POLICY_ID], - expected_path_args={'qos_policy_id': QOS_POLICY_ID}, ) + ignore_missing=True, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_dscp_marking_rule_find(self): policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) - self._verify2('openstack.proxy2.BaseProxy._find', - self.proxy.find_qos_dscp_marking_rule, - method_args=['rule_id', policy], - expected_args=[qos_dscp_marking_rule.QoSDSCPMarkingRule, - 'rule_id'], - expected_kwargs={'ignore_missing': True, - 'qos_policy_id': QOS_POLICY_ID}) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_qos_dscp_marking_rule, + method_args=['rule_id', policy], + expected_args=[ + qos_dscp_marking_rule.QoSDSCPMarkingRule, + 'rule_id', + ], + expected_kwargs={ + 'ignore_missing': True, + 'qos_policy_id': QOS_POLICY_ID, + }, + ) def test_qos_dscp_marking_rule_get(self): self.verify_get( self.proxy.get_qos_dscp_marking_rule, qos_dscp_marking_rule.QoSDSCPMarkingRule, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_dscp_marking_rules(self): self.verify_list( self.proxy.qos_dscp_marking_rules, qos_dscp_marking_rule.QoSDSCPMarkingRule, - paginated=False, 
method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_dscp_marking_rule_update(self): policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) - self._verify2('openstack.proxy2.BaseProxy._update', - self.proxy.update_qos_dscp_marking_rule, - method_args=['rule_id', policy], - method_kwargs={'foo': 'bar'}, - expected_args=[ - qos_dscp_marking_rule.QoSDSCPMarkingRule, - 'rule_id'], - expected_kwargs={'qos_policy_id': QOS_POLICY_ID, - 'foo': 'bar'}) + self._verify( + 'openstack.network.v2._proxy.Proxy._update', + self.proxy.update_qos_dscp_marking_rule, + method_args=['rule_id', policy], + method_kwargs={'foo': 'bar'}, + expected_args=[ + qos_dscp_marking_rule.QoSDSCPMarkingRule, + 'rule_id', + ], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'}, + ) + +class TestNetworkQosMinimumBandwidth(TestNetworkProxy): def test_qos_minimum_bandwidth_rule_create_attrs(self): self.verify_create( self.proxy.create_qos_minimum_bandwidth_rule, qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_minimum_bandwidth_rule_delete(self): self.verify_delete( self.proxy.delete_qos_minimum_bandwidth_rule, qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - False, input_path_args=["resource_or_id", QOS_POLICY_ID], - expected_path_args={'qos_policy_id': QOS_POLICY_ID},) + ignore_missing=False, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_minimum_bandwidth_rule_delete_ignore(self): self.verify_delete( self.proxy.delete_qos_minimum_bandwidth_rule, qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - True, input_path_args=["resource_or_id", QOS_POLICY_ID], - expected_path_args={'qos_policy_id': 
QOS_POLICY_ID}, ) + ignore_missing=True, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_minimum_bandwidth_rule_find(self): policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) - self._verify2('openstack.proxy2.BaseProxy._find', - self.proxy.find_qos_minimum_bandwidth_rule, - method_args=['rule_id', policy], - expected_args=[ - qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - 'rule_id'], - expected_kwargs={'ignore_missing': True, - 'qos_policy_id': QOS_POLICY_ID}) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_qos_minimum_bandwidth_rule, + method_args=['rule_id', policy], + expected_args=[ + qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, + 'rule_id', + ], + expected_kwargs={ + 'ignore_missing': True, + 'qos_policy_id': QOS_POLICY_ID, + }, + ) def test_qos_minimum_bandwidth_rule_get(self): self.verify_get( self.proxy.get_qos_minimum_bandwidth_rule, qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_minimum_bandwidth_rules(self): self.verify_list( self.proxy.qos_minimum_bandwidth_rules, qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - paginated=False, method_kwargs={'qos_policy': QOS_POLICY_ID}, - expected_kwargs={'qos_policy_id': QOS_POLICY_ID}) + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) def test_qos_minimum_bandwidth_rule_update(self): policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) - self._verify2('openstack.proxy2.BaseProxy._update', - self.proxy.update_qos_minimum_bandwidth_rule, - method_args=['rule_id', policy], - method_kwargs={'foo': 'bar'}, - expected_args=[ - qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, - 'rule_id'], - expected_kwargs={'qos_policy_id': QOS_POLICY_ID, - 'foo': 'bar'}) + self._verify( + 
'openstack.network.v2._proxy.Proxy._update', + self.proxy.update_qos_minimum_bandwidth_rule, + method_args=['rule_id', policy], + method_kwargs={'foo': 'bar'}, + expected_args=[ + qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule, + 'rule_id', + ], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'}, + ) - def test_qos_policy_create_attrs(self): - self.verify_create(self.proxy.create_qos_policy, qos_policy.QoSPolicy) - def test_qos_policy_delete(self): - self.verify_delete(self.proxy.delete_qos_policy, qos_policy.QoSPolicy, - False) +class TestNetworkQosMinimumPacketRate(TestNetworkProxy): + def test_qos_minimum_packet_rate_rule_create_attrs(self): + self.verify_create( + self.proxy.create_qos_minimum_packet_rate_rule, + qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + method_kwargs={'qos_policy': QOS_POLICY_ID}, + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) - def test_qos_policy_delete_ignore(self): - self.verify_delete(self.proxy.delete_qos_policy, qos_policy.QoSPolicy, - True) + def test_qos_minimum_packet_rate_rule_delete(self): + self.verify_delete( + self.proxy.delete_qos_minimum_packet_rate_rule, + qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + ignore_missing=False, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) - def test_qos_policy_find(self): - self.verify_find(self.proxy.find_qos_policy, qos_policy.QoSPolicy) + def test_qos_minimum_packet_rate_rule_delete_ignore(self): + self.verify_delete( + self.proxy.delete_qos_minimum_packet_rate_rule, + qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + ignore_missing=True, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) - def test_qos_policy_get(self): - self.verify_get(self.proxy.get_qos_policy, qos_policy.QoSPolicy) + def test_qos_minimum_packet_rate_rule_find(self): + policy = 
qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_qos_minimum_packet_rate_rule, + method_args=['rule_id', policy], + expected_args=[ + qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + 'rule_id', + ], + expected_kwargs={ + 'ignore_missing': True, + 'qos_policy_id': QOS_POLICY_ID, + }, + ) - def test_qos_policies(self): - self.verify_list(self.proxy.qos_policies, qos_policy.QoSPolicy, - paginated=False) + def test_qos_minimum_packet_rate_rule_get(self): + self.verify_get( + self.proxy.get_qos_minimum_packet_rate_rule, + qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + method_kwargs={'qos_policy': QOS_POLICY_ID}, + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) - def test_qos_policy_update(self): - self.verify_update(self.proxy.update_qos_policy, qos_policy.QoSPolicy) + def test_qos_minimum_packet_rate_rules(self): + self.verify_list( + self.proxy.qos_minimum_packet_rate_rules, + qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + method_kwargs={'qos_policy': QOS_POLICY_ID}, + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) - def test_qos_rule_types(self): - self.verify_list(self.proxy.qos_rule_types, qos_rule_type.QoSRuleType, - paginated=False) + def test_qos_minimum_packet_rate_rule_update(self): + policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) + self._verify( + 'openstack.network.v2._proxy.Proxy._update', + self.proxy.update_qos_minimum_packet_rate_rule, + method_args=['rule_id', policy], + method_kwargs={'foo': 'bar'}, + expected_args=[ + qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule, + 'rule_id', + ], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'}, + ) - def test_quota_delete(self): - self.verify_delete(self.proxy.delete_quota, quota.Quota, False) - def test_quota_delete_ignore(self): - self.verify_delete(self.proxy.delete_quota, quota.Quota, True) +class TestNetworkQosPacketRateLimitRule(TestNetworkProxy): + def 
test_qos_packet_rate_limit_rule_create_attrs(self): + self.verify_create( + self.proxy.create_qos_packet_rate_limit_rule, + qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + method_kwargs={'qos_policy': QOS_POLICY_ID}, + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) - def test_quota_get(self): - self.verify_get(self.proxy.get_quota, quota.Quota) + def test_qos_packet_rate_limit_rule_delete(self): + self.verify_delete( + self.proxy.delete_qos_packet_rate_limit_rule, + qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + ignore_missing=False, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) - @mock.patch.object(proxy_base2.BaseProxy, "_get_resource") - def test_quota_default_get(self, mock_get): - fake_quota = mock.Mock(project_id='PROJECT') + def test_qos_packet_rate_limit_rule_delete_ignore(self): + self.verify_delete( + self.proxy.delete_qos_packet_rate_limit_rule, + qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + ignore_missing=True, + method_args=["resource_or_id", QOS_POLICY_ID], + expected_args=["resource_or_id"], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) + + def test_qos_packet_rate_limit_rule_find(self): + policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_qos_packet_rate_limit_rule, + method_args=['rule_id', policy], + expected_args=[ + qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + 'rule_id', + ], + expected_kwargs={ + 'ignore_missing': True, + 'qos_policy_id': QOS_POLICY_ID, + }, + ) + + def test_qos_packet_rate_limit_rule_get(self): + self.verify_get( + self.proxy.get_qos_packet_rate_limit_rule, + qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + method_kwargs={'qos_policy': QOS_POLICY_ID}, + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) + + def test_qos_packet_rate_limit_rules(self): + self.verify_list( + self.proxy.qos_packet_rate_limit_rules, + 
qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + method_kwargs={'qos_policy': QOS_POLICY_ID}, + expected_kwargs={'qos_policy_id': QOS_POLICY_ID}, + ) + + def test_qos_packet_rate_limit_rule_update(self): + policy = qos_policy.QoSPolicy.new(id=QOS_POLICY_ID) + self._verify( + 'openstack.network.v2._proxy.Proxy._update', + self.proxy.update_qos_packet_rate_limit_rule, + method_args=['rule_id', policy], + method_kwargs={'foo': 'bar'}, + expected_args=[ + qos_packet_rate_limit_rule.QoSPacketRateLimitRule, + 'rule_id', + ], + expected_kwargs={'qos_policy_id': QOS_POLICY_ID, 'foo': 'bar'}, + ) + + +class TestNetworkQosRuleType(TestNetworkProxy): + def test_qos_rule_type_find(self): + self.verify_find( + self.proxy.find_qos_rule_type, qos_rule_type.QoSRuleType + ) + + def test_qos_rule_type_get(self): + self.verify_get( + self.proxy.get_qos_rule_type, qos_rule_type.QoSRuleType + ) + + def test_qos_rule_types(self): + self.verify_list(self.proxy.qos_rule_types, qos_rule_type.QoSRuleType) + + +class TestNetworkQuota(TestNetworkProxy): + def test_quota_delete(self): + self.verify_delete(self.proxy.delete_quota, quota.Quota, False) + + def test_quota_delete_ignore(self): + self.verify_delete(self.proxy.delete_quota, quota.Quota, True) + + def test_quota_get(self): + self.verify_get(self.proxy.get_quota, quota.Quota) + + @mock.patch.object(proxy_base.Proxy, "_get_resource") + def test_quota_get_details(self, mock_get): + fake_quota = mock.Mock(project_id='PROJECT') mock_get.return_value = fake_quota - self._verify2("openstack.proxy2.BaseProxy._get", - self.proxy.get_quota_default, - method_args=['QUOTA_ID'], - expected_args=[quota.QuotaDefault], - expected_kwargs={'project': "PROJECT"}) + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_quota, + method_args=['QUOTA_ID'], + method_kwargs={'details': True}, + expected_args=[quota.QuotaDetails], + expected_kwargs={'project': fake_quota.id, 'requires_id': False}, + ) + 
mock_get.assert_called_once_with(quota.Quota, 'QUOTA_ID') + + @mock.patch.object(proxy_base.Proxy, "_get_resource") + def test_quota_default_get(self, mock_get): + fake_quota = mock.Mock(project_id='PROJECT') + mock_get.return_value = fake_quota + self._verify( + "openstack.proxy.Proxy._get", + self.proxy.get_quota_default, + method_args=['QUOTA_ID'], + expected_args=[quota.QuotaDefault], + expected_kwargs={'project': fake_quota.id, 'requires_id': False}, + ) mock_get.assert_called_once_with(quota.Quota, 'QUOTA_ID') def test_quotas(self): - self.verify_list(self.proxy.quotas, quota.Quota, paginated=False) + self.verify_list(self.proxy.quotas, quota.Quota) def test_quota_update(self): self.verify_update(self.proxy.update_quota, quota.Quota) + +class TestNetworkRbacPolicy(TestNetworkProxy): def test_rbac_policy_create_attrs(self): - self.verify_create(self.proxy.create_rbac_policy, - rbac_policy.RBACPolicy) + self.verify_create( + self.proxy.create_rbac_policy, rbac_policy.RBACPolicy + ) def test_rbac_policy_delete(self): - self.verify_delete(self.proxy.delete_rbac_policy, - rbac_policy.RBACPolicy, False) + self.verify_delete( + self.proxy.delete_rbac_policy, rbac_policy.RBACPolicy, False + ) def test_rbac_policy_delete_ignore(self): - self.verify_delete(self.proxy.delete_rbac_policy, - rbac_policy.RBACPolicy, True) + self.verify_delete( + self.proxy.delete_rbac_policy, rbac_policy.RBACPolicy, True + ) def test_rbac_policy_find(self): self.verify_find(self.proxy.find_rbac_policy, rbac_policy.RBACPolicy) @@ -711,21 +1391,42 @@ def test_rbac_policy_get(self): self.verify_get(self.proxy.get_rbac_policy, rbac_policy.RBACPolicy) def test_rbac_policies(self): - self.verify_list(self.proxy.rbac_policies, - rbac_policy.RBACPolicy, paginated=False) + self.verify_list(self.proxy.rbac_policies, rbac_policy.RBACPolicy) def test_rbac_policy_update(self): - self.verify_update(self.proxy.update_rbac_policy, - rbac_policy.RBACPolicy) + self.verify_update( + 
self.proxy.update_rbac_policy, rbac_policy.RBACPolicy + ) + +class TestNetworkRouter(TestNetworkProxy): def test_router_create_attrs(self): self.verify_create(self.proxy.create_router, router.Router) def test_router_delete(self): - self.verify_delete(self.proxy.delete_router, router.Router, False) + self.verify_delete( + self.proxy.delete_router, + router.Router, + False, + expected_kwargs={'if_revision': None}, + ) def test_router_delete_ignore(self): - self.verify_delete(self.proxy.delete_router, router.Router, True) + self.verify_delete( + self.proxy.delete_router, + router.Router, + True, + expected_kwargs={'if_revision': None}, + ) + + def test_router_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_router, + router.Router, + True, + method_kwargs={'if_revision': 42}, + expected_kwargs={'if_revision': 42}, + ) def test_router_find(self): self.verify_find(self.proxy.find_router, router.Router) @@ -734,106 +1435,503 @@ def test_router_get(self): self.verify_get(self.proxy.get_router, router.Router) def test_routers(self): - self.verify_list(self.proxy.routers, router.Router, paginated=False) + self.verify_list(self.proxy.routers, router.Router) def test_router_update(self): - self.verify_update(self.proxy.update_router, router.Router) + self.verify_update( + self.proxy.update_router, + router.Router, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, + ) + + def test_router_update_if_revision(self): + self.verify_update( + self.proxy.update_router, + router.Router, + method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + ) + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'add_interface') + def test_add_interface_to_router_with_port( + self, mock_add_interface, mock_get + ): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + 
"openstack.network.v2.router.Router.add_interface", + self.proxy.add_interface_to_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"port_id": "PORT"}, + expected_args=[self.proxy], + expected_kwargs={"port_id": "PORT"}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'add_interface') + def test_add_interface_to_router_with_subnet( + self, mock_add_interface, mock_get + ): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.add_interface", + self.proxy.add_interface_to_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"subnet_id": "SUBNET"}, + expected_args=[self.proxy], + expected_kwargs={"subnet_id": "SUBNET"}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'remove_interface') + def test_remove_interface_from_router_with_port( + self, mock_remove, mock_get + ): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.remove_interface", + self.proxy.remove_interface_from_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"port_id": "PORT"}, + expected_args=[self.proxy], + expected_kwargs={"port_id": "PORT"}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'remove_interface') + def test_remove_interface_from_router_with_subnet( + self, mock_remove, mock_get + ): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.remove_interface", + self.proxy.remove_interface_from_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"subnet_id": "SUBNET"}, + 
expected_args=[self.proxy], + expected_kwargs={"subnet_id": "SUBNET"}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'add_extra_routes') + def test_add_extra_routes_to_router(self, mock_add_extra_routes, mock_get): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.add_extra_routes", + self.proxy.add_extra_routes_to_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"body": {"router": {"routes": []}}}, + expected_args=[self.proxy], + expected_kwargs={"body": {"router": {"routes": []}}}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'remove_extra_routes') + def test_remove_extra_routes_from_router( + self, mock_remove_extra_routes, mock_get + ): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.remove_extra_routes", + self.proxy.remove_extra_routes_from_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"body": {"router": {"routes": []}}}, + expected_args=[self.proxy], + expected_kwargs={"body": {"router": {"routes": []}}}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'add_gateway') + def test_add_gateway_to_router(self, mock_add, mock_get): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.add_gateway", + self.proxy.add_gateway_to_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"foo": "bar"}, + expected_args=[self.proxy], + expected_kwargs={"foo": "bar"}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + 
@mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'remove_gateway') + def test_remove_gateway_from_router(self, mock_remove, mock_get): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.remove_gateway", + self.proxy.remove_gateway_from_router, + method_args=["FAKE_ROUTER"], + method_kwargs={"foo": "bar"}, + expected_args=[self.proxy], + expected_kwargs={"foo": "bar"}, + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'add_external_gateways') + def test_add_external_gateways(self, mock_add, mock_get): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.add_external_gateways", + self.proxy.add_external_gateways, + method_args=["FAKE_ROUTER", "bar"], + expected_args=[self.proxy, "bar"], + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'update_external_gateways') + def test_update_external_gateways(self, mock_remove, mock_get): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.update_external_gateways", + self.proxy.update_external_gateways, + method_args=["FAKE_ROUTER", "bar"], + expected_args=[self.proxy, "bar"], + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + @mock.patch.object(proxy_base.Proxy, '_get_resource') + @mock.patch.object(router.Router, 'remove_external_gateways') + def test_remove_external_gateways(self, mock_remove, mock_get): + x_router = router.Router.new(id="ROUTER_ID") + mock_get.return_value = x_router + + self._verify( + "openstack.network.v2.router.Router.remove_external_gateways", + 
self.proxy.remove_external_gateways, + method_args=["FAKE_ROUTER", "bar"], + expected_args=[self.proxy, "bar"], + ) + mock_get.assert_called_once_with(router.Router, "FAKE_ROUTER") + + def test_router_hosting_l3_agents_list(self): + self.verify_list( + self.proxy.routers_hosting_l3_agents, + agent.RouterL3Agent, + method_kwargs={'router': ROUTER_ID}, + expected_kwargs={'router_id': ROUTER_ID}, + ) + + def test_agent_hosted_routers_list(self): + self.verify_list( + self.proxy.agent_hosted_routers, + router.L3AgentRouter, + method_kwargs={'agent': AGENT_ID}, + expected_kwargs={'agent_id': AGENT_ID}, + ) + + +class TestNetworkFirewallGroup(TestNetworkProxy): + def test_firewall_group_create_attrs(self): + self.verify_create( + self.proxy.create_firewall_group, firewall_group.FirewallGroup + ) + + def test_firewall_group_delete(self): + self.verify_delete( + self.proxy.delete_firewall_group, + firewall_group.FirewallGroup, + False, + ) + + def test_firewall_group_delete_ignore(self): + self.verify_delete( + self.proxy.delete_firewall_group, + firewall_group.FirewallGroup, + True, + ) + + def test_firewall_group_find(self): + self.verify_find( + self.proxy.find_firewall_group, firewall_group.FirewallGroup + ) + + def test_firewall_group_get(self): + self.verify_get( + self.proxy.get_firewall_group, firewall_group.FirewallGroup + ) + + def test_firewall_groups(self): + self.verify_list( + self.proxy.firewall_groups, firewall_group.FirewallGroup + ) + + def test_firewall_group_update(self): + self.verify_update( + self.proxy.update_firewall_group, firewall_group.FirewallGroup + ) + + +class TestNetworkPolicy(TestNetworkProxy): + def test_firewall_policy_create_attrs(self): + self.verify_create( + self.proxy.create_firewall_policy, firewall_policy.FirewallPolicy + ) + + def test_firewall_policy_delete(self): + self.verify_delete( + self.proxy.delete_firewall_policy, + firewall_policy.FirewallPolicy, + False, + ) + + def test_firewall_policy_delete_ignore(self): + 
self.verify_delete( + self.proxy.delete_firewall_policy, + firewall_policy.FirewallPolicy, + True, + ) + + def test_firewall_policy_find(self): + self.verify_find( + self.proxy.find_firewall_policy, firewall_policy.FirewallPolicy + ) + + def test_firewall_policy_get(self): + self.verify_get( + self.proxy.get_firewall_policy, firewall_policy.FirewallPolicy + ) + + def test_firewall_policies(self): + self.verify_list( + self.proxy.firewall_policies, firewall_policy.FirewallPolicy + ) + + def test_firewall_policy_update(self): + self.verify_update( + self.proxy.update_firewall_policy, firewall_policy.FirewallPolicy + ) + + +class TestNetworkRule(TestNetworkProxy): + def test_firewall_rule_create_attrs(self): + self.verify_create( + self.proxy.create_firewall_rule, firewall_rule.FirewallRule + ) + + def test_firewall_rule_delete(self): + self.verify_delete( + self.proxy.delete_firewall_rule, firewall_rule.FirewallRule, False + ) + + def test_firewall_rule_delete_ignore(self): + self.verify_delete( + self.proxy.delete_firewall_rule, firewall_rule.FirewallRule, True + ) + + def test_firewall_rule_find(self): + self.verify_find( + self.proxy.find_firewall_rule, firewall_rule.FirewallRule + ) + def test_firewall_rule_get(self): + self.verify_get( + self.proxy.get_firewall_rule, firewall_rule.FirewallRule + ) + + def test_firewall_rules(self): + self.verify_list(self.proxy.firewall_rules, firewall_rule.FirewallRule) + + def test_firewall_rule_update(self): + self.verify_update( + self.proxy.update_firewall_rule, firewall_rule.FirewallRule + ) + + +class TestNetworkNetworkSegment(TestNetworkProxy): + def test_network_segment_range_create_attrs(self): + self.verify_create( + self.proxy.create_network_segment_range, + network_segment_range.NetworkSegmentRange, + ) + + def test_network_segment_range_delete(self): + self.verify_delete( + self.proxy.delete_network_segment_range, + network_segment_range.NetworkSegmentRange, + False, + ) + + def 
test_network_segment_range_delete_ignore(self): + self.verify_delete( + self.proxy.delete_network_segment_range, + network_segment_range.NetworkSegmentRange, + True, + ) + + def test_network_segment_range_find(self): + self.verify_find( + self.proxy.find_network_segment_range, + network_segment_range.NetworkSegmentRange, + ) + + def test_network_segment_range_get(self): + self.verify_get( + self.proxy.get_network_segment_range, + network_segment_range.NetworkSegmentRange, + ) + + def test_network_segment_ranges(self): + self.verify_list( + self.proxy.network_segment_ranges, + network_segment_range.NetworkSegmentRange, + ) + + def test_network_segment_range_update(self): + self.verify_update( + self.proxy.update_network_segment_range, + network_segment_range.NetworkSegmentRange, + ) + + +class TestNetworkSecurityGroup(TestNetworkProxy): def test_security_group_create_attrs(self): - self.verify_create(self.proxy.create_security_group, - security_group.SecurityGroup) + self.verify_create( + self.proxy.create_security_group, security_group.SecurityGroup + ) def test_security_group_delete(self): - self.verify_delete(self.proxy.delete_security_group, - security_group.SecurityGroup, False) + self.verify_delete( + self.proxy.delete_security_group, + security_group.SecurityGroup, + False, + expected_kwargs={'if_revision': None}, + ) def test_security_group_delete_ignore(self): - self.verify_delete(self.proxy.delete_security_group, - security_group.SecurityGroup, True) + self.verify_delete( + self.proxy.delete_security_group, + security_group.SecurityGroup, + True, + expected_kwargs={'if_revision': None}, + ) + + def test_security_group_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_security_group, + security_group.SecurityGroup, + True, + method_kwargs={'if_revision': 42}, + expected_kwargs={'if_revision': 42}, + ) def test_security_group_find(self): - self.verify_find(self.proxy.find_security_group, - security_group.SecurityGroup) + self.verify_find( + 
self.proxy.find_security_group, security_group.SecurityGroup + ) def test_security_group_get(self): - self.verify_get(self.proxy.get_security_group, - security_group.SecurityGroup) + self.verify_get( + self.proxy.get_security_group, security_group.SecurityGroup + ) def test_security_groups(self): - self.verify_list(self.proxy.security_groups, - security_group.SecurityGroup, - paginated=False) + self.verify_list( + self.proxy.security_groups, security_group.SecurityGroup + ) def test_security_group_update(self): - self.verify_update(self.proxy.update_security_group, - security_group.SecurityGroup) - - def test_security_group_open_port(self): - mock_class = 'openstack.network.v2._proxy.Proxy' - mock_method = mock_class + '.create_security_group_rule' - expected_result = 'result' - sgid = 1 - port = 2 - with mock.patch(mock_method) as mocked: - mocked.return_value = expected_result - actual = self.proxy.security_group_open_port(sgid, port) - self.assertEqual(expected_result, actual) - expected_args = { - 'direction': 'ingress', - 'protocol': 'tcp', - 'remote_ip_prefix': '0.0.0.0/0', - 'port_range_max': port, - 'security_group_id': sgid, - 'port_range_min': port, - 'ethertype': 'IPv4', - } - mocked.assert_called_with(**expected_args) - - def test_security_group_allow_ping(self): - mock_class = 'openstack.network.v2._proxy.Proxy' - mock_method = mock_class + '.create_security_group_rule' - expected_result = 'result' - sgid = 1 - with mock.patch(mock_method) as mocked: - mocked.return_value = expected_result - actual = self.proxy.security_group_allow_ping(sgid) - self.assertEqual(expected_result, actual) - expected_args = { - 'direction': 'ingress', - 'protocol': 'icmp', - 'remote_ip_prefix': '0.0.0.0/0', - 'port_range_max': None, - 'security_group_id': sgid, - 'port_range_min': None, - 'ethertype': 'IPv4', - } - mocked.assert_called_with(**expected_args) + self.verify_update( + self.proxy.update_security_group, + security_group.SecurityGroup, + expected_kwargs={'x': 1, 
'y': 2, 'z': 3, 'if_revision': None}, + ) + + def test_security_group_update_if_revision(self): + self.verify_update( + self.proxy.update_security_group, + security_group.SecurityGroup, + method_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': 42}, + ) def test_security_group_rule_create_attrs(self): - self.verify_create(self.proxy.create_security_group_rule, - security_group_rule.SecurityGroupRule) + self.verify_create( + self.proxy.create_security_group_rule, + security_group_rule.SecurityGroupRule, + ) def test_security_group_rule_delete(self): - self.verify_delete(self.proxy.delete_security_group_rule, - security_group_rule.SecurityGroupRule, False) + self.verify_delete( + self.proxy.delete_security_group_rule, + security_group_rule.SecurityGroupRule, + False, + expected_kwargs={'if_revision': None}, + ) def test_security_group_rule_delete_ignore(self): - self.verify_delete(self.proxy.delete_security_group_rule, - security_group_rule.SecurityGroupRule, True) + self.verify_delete( + self.proxy.delete_security_group_rule, + security_group_rule.SecurityGroupRule, + True, + expected_kwargs={'if_revision': None}, + ) + + def test_security_group_rule_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_security_group_rule, + security_group_rule.SecurityGroupRule, + True, + method_kwargs={'if_revision': 42}, + expected_kwargs={'if_revision': 42}, + ) def test_security_group_rule_find(self): - self.verify_find(self.proxy.find_security_group_rule, - security_group_rule.SecurityGroupRule) + self.verify_find( + self.proxy.find_security_group_rule, + security_group_rule.SecurityGroupRule, + ) def test_security_group_rule_get(self): - self.verify_get(self.proxy.get_security_group_rule, - security_group_rule.SecurityGroupRule) + self.verify_get( + self.proxy.get_security_group_rule, + security_group_rule.SecurityGroupRule, + ) def test_security_group_rules(self): - 
self.verify_list(self.proxy.security_group_rules, - security_group_rule.SecurityGroupRule, - paginated=False) + self.verify_list( + self.proxy.security_group_rules, + security_group_rule.SecurityGroupRule, + ) + + @mock.patch('openstack.network.v2._proxy.Proxy._bulk_create') + def test_security_group_rules_create(self, bc): + data = mock.sentinel + self.proxy.create_security_group_rules(data) + + bc.assert_called_once_with(security_group_rule.SecurityGroupRule, data) + + +class TestNetworkSegment(TestNetworkProxy): def test_segment_create_attrs(self): self.verify_create(self.proxy.create_segment, segment.Segment) @@ -850,19 +1948,40 @@ def test_segment_get(self): self.verify_get(self.proxy.get_segment, segment.Segment) def test_segments(self): - self.verify_list(self.proxy.segments, segment.Segment, paginated=False) + self.verify_list(self.proxy.segments, segment.Segment) def test_segment_update(self): self.verify_update(self.proxy.update_segment, segment.Segment) + +class TestNetworkSubnet(TestNetworkProxy): def test_subnet_create_attrs(self): self.verify_create(self.proxy.create_subnet, subnet.Subnet) def test_subnet_delete(self): - self.verify_delete(self.proxy.delete_subnet, subnet.Subnet, False) + self.verify_delete( + self.proxy.delete_subnet, + subnet.Subnet, + False, + expected_kwargs={'if_revision': None}, + ) def test_subnet_delete_ignore(self): - self.verify_delete(self.proxy.delete_subnet, subnet.Subnet, True) + self.verify_delete( + self.proxy.delete_subnet, + subnet.Subnet, + True, + expected_kwargs={'if_revision': None}, + ) + + def test_subnet_delete_if_revision(self): + self.verify_delete( + self.proxy.delete_subnet, + subnet.Subnet, + True, + method_kwargs={'if_revision': 42}, + expected_kwargs={'if_revision': 42}, + ) def test_subnet_find(self): self.verify_find(self.proxy.find_subnet, subnet.Subnet) @@ -871,90 +1990,803 @@ def test_subnet_get(self): self.verify_get(self.proxy.get_subnet, subnet.Subnet) def test_subnets(self): - 
self.verify_list(self.proxy.subnets, subnet.Subnet, paginated=False) + self.verify_list(self.proxy.subnets, subnet.Subnet) def test_subnet_update(self): - self.verify_update(self.proxy.update_subnet, subnet.Subnet) + self.verify_update( + self.proxy.update_subnet, + subnet.Subnet, + expected_kwargs={'x': 1, 'y': 2, 'z': 3, 'if_revision': None}, + ) def test_subnet_pool_create_attrs(self): - self.verify_create(self.proxy.create_subnet_pool, - subnet_pool.SubnetPool) + self.verify_create( + self.proxy.create_subnet_pool, subnet_pool.SubnetPool + ) def test_subnet_pool_delete(self): - self.verify_delete(self.proxy.delete_subnet_pool, - subnet_pool.SubnetPool, False) + self.verify_delete( + self.proxy.delete_subnet_pool, subnet_pool.SubnetPool, False + ) def test_subnet_pool_delete_ignore(self): - self.verify_delete(self.proxy.delete_subnet_pool, - subnet_pool.SubnetPool, True) + self.verify_delete( + self.proxy.delete_subnet_pool, subnet_pool.SubnetPool, True + ) def test_subnet_pool_find(self): - self.verify_find(self.proxy.find_subnet_pool, - subnet_pool.SubnetPool) + self.verify_find(self.proxy.find_subnet_pool, subnet_pool.SubnetPool) def test_subnet_pool_get(self): - self.verify_get(self.proxy.get_subnet_pool, - subnet_pool.SubnetPool) + self.verify_get(self.proxy.get_subnet_pool, subnet_pool.SubnetPool) def test_subnet_pools(self): - self.verify_list(self.proxy.subnet_pools, - subnet_pool.SubnetPool, - paginated=False) + self.verify_list(self.proxy.subnet_pools, subnet_pool.SubnetPool) def test_subnet_pool_update(self): - self.verify_update(self.proxy.update_subnet_pool, - subnet_pool.SubnetPool) + self.verify_update( + self.proxy.update_subnet_pool, subnet_pool.SubnetPool + ) + + +class TestNetworkVpnEndpointGroup(TestNetworkProxy): + def test_vpn_endpoint_group_create_attrs(self): + self.verify_create( + self.proxy.create_vpn_endpoint_group, + vpn_endpoint_group.VpnEndpointGroup, + ) + def test_vpn_endpoint_group_delete(self): + self.verify_delete( + 
self.proxy.delete_vpn_endpoint_group, + vpn_endpoint_group.VpnEndpointGroup, + False, + ) + + def test_vpn_endpoint_group_delete_ignore(self): + self.verify_delete( + self.proxy.delete_vpn_endpoint_group, + vpn_endpoint_group.VpnEndpointGroup, + True, + ) + + def test_vpn_endpoint_group_find(self): + self.verify_find( + self.proxy.find_vpn_endpoint_group, + vpn_endpoint_group.VpnEndpointGroup, + ) + + def test_vpn_endpoint_group_get(self): + self.verify_get( + self.proxy.get_vpn_endpoint_group, + vpn_endpoint_group.VpnEndpointGroup, + ) + + def test_vpn_endpoint_groups(self): + self.verify_list( + self.proxy.vpn_endpoint_groups, vpn_endpoint_group.VpnEndpointGroup + ) + + def test_vpn_endpoint_group_update(self): + self.verify_update( + self.proxy.update_vpn_endpoint_group, + vpn_endpoint_group.VpnEndpointGroup, + ) + + +class TestNetworkVpnSiteConnection(TestNetworkProxy): + def test_ipsec_site_connection_create_attrs(self): + self.verify_create( + self.proxy.create_vpn_ipsec_site_connection, + vpn_ipsec_site_connection.VpnIPSecSiteConnection, + ) + + def test_ipsec_site_connection_delete(self): + self.verify_delete( + self.proxy.delete_vpn_ipsec_site_connection, + vpn_ipsec_site_connection.VpnIPSecSiteConnection, + False, + ) + + def test_ipsec_site_connection_delete_ignore(self): + self.verify_delete( + self.proxy.delete_vpn_ipsec_site_connection, + vpn_ipsec_site_connection.VpnIPSecSiteConnection, + True, + ) + + def test_ipsec_site_connection_find(self): + self.verify_find( + self.proxy.find_vpn_ipsec_site_connection, + vpn_ipsec_site_connection.VpnIPSecSiteConnection, + ) + + def test_ipsec_site_connection_get(self): + self.verify_get( + self.proxy.get_vpn_ipsec_site_connection, + vpn_ipsec_site_connection.VpnIPSecSiteConnection, + ) + + def test_ipsec_site_connections(self): + self.verify_list( + self.proxy.vpn_ipsec_site_connections, + vpn_ipsec_site_connection.VpnIPSecSiteConnection, + ) + + def test_ipsec_site_connection_update(self): + 
self.verify_update( + self.proxy.update_vpn_ipsec_site_connection, + vpn_ipsec_site_connection.VpnIPSecSiteConnection, + ) + + +class TestNetworkVpnIkePolicy(TestNetworkProxy): + def test_ike_policy_create_attrs(self): + self.verify_create( + self.proxy.create_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy + ) + + def test_ike_policy_delete(self): + self.verify_delete( + self.proxy.delete_vpn_ike_policy, + vpn_ike_policy.VpnIkePolicy, + False, + ) + + def test_ike_policy_delete_ignore(self): + self.verify_delete( + self.proxy.delete_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy, True + ) + + def test_ike_policy_find(self): + self.verify_find( + self.proxy.find_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy + ) + + def test_ike_policy_get(self): + self.verify_get( + self.proxy.get_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy + ) + + def test_ike_policies(self): + self.verify_list( + self.proxy.vpn_ike_policies, vpn_ike_policy.VpnIkePolicy + ) + + def test_ike_policy_update(self): + self.verify_update( + self.proxy.update_vpn_ike_policy, vpn_ike_policy.VpnIkePolicy + ) + + +class TestNetworkVpnIpsecPolicy(TestNetworkProxy): + def test_ipsec_policy_create_attrs(self): + self.verify_create( + self.proxy.create_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy + ) + + def test_ipsec_policy_delete(self): + self.verify_delete( + self.proxy.delete_vpn_ipsec_policy, + vpn_ipsec_policy.VpnIpsecPolicy, + False, + ) + + def test_ipsec_policy_delete_ignore(self): + self.verify_delete( + self.proxy.delete_vpn_ipsec_policy, + vpn_ipsec_policy.VpnIpsecPolicy, + True, + ) + + def test_ipsec_policy_find(self): + self.verify_find( + self.proxy.find_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy + ) + + def test_ipsec_policy_get(self): + self.verify_get( + self.proxy.get_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy + ) + + def test_ipsec_policies(self): + self.verify_list( + self.proxy.vpn_ipsec_policies, vpn_ipsec_policy.VpnIpsecPolicy + ) + + def test_ipsec_policy_update(self): + 
self.verify_update( + self.proxy.update_vpn_ipsec_policy, vpn_ipsec_policy.VpnIpsecPolicy + ) + + +class TestNetworkVpnService(TestNetworkProxy): def test_vpn_service_create_attrs(self): - self.verify_create(self.proxy.create_vpn_service, - vpn_service.VPNService) + self.verify_create( + self.proxy.create_vpn_service, vpn_service.VpnService + ) def test_vpn_service_delete(self): - self.verify_delete(self.proxy.delete_vpn_service, - vpn_service.VPNService, False) + self.verify_delete( + self.proxy.delete_vpn_service, vpn_service.VpnService, False + ) def test_vpn_service_delete_ignore(self): - self.verify_delete(self.proxy.delete_vpn_service, - vpn_service.VPNService, True) + self.verify_delete( + self.proxy.delete_vpn_service, vpn_service.VpnService, True + ) def test_vpn_service_find(self): - self.verify_find(self.proxy.find_vpn_service, - vpn_service.VPNService) + self.verify_find(self.proxy.find_vpn_service, vpn_service.VpnService) def test_vpn_service_get(self): - self.verify_get(self.proxy.get_vpn_service, vpn_service.VPNService) + self.verify_get(self.proxy.get_vpn_service, vpn_service.VpnService) def test_vpn_services(self): - self.verify_list(self.proxy.vpn_services, vpn_service.VPNService, - paginated=False) + self.verify_list(self.proxy.vpn_services, vpn_service.VpnService) def test_vpn_service_update(self): - self.verify_update(self.proxy.update_vpn_service, - vpn_service.VPNService) + self.verify_update( + self.proxy.update_vpn_service, vpn_service.VpnService + ) + +class TestNetworkServiceProvider(TestNetworkProxy): def test_service_provider(self): - self.verify_list(self.proxy.service_providers, - service_provider.ServiceProvider, - paginated=False) + self.verify_list( + self.proxy.service_providers, service_provider.ServiceProvider + ) + +class TestNetworkAutoAllocatedTopology(TestNetworkProxy): def test_auto_allocated_topology_get(self): - self.verify_get(self.proxy.get_auto_allocated_topology, - auto_allocated_topology.AutoAllocatedTopology) + 
self.verify_get( + self.proxy.get_auto_allocated_topology, + auto_allocated_topology.AutoAllocatedTopology, + ) def test_auto_allocated_topology_delete(self): - self.verify_delete(self.proxy.delete_auto_allocated_topology, - auto_allocated_topology.AutoAllocatedTopology, - False) + self.verify_delete( + self.proxy.delete_auto_allocated_topology, + auto_allocated_topology.AutoAllocatedTopology, + False, + ) def test_auto_allocated_topology_delete_ignore(self): - self.verify_delete(self.proxy.delete_auto_allocated_topology, - auto_allocated_topology.AutoAllocatedTopology, - True) + self.verify_delete( + self.proxy.delete_auto_allocated_topology, + auto_allocated_topology.AutoAllocatedTopology, + True, + ) def test_validate_topology(self): - self.verify_get(self.proxy.validate_auto_allocated_topology, - auto_allocated_topology.ValidateTopology, - value=[mock.sentinel.project_id], - expected_args=[ - auto_allocated_topology.ValidateTopology], - expected_kwargs={"project": mock.sentinel.project_id}) + self.verify_get( + self.proxy.validate_auto_allocated_topology, + auto_allocated_topology.ValidateTopology, + method_args=[mock.sentinel.project_id], + expected_args=[], + expected_kwargs={ + "project": mock.sentinel.project_id, + "requires_id": False, + }, + ) + + +class TestNetworkTags(TestNetworkProxy): + def test_set_tags(self): + x_network = network.Network.new(id='NETWORK_ID') + self._verify( + 'openstack.network.v2.network.Network.set_tags', + self.proxy.set_tags, + method_args=[x_network, ['TAG1', 'TAG2']], + expected_args=[self.proxy, ['TAG1', 'TAG2']], + expected_result=mock.sentinel.result_set_tags, + ) + + @mock.patch('openstack.network.v2.network.Network.set_tags') + def test_set_tags_resource_without_tag_suport(self, mock_set_tags): + no_tag_resource = object() + self.assertRaises( + exceptions.InvalidRequest, + self.proxy.set_tags, + no_tag_resource, + ['TAG1', 'TAG2'], + ) + self.assertEqual(0, mock_set_tags.call_count) + + +class 
TestNetworkFloatingIp(TestNetworkProxy): + def test_create_floating_ip_port_forwarding(self): + self.verify_create( + self.proxy.create_floating_ip_port_forwarding, + port_forwarding.PortForwarding, + method_kwargs={'floating_ip': FIP_ID}, + expected_kwargs={'floatingip_id': FIP_ID}, + ) + + def test_delete_floating_ip_port_forwarding(self): + self.verify_delete( + self.proxy.delete_floating_ip_port_forwarding, + port_forwarding.PortForwarding, + ignore_missing=False, + method_args=[FIP_ID, "resource_or_id"], + expected_args=["resource_or_id"], + expected_kwargs={'floatingip_id': FIP_ID}, + ) + + def test_delete_floating_ip_port_forwarding_ignore(self): + self.verify_delete( + self.proxy.delete_floating_ip_port_forwarding, + port_forwarding.PortForwarding, + ignore_missing=True, + method_args=[FIP_ID, "resource_or_id"], + expected_args=["resource_or_id"], + expected_kwargs={'floatingip_id': FIP_ID}, + ) + + def test_find_floating_ip_port_forwarding(self): + fip = floating_ip.FloatingIP.new(id=FIP_ID) + self._verify( + 'openstack.proxy.Proxy._find', + self.proxy.find_floating_ip_port_forwarding, + method_args=[fip, 'port_forwarding_id'], + expected_args=[ + port_forwarding.PortForwarding, + 'port_forwarding_id', + ], + expected_kwargs={'ignore_missing': True, 'floatingip_id': FIP_ID}, + ) + + def test_get_floating_ip_port_forwarding(self): + fip = floating_ip.FloatingIP.new(id=FIP_ID) + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_floating_ip_port_forwarding, + method_args=[fip, 'port_forwarding_id'], + expected_args=[ + port_forwarding.PortForwarding, + 'port_forwarding_id', + ], + expected_kwargs={'floatingip_id': FIP_ID}, + ) + + def test_floating_ip_port_forwardings(self): + self.verify_list( + self.proxy.floating_ip_port_forwardings, + port_forwarding.PortForwarding, + method_kwargs={'floating_ip': FIP_ID}, + expected_kwargs={'floatingip_id': FIP_ID}, + ) + + def test_update_floating_ip_port_forwarding(self): + fip = 
floating_ip.FloatingIP.new(id=FIP_ID) + self._verify( + 'openstack.network.v2._proxy.Proxy._update', + self.proxy.update_floating_ip_port_forwarding, + method_args=[fip, 'port_forwarding_id'], + method_kwargs={'foo': 'bar'}, + expected_args=[ + port_forwarding.PortForwarding, + 'port_forwarding_id', + ], + expected_kwargs={'floatingip_id': FIP_ID, 'foo': 'bar'}, + ) + + def test_create_l3_conntrack_helper(self): + self.verify_create( + self.proxy.create_conntrack_helper, + l3_conntrack_helper.ConntrackHelper, + method_kwargs={'router': ROUTER_ID}, + expected_kwargs={'router_id': ROUTER_ID}, + ) + + def test_delete_l3_conntrack_helper(self): + r = router.Router.new(id=ROUTER_ID) + self.verify_delete( + self.proxy.delete_conntrack_helper, + l3_conntrack_helper.ConntrackHelper, + ignore_missing=False, + method_args=['resource_or_id', r], + expected_args=['resource_or_id'], + expected_kwargs={'router_id': ROUTER_ID}, + ) + + def test_delete_l3_conntrack_helper_ignore(self): + r = router.Router.new(id=ROUTER_ID) + self.verify_delete( + self.proxy.delete_conntrack_helper, + l3_conntrack_helper.ConntrackHelper, + ignore_missing=True, + method_args=['resource_or_id', r], + expected_args=['resource_or_id'], + expected_kwargs={'router_id': ROUTER_ID}, + ) + + def test_get_l3_conntrack_helper(self): + r = router.Router.new(id=ROUTER_ID) + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_conntrack_helper, + method_args=['conntrack_helper_id', r], + expected_args=[ + l3_conntrack_helper.ConntrackHelper, + 'conntrack_helper_id', + ], + expected_kwargs={'router_id': ROUTER_ID}, + ) + + def test_l3_conntrack_helpers(self): + self.verify_list( + self.proxy.conntrack_helpers, + l3_conntrack_helper.ConntrackHelper, + method_args=[ROUTER_ID], + expected_args=[], + expected_kwargs={'router_id': ROUTER_ID}, + ) + + def test_update_l3_conntrack_helper(self): + r = router.Router.new(id=ROUTER_ID) + self._verify( + 'openstack.network.v2._proxy.Proxy._update', + 
self.proxy.update_conntrack_helper, + method_args=['conntrack_helper_id', r], + method_kwargs={'foo': 'bar'}, + expected_args=[ + l3_conntrack_helper.ConntrackHelper, + 'conntrack_helper_id', + ], + expected_kwargs={'router_id': ROUTER_ID, 'foo': 'bar'}, + ) + + +class TestNetworkNDPProxy(TestNetworkProxy): + def test_ndp_proxy_create_attrs(self): + self.verify_create(self.proxy.create_ndp_proxy, ndp_proxy.NDPProxy) + + def test_ndp_proxy_delete(self): + self.verify_delete( + self.proxy.delete_ndp_proxy, ndp_proxy.NDPProxy, False + ) + + def test_ndp_proxy_delete_ignore(self): + self.verify_delete( + self.proxy.delete_ndp_proxy, ndp_proxy.NDPProxy, True + ) + + def test_ndp_proxy_find(self): + self.verify_find(self.proxy.find_ndp_proxy, ndp_proxy.NDPProxy) + + def test_ndp_proxy_get(self): + self.verify_get(self.proxy.get_ndp_proxy, ndp_proxy.NDPProxy) + + def test_ndp_proxies(self): + self.verify_list(self.proxy.ndp_proxies, ndp_proxy.NDPProxy) + + def test_ndp_proxy_update(self): + self.verify_update(self.proxy.update_ndp_proxy, ndp_proxy.NDPProxy) + + +class TestNetworkBGP(TestNetworkProxy): + def test_bgp_speaker_create(self): + self.verify_create( + self.proxy.create_bgp_speaker, bgp_speaker.BgpSpeaker + ) + + def test_bgp_speaker_delete(self): + self.verify_delete( + self.proxy.delete_bgp_speaker, bgp_speaker.BgpSpeaker, False + ) + + def test_bgp_speaker_delete_ignore(self): + self.verify_delete( + self.proxy.delete_bgp_speaker, bgp_speaker.BgpSpeaker, True + ) + + def test_bgp_speaker_find(self): + self.verify_find(self.proxy.find_bgp_speaker, bgp_speaker.BgpSpeaker) + + def test_bgp_speaker_get(self): + self.verify_get(self.proxy.get_bgp_speaker, bgp_speaker.BgpSpeaker) + + def test_bgp_speakers(self): + self.verify_list(self.proxy.bgp_speakers, bgp_speaker.BgpSpeaker) + + def test_bgp_speaker_update(self): + self.verify_update( + self.proxy.update_bgp_speaker, bgp_speaker.BgpSpeaker + ) + + def test_bgp_peer_create(self): + 
self.verify_create(self.proxy.create_bgp_peer, bgp_peer.BgpPeer) + + def test_bgp_peer_delete(self): + self.verify_delete(self.proxy.delete_bgp_peer, bgp_peer.BgpPeer, False) + + def test_bgp_peer_delete_ignore(self): + self.verify_delete(self.proxy.delete_bgp_peer, bgp_peer.BgpPeer, True) + + def test_bgp_peer_find(self): + self.verify_find(self.proxy.find_bgp_peer, bgp_peer.BgpPeer) + + def test_bgp_peer_get(self): + self.verify_get(self.proxy.get_bgp_peer, bgp_peer.BgpPeer) + + def test_bgp_peers(self): + self.verify_list(self.proxy.bgp_peers, bgp_peer.BgpPeer) + + def test_bgp_peer_update(self): + self.verify_update(self.proxy.update_bgp_peer, bgp_peer.BgpPeer) + + +class TestNetworkBGPVPN(TestNetworkProxy): + NETWORK_ASSOCIATION = 'net-assoc-id' + uuid.uuid4().hex + PORT_ASSOCIATION = 'port-assoc-id' + uuid.uuid4().hex + ROUTER_ASSOCIATION = 'router-assoc-id' + uuid.uuid4().hex + + def test_bgpvpn_create(self): + self.verify_create(self.proxy.create_bgpvpn, bgpvpn.BgpVpn) + + def test_bgpvpn_delete(self): + self.verify_delete(self.proxy.delete_bgpvpn, bgpvpn.BgpVpn, False) + + def test_bgpvpn_delete_ignore(self): + self.verify_delete(self.proxy.delete_bgpvpn, bgpvpn.BgpVpn, True) + + def test_bgpvpn_find(self): + self.verify_find(self.proxy.find_bgpvpn, bgpvpn.BgpVpn) + + def test_bgpvpn_get(self): + self.verify_get(self.proxy.get_bgpvpn, bgpvpn.BgpVpn) + + def test_bgpvpns(self): + self.verify_list(self.proxy.bgpvpns, bgpvpn.BgpVpn) + + def test_bgpvpn_update(self): + self.verify_update(self.proxy.update_bgpvpn, bgpvpn.BgpVpn) + + def test_bgpvpn_network_association_create(self): + self.verify_create( + self.proxy.create_bgpvpn_network_association, + bgpvpn_network_association.BgpVpnNetworkAssociation, + method_kwargs={'bgpvpn': BGPVPN_ID}, + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_network_association_delete(self): + self.verify_delete( + self.proxy.delete_bgpvpn_network_association, + 
bgpvpn_network_association.BgpVpnNetworkAssociation, + False, + method_args=[BGPVPN_ID, self.NETWORK_ASSOCIATION], + expected_args=[self.NETWORK_ASSOCIATION], + expected_kwargs={'ignore_missing': False, 'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_network_association_delete_ignore(self): + self.verify_delete( + self.proxy.delete_bgpvpn_network_association, + bgpvpn_network_association.BgpVpnNetworkAssociation, + True, + method_args=[BGPVPN_ID, self.NETWORK_ASSOCIATION], + expected_args=[self.NETWORK_ASSOCIATION], + expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_network_association_get(self): + self.verify_get( + self.proxy.get_bgpvpn_network_association, + bgpvpn_network_association.BgpVpnNetworkAssociation, + method_args=[BGPVPN_ID, self.NETWORK_ASSOCIATION], + expected_args=[self.NETWORK_ASSOCIATION], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_network_associations(self): + self.verify_list( + self.proxy.bgpvpn_network_associations, + bgpvpn_network_association.BgpVpnNetworkAssociation, + method_args=[ + BGPVPN_ID, + ], + expected_args=[], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_port_association_create(self): + self.verify_create( + self.proxy.create_bgpvpn_port_association, + bgpvpn_port_association.BgpVpnPortAssociation, + method_kwargs={'bgpvpn': BGPVPN_ID}, + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_port_association_delete(self): + self.verify_delete( + self.proxy.delete_bgpvpn_port_association, + bgpvpn_port_association.BgpVpnPortAssociation, + False, + method_args=[BGPVPN_ID, self.PORT_ASSOCIATION], + expected_args=[self.PORT_ASSOCIATION], + expected_kwargs={'ignore_missing': False, 'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_port_association_delete_ignore(self): + self.verify_delete( + self.proxy.delete_bgpvpn_port_association, + bgpvpn_port_association.BgpVpnPortAssociation, + True, + method_args=[BGPVPN_ID, 
self.PORT_ASSOCIATION], + expected_args=[self.PORT_ASSOCIATION], + expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_port_association_find(self): + self.verify_find( + self.proxy.find_bgpvpn_port_association, + bgpvpn_port_association.BgpVpnPortAssociation, + method_args=[BGPVPN_ID], + expected_args=['resource_name'], + method_kwargs={'ignore_missing': True}, + expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_port_association_get(self): + self.verify_get( + self.proxy.get_bgpvpn_port_association, + bgpvpn_port_association.BgpVpnPortAssociation, + method_args=[BGPVPN_ID, self.PORT_ASSOCIATION], + expected_args=[self.PORT_ASSOCIATION], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_port_associations(self): + self.verify_list( + self.proxy.bgpvpn_port_associations, + bgpvpn_port_association.BgpVpnPortAssociation, + method_args=[ + BGPVPN_ID, + ], + expected_args=[], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_port_association_update(self): + self.verify_update( + self.proxy.update_bgpvpn_port_association, + bgpvpn_port_association.BgpVpnPortAssociation, + method_args=[BGPVPN_ID, self.PORT_ASSOCIATION], + method_kwargs={}, + expected_args=[self.PORT_ASSOCIATION], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_router_association_create(self): + self.verify_create( + self.proxy.create_bgpvpn_router_association, + bgpvpn_router_association.BgpVpnRouterAssociation, + method_kwargs={'bgpvpn': BGPVPN_ID}, + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_router_association_delete(self): + self.verify_delete( + self.proxy.delete_bgpvpn_router_association, + bgpvpn_router_association.BgpVpnRouterAssociation, + False, + method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], + expected_args=[self.ROUTER_ASSOCIATION], + expected_kwargs={'ignore_missing': False, 'bgpvpn_id': BGPVPN_ID}, + ) + + def 
test_bgpvpn_router_association_delete_ignore(self): + self.verify_delete( + self.proxy.delete_bgpvpn_router_association, + bgpvpn_router_association.BgpVpnRouterAssociation, + True, + method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], + expected_args=[self.ROUTER_ASSOCIATION], + expected_kwargs={'ignore_missing': True, 'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_router_association_get(self): + self.verify_get( + self.proxy.get_bgpvpn_router_association, + bgpvpn_router_association.BgpVpnRouterAssociation, + method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], + expected_args=[self.ROUTER_ASSOCIATION], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_router_associations(self): + self.verify_list( + self.proxy.bgpvpn_router_associations, + bgpvpn_router_association.BgpVpnRouterAssociation, + method_args=[ + BGPVPN_ID, + ], + expected_args=[], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + def test_bgpvpn_router_association_update(self): + self.verify_update( + self.proxy.update_bgpvpn_router_association, + bgpvpn_router_association.BgpVpnRouterAssociation, + method_args=[BGPVPN_ID, self.ROUTER_ASSOCIATION], + method_kwargs={}, + expected_args=[self.ROUTER_ASSOCIATION], + expected_kwargs={'bgpvpn_id': BGPVPN_ID}, + ) + + +class TestNetworkTapMirror(TestNetworkProxy): + def test_create_tap_mirror(self): + self.verify_create(self.proxy.create_tap_mirror, tap_mirror.TapMirror) + + def test_delete_tap_mirror(self): + self.verify_delete( + self.proxy.delete_tap_mirror, tap_mirror.TapMirror, False + ) + + def test_delete_tap_mirror_ignore(self): + self.verify_delete( + self.proxy.delete_tap_mirror, tap_mirror.TapMirror, True + ) + + def test_find_tap_mirror(self): + self.verify_find(self.proxy.find_tap_mirror, tap_mirror.TapMirror) + + def test_get_tap_mirror(self): + self.verify_get(self.proxy.get_tap_mirror, tap_mirror.TapMirror) + + def test_tap_mirrors(self): + self.verify_list(self.proxy.tap_mirrors, tap_mirror.TapMirror) + + def 
test_update_tap_mirror(self): + self.verify_update(self.proxy.update_tap_mirror, tap_mirror.TapMirror) + + +class TestNetworkPortBinding(TestNetworkProxy): + @mock.patch.object(proxy_base.Proxy, '_get') + def test_create_port_binding(self, mock_get): + res_port = port.Port.new(id=PORT_ID) + mock_get.return_value = res_port + + self.verify_create( + self.proxy.create_port_binding, + port_binding.PortBinding, + method_kwargs={'port': PORT_ID}, + expected_kwargs={'port_id': PORT_ID}, + ) + + @mock.patch('openstack.network.v2._proxy.Proxy.activate_port_binding') + def test_activate_port_binding(self, activate_binding): + data = mock.sentinel + self.proxy.activate_port_binding(port_binding.PortBinding, data) + activate_binding.assert_called_once_with( + port_binding.PortBinding, data + ) + + @mock.patch.object(proxy_base.Proxy, '_get') + def test_port_bindings(self, mock_get): + res_port = port.Port.new(id=PORT_ID) + mock_get.return_value = res_port + + self.verify_list( + self.proxy.port_bindings, + port_binding.PortBinding, + method_kwargs={'port': PORT_ID}, + expected_kwargs={'port_id': PORT_ID}, + ) + + @mock.patch('openstack.network.v2._proxy.Proxy.delete_port_binding') + @mock.patch.object(proxy_base.Proxy, '_get') + def test_delete_port_binding(self, mock_get, delete_port_binding): + res_port = port.Port.new(id=PORT_ID) + mock_get.return_value = res_port + data = mock.sentinel + + self.proxy.delete_port_binding(port_binding.PortBinding, data) + delete_port_binding.assert_called_once_with( + port_binding.PortBinding, data + ) diff --git a/openstack/tests/unit/network/v2/test_qos_bandwidth_limit_rule.py b/openstack/tests/unit/network/v2/test_qos_bandwidth_limit_rule.py index 56b7446de7..bb3c59e496 100644 --- a/openstack/tests/unit/network/v2/test_qos_bandwidth_limit_rule.py +++ b/openstack/tests/unit/network/v2/test_qos_bandwidth_limit_rule.py @@ -10,34 +10,33 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools import uuid from openstack.network.v2 import qos_bandwidth_limit_rule +from openstack.tests.unit import base + EXAMPLE = { 'id': 'IDENTIFIER', 'qos_policy_id': 'qos-policy-' + uuid.uuid4().hex, 'max_kbps': 1500, 'max_burst_kbps': 1200, - # NOTE(ralonsoh): to be implemented in bug 1560961 - # 'direction': 'egress', + 'direction': 'egress', } -class TestQoSBandwidthLimitRule(testtools.TestCase): - +class TestQoSBandwidthLimitRule(base.TestCase): def test_basic(self): sot = qos_bandwidth_limit_rule.QoSBandwidthLimitRule() self.assertEqual('bandwidth_limit_rule', sot.resource_key) self.assertEqual('bandwidth_limit_rules', sot.resources_key) self.assertEqual( '/qos/policies/%(qos_policy_id)s/bandwidth_limit_rules', - sot.base_path) - self.assertEqual('network', sot.service.service_type) + sot.base_path, + ) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -47,5 +46,4 @@ def test_make_it(self): self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) self.assertEqual(EXAMPLE['max_kbps'], sot.max_kbps) self.assertEqual(EXAMPLE['max_burst_kbps'], sot.max_burst_kbps) - # NOTE(ralonsoh): to be implemented in bug 1560961 - # self.assertEqual(EXAMPLE['direction'], sot.direction) + self.assertEqual(EXAMPLE['direction'], sot.direction) diff --git a/openstack/tests/unit/network/v2/test_qos_dscp_marking_rule.py b/openstack/tests/unit/network/v2/test_qos_dscp_marking_rule.py index 46bffd1cd3..0df8a59fa4 100644 --- a/openstack/tests/unit/network/v2/test_qos_dscp_marking_rule.py +++ b/openstack/tests/unit/network/v2/test_qos_dscp_marking_rule.py @@ -10,10 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools import uuid from openstack.network.v2 import qos_dscp_marking_rule +from openstack.tests.unit import base + EXAMPLE = { 'id': 'IDENTIFIER', @@ -22,18 +23,17 @@ } -class TestQoSDSCPMarkingRule(testtools.TestCase): - +class TestQoSDSCPMarkingRule(base.TestCase): def test_basic(self): sot = qos_dscp_marking_rule.QoSDSCPMarkingRule() self.assertEqual('dscp_marking_rule', sot.resource_key) self.assertEqual('dscp_marking_rules', sot.resources_key) - self.assertEqual('/qos/policies/%(qos_policy_id)s/dscp_marking_rules', - sot.base_path) - self.assertEqual('network', sot.service.service_type) + self.assertEqual( + '/qos/policies/%(qos_policy_id)s/dscp_marking_rules', sot.base_path + ) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_qos_minimum_bandwidth_rule.py b/openstack/tests/unit/network/v2/test_qos_minimum_bandwidth_rule.py index f6b2f44227..6e725cee96 100644 --- a/openstack/tests/unit/network/v2/test_qos_minimum_bandwidth_rule.py +++ b/openstack/tests/unit/network/v2/test_qos_minimum_bandwidth_rule.py @@ -10,10 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools import uuid from openstack.network.v2 import qos_minimum_bandwidth_rule +from openstack.tests.unit import base + EXAMPLE = { 'id': 'IDENTIFIER', @@ -23,19 +24,18 @@ } -class TestQoSMinimumBandwidthRule(testtools.TestCase): - +class TestQoSMinimumBandwidthRule(base.TestCase): def test_basic(self): sot = qos_minimum_bandwidth_rule.QoSMinimumBandwidthRule() self.assertEqual('minimum_bandwidth_rule', sot.resource_key) self.assertEqual('minimum_bandwidth_rules', sot.resources_key) self.assertEqual( '/qos/policies/%(qos_policy_id)s/minimum_bandwidth_rules', - sot.base_path) - self.assertEqual('network', sot.service.service_type) + sot.base_path, + ) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_qos_minimum_packet_rate_rule.py b/openstack/tests/unit/network/v2/test_qos_minimum_packet_rate_rule.py new file mode 100644 index 0000000000..9ae6d3bea4 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_qos_minimum_packet_rate_rule.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import uuid + +from openstack.network.v2 import qos_minimum_packet_rate_rule +from openstack.tests.unit import base + + +EXAMPLE = { + 'id': 'IDENTIFIER', + 'qos_policy_id': 'qos-policy-' + uuid.uuid4().hex, + 'min_kpps': 1500, + 'direction': 'any', +} + + +class TestQoSMinimumPacketRateRule(base.TestCase): + def test_basic(self): + sot = qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule() + self.assertEqual('minimum_packet_rate_rule', sot.resource_key) + self.assertEqual('minimum_packet_rate_rules', sot.resources_key) + self.assertEqual( + '/qos/policies/%(qos_policy_id)s/minimum_packet_rate_rules', + sot.base_path, + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = qos_minimum_packet_rate_rule.QoSMinimumPacketRateRule(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) + self.assertEqual(EXAMPLE['min_kpps'], sot.min_kpps) + self.assertEqual(EXAMPLE['direction'], sot.direction) diff --git a/openstack/tests/unit/network/v2/test_qos_packet_rate_limit_rule.py b/openstack/tests/unit/network/v2/test_qos_packet_rate_limit_rule.py new file mode 100644 index 0000000000..5bf210c52f --- /dev/null +++ b/openstack/tests/unit/network/v2/test_qos_packet_rate_limit_rule.py @@ -0,0 +1,49 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import uuid + +from openstack.network.v2 import qos_packet_rate_limit_rule +from openstack.tests.unit import base + + +EXAMPLE = { + 'id': 'IDENTIFIER', + 'qos_policy_id': 'qos-policy-' + uuid.uuid4().hex, + 'max_kpps': 1600, + 'max_burst_kpps': 1300, + 'direction': 'any', +} + + +class TestQoSBandwidthLimitRule(base.TestCase): + def test_basic(self): + sot = qos_packet_rate_limit_rule.QoSPacketRateLimitRule() + self.assertEqual('packet_rate_limit_rule', sot.resource_key) + self.assertEqual('packet_rate_limit_rules', sot.resources_key) + self.assertEqual( + '/qos/policies/%(qos_policy_id)s/packet_rate_limit_rules', + sot.base_path, + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = qos_packet_rate_limit_rule.QoSPacketRateLimitRule(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['qos_policy_id'], sot.qos_policy_id) + self.assertEqual(EXAMPLE['max_kpps'], sot.max_kpps) + self.assertEqual(EXAMPLE['max_burst_kpps'], sot.max_burst_kpps) + self.assertEqual(EXAMPLE['direction'], sot.direction) diff --git a/openstack/tests/unit/network/v2/test_qos_policy.py b/openstack/tests/unit/network/v2/test_qos_policy.py index d91633d379..2cb22999b5 100644 --- a/openstack/tests/unit/network/v2/test_qos_policy.py +++ b/openstack/tests/unit/network/v2/test_qos_policy.py @@ -10,32 +10,33 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools import uuid from openstack.network.v2 import qos_policy +from openstack.tests.unit import base + EXAMPLE = { 'id': 'IDENTIFIER', 'description': 'QoS policy description', 'name': 'qos-policy-name', 'shared': True, - 'tenant_id': '2', - 'rules': [uuid.uuid4().hex] + 'project_id': '2', + 'rules': [uuid.uuid4().hex], + 'is_default': False, + 'tags': ['3'], } -class TestQoSPolicy(testtools.TestCase): - +class TestQoSPolicy(base.TestCase): def test_basic(self): sot = qos_policy.QoSPolicy() self.assertEqual('policy', sot.resource_key) self.assertEqual('policies', sot.resources_key) self.assertEqual('/qos/policies', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -44,6 +45,7 @@ def test_make_it(self): self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['name'], sot.name) - self.assertTrue(sot.is_shared) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['rules'], sot.rules) + self.assertEqual(EXAMPLE['is_default'], sot.is_default) + self.assertEqual(EXAMPLE['tags'], sot.tags) diff --git a/openstack/tests/unit/network/v2/test_qos_rule_type.py b/openstack/tests/unit/network/v2/test_qos_rule_type.py index 59b2f6bf0a..81faa977d7 100644 --- a/openstack/tests/unit/network/v2/test_qos_rule_type.py +++ b/openstack/tests/unit/network/v2/test_qos_rule_type.py @@ -10,29 +10,61 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import qos_rule_type +from openstack.tests.unit import base + EXAMPLE = { 'type': 'bandwidth_limit', + 'drivers': [ + { + 'name': 'openvswitch', + 'supported_parameters': [ + { + 'parameter_values': {'start': 0, 'end': 2147483647}, + 'parameter_type': 'range', + 'parameter_name': 'max_kbps', + }, + { + 'parameter_values': ['ingress', 'egress'], + 'parameter_type': 'choices', + 'parameter_name': 'direction', + }, + { + 'parameter_values': {'start': 0, 'end': 2147483647}, + 'parameter_type': 'range', + 'parameter_name': 'max_burst_kbps', + }, + ], + } + ], } -class TestQoSRuleType(testtools.TestCase): - +class TestQoSRuleType(base.TestCase): def test_basic(self): sot = qos_rule_type.QoSRuleType() self.assertEqual('rule_type', sot.resource_key) self.assertEqual('rule_types', sot.resources_key) self.assertEqual('/qos/rule-types', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertEqual( + { + 'type': 'type', + 'drivers': 'drivers', + 'all_rules': 'all_rules', + 'all_supported': 'all_supported', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) def test_make_it(self): sot = qos_rule_type.QoSRuleType(**EXAMPLE) self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['drivers'], sot.drivers) diff --git a/openstack/tests/unit/network/v2/test_quota.py b/openstack/tests/unit/network/v2/test_quota.py index 0c06a54d3f..ed0f668776 100644 --- a/openstack/tests/unit/network/v2/test_quota.py +++ b/openstack/tests/unit/network/v2/test_quota.py @@ -10,16 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import quota +from openstack import resource +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { 'floatingip': 1, 'network': 2, 'port': 3, - 'tenant_id': '4', + 'project_id': '4', 'router': 5, 'subnet': 6, 'subnetpool': 7, @@ -31,20 +32,19 @@ 'loadbalancer': 13, 'l7policy': 14, 'pool': 15, + 'check_limit': True, } -class TestQuota(testtools.TestCase): - +class TestQuota(base.TestCase): def test_basic(self): sot = quota.Quota() self.assertEqual('quota', sot.resource_key) self.assertEqual('quotas', sot.resources_key) self.assertEqual('/quotas', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -53,12 +53,13 @@ def test_make_it(self): self.assertEqual(EXAMPLE['floatingip'], sot.floating_ips) self.assertEqual(EXAMPLE['network'], sot.networks) self.assertEqual(EXAMPLE['port'], sot.ports) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['router'], sot.routers) self.assertEqual(EXAMPLE['subnet'], sot.subnets) self.assertEqual(EXAMPLE['subnetpool'], sot.subnet_pools) - self.assertEqual(EXAMPLE['security_group_rule'], - sot.security_group_rules) + self.assertEqual( + EXAMPLE['security_group_rule'], sot.security_group_rules + ) self.assertEqual(EXAMPLE['security_group'], sot.security_groups) self.assertEqual(EXAMPLE['rbac_policy'], sot.rbac_policies) self.assertEqual(EXAMPLE['healthmonitor'], sot.health_monitors) @@ -66,19 +67,30 @@ def test_make_it(self): self.assertEqual(EXAMPLE['loadbalancer'], sot.load_balancers) self.assertEqual(EXAMPLE['l7policy'], sot.l7_policies) self.assertEqual(EXAMPLE['pool'], sot.pools) + 
self.assertEqual(EXAMPLE['check_limit'], sot.check_limit) + + def test_prepare_request(self): + body = {'id': 'ABCDEFGH', 'network': '12345'} + quota_obj = quota.Quota(**body) + response = quota_obj._prepare_request() + self.assertNotIn('id', response) + def test_alternate_id(self): + my_project_id = 'my-tenant-id' + body = {'project_id': my_project_id, 'network': 12345} + quota_obj = quota.Quota(**body) + self.assertEqual(my_project_id, resource.Resource._get_id(quota_obj)) -class TestQuotaDefault(testtools.TestCase): +class TestQuotaDefault(base.TestCase): def test_basic(self): sot = quota.QuotaDefault() self.assertEqual('quota', sot.resource_key) self.assertEqual('quotas', sot.resources_key) self.assertEqual('/quotas/%(project)s/default', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) @@ -87,12 +99,13 @@ def test_make_it(self): self.assertEqual(EXAMPLE['floatingip'], sot.floating_ips) self.assertEqual(EXAMPLE['network'], sot.networks) self.assertEqual(EXAMPLE['port'], sot.ports) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['router'], sot.routers) self.assertEqual(EXAMPLE['subnet'], sot.subnets) self.assertEqual(EXAMPLE['subnetpool'], sot.subnet_pools) - self.assertEqual(EXAMPLE['security_group_rule'], - sot.security_group_rules) + self.assertEqual( + EXAMPLE['security_group_rule'], sot.security_group_rules + ) self.assertEqual(EXAMPLE['security_group'], sot.security_groups) self.assertEqual(EXAMPLE['rbac_policy'], sot.rbac_policies) self.assertEqual(EXAMPLE['healthmonitor'], sot.health_monitors) @@ -100,4 +113,5 @@ def test_make_it(self): self.assertEqual(EXAMPLE['loadbalancer'], sot.load_balancers) 
self.assertEqual(EXAMPLE['l7policy'], sot.l7_policies) self.assertEqual(EXAMPLE['pool'], sot.pools) + self.assertEqual(EXAMPLE['check_limit'], sot.check_limit) self.assertEqual('FAKE_PROJECT', sot.project) diff --git a/openstack/tests/unit/network/v2/test_rbac_policy.py b/openstack/tests/unit/network/v2/test_rbac_policy.py index c8189c9c2f..3c625f48ed 100644 --- a/openstack/tests/unit/network/v2/test_rbac_policy.py +++ b/openstack/tests/unit/network/v2/test_rbac_policy.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import rbac_policy +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -20,28 +20,39 @@ 'object_id': IDENTIFIER, 'object_type': 'network', 'target_tenant': '10', - 'tenant_id': '5', + 'project_id': '5', } -class TestRBACPolicy(testtools.TestCase): - +class TestRBACPolicy(base.TestCase): def test_basic(self): sot = rbac_policy.RBACPolicy() self.assertEqual('rbac_policy', sot.resource_key) self.assertEqual('rbac_policies', sot.resources_key) self.assertEqual('/rbac-policies', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertDictEqual( + { + 'action': 'action', + 'object_id': 'object_id', + 'object_type': 'object_type', + 'project_id': 'project_id', + 'target_project_id': 'target_tenant', + 'limit': 'limit', + 'marker': 'marker', + }, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = rbac_policy.RBACPolicy(**EXAMPLE) self.assertEqual(EXAMPLE['action'], sot.action) self.assertEqual(EXAMPLE['object_id'], sot.object_id) self.assertEqual(EXAMPLE['object_type'], sot.object_type) self.assertEqual(EXAMPLE['target_tenant'], 
sot.target_project_id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) diff --git a/openstack/tests/unit/network/v2/test_router.py b/openstack/tests/unit/network/v2/test_router.py index 900f144ef1..b3e23b6ef9 100644 --- a/openstack/tests/unit/network/v2/test_router.py +++ b/openstack/tests/unit/network/v2/test_router.py @@ -10,10 +10,13 @@ # License for the specific language governing permissions and limitations # under the License. -import mock +from unittest import mock + import testtools +from openstack import exceptions from openstack.network.v2 import router +from openstack.tests.unit import base IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -23,6 +26,7 @@ 'created_at': 'timestamp1', 'description': '3', 'distributed': False, + 'enable_ndp_proxy': True, 'external_gateway_info': {'4': 4}, 'flavor_id': '5', 'ha': False, @@ -31,7 +35,7 @@ 'revision': 7, 'routes': ['8'], 'status': '9', - 'tenant_id': '10', + 'project_id': '10', 'updated_at': 'timestamp2', } @@ -44,46 +48,43 @@ 'external_gateway_info': { 'network_id': '1', 'enable_snat': True, - 'external_fixed_ips': [] + 'external_fixed_ips': [], }, 'ha': True, 'id': IDENTIFIER, 'name': 'router1', - 'routes': [{ - 'nexthop': '172.24.4.20', - 'destination': '10.0.3.1/24' - }], + 'routes': [{'nexthop': '172.24.4.20', 'destination': '10.0.3.1/24'}], 'status': 'ACTIVE', - 'tenant_id': '2', + 'project_id': '2', } -class TestRouter(testtools.TestCase): - +class TestRouter(base.TestCase): def test_basic(self): sot = router.Router() self.assertEqual('router', sot.resource_key) self.assertEqual('routers', sot.resources_key) self.assertEqual('/routers', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def 
test_make_it(self): sot = router.Router(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) - self.assertEqual(EXAMPLE['availability_zone_hints'], - sot.availability_zone_hints) - self.assertEqual(EXAMPLE['availability_zones'], - sot.availability_zones) + self.assertEqual( + EXAMPLE['availability_zone_hints'], sot.availability_zone_hints + ) + self.assertEqual(EXAMPLE['availability_zones'], sot.availability_zones) self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) + self.assertTrue(sot.enable_ndp_proxy) self.assertFalse(sot.is_distributed) - self.assertEqual(EXAMPLE['external_gateway_info'], - sot.external_gateway_info) + self.assertEqual( + EXAMPLE['external_gateway_info'], sot.external_gateway_info + ) self.assertEqual(EXAMPLE['flavor_id'], sot.flavor_id) self.assertFalse(sot.is_ha) self.assertEqual(EXAMPLE['id'], sot.id) @@ -91,27 +92,31 @@ def test_make_it(self): self.assertEqual(EXAMPLE['revision'], sot.revision_number) self.assertEqual(EXAMPLE['routes'], sot.routes) self.assertEqual(EXAMPLE['status'], sot.status) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) def test_make_it_with_optional(self): sot = router.Router(**EXAMPLE_WITH_OPTIONAL) self.assertFalse(sot.is_admin_state_up) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['availability_zone_hints'], - sot.availability_zone_hints) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['availability_zones'], - sot.availability_zones) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['description'], - sot.description) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['availability_zone_hints'], + sot.availability_zone_hints, + ) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['availability_zones'], sot.availability_zones + ) + self.assertEqual(EXAMPLE_WITH_OPTIONAL['description'], sot.description) self.assertTrue(sot.is_distributed) - 
self.assertEqual(EXAMPLE_WITH_OPTIONAL['external_gateway_info'], - sot.external_gateway_info) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['external_gateway_info'], + sot.external_gateway_info, + ) self.assertTrue(sot.is_ha) self.assertEqual(EXAMPLE_WITH_OPTIONAL['id'], sot.id) self.assertEqual(EXAMPLE_WITH_OPTIONAL['name'], sot.name) self.assertEqual(EXAMPLE_WITH_OPTIONAL['routes'], sot.routes) self.assertEqual(EXAMPLE_WITH_OPTIONAL['status'], sot.status) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE_WITH_OPTIONAL['project_id'], sot.project_id) def test_add_interface_subnet(self): # Add subnet to a router @@ -119,14 +124,14 @@ def test_add_interface_subnet(self): response = mock.Mock() response.body = {"subnet_id": "3", "port_id": "2"} response.json = mock.Mock(return_value=response.body) + response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"subnet_id": "3"} self.assertEqual(response.body, sot.add_interface(sess, **body)) url = 'routers/IDENTIFIER/add_router_interface' - sess.put.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.put.assert_called_with(url, json=body) def test_add_interface_port(self): # Add port to a router @@ -134,6 +139,7 @@ def test_add_interface_port(self): response = mock.Mock() response.body = {"subnet_id": "3", "port_id": "3"} response.json = mock.Mock(return_value=response.body) + response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) @@ -141,8 +147,7 @@ def test_add_interface_port(self): self.assertEqual(response.body, sot.add_interface(sess, **body)) url = 'routers/IDENTIFIER/add_router_interface' - sess.put.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.put.assert_called_with(url, json=body) def test_remove_interface_subnet(self): # Remove subnet from a router @@ -150,14 +155,14 @@ def test_remove_interface_subnet(self): response = mock.Mock() response.body 
= {"subnet_id": "3", "port_id": "2"} response.json = mock.Mock(return_value=response.body) + response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"subnet_id": "3"} self.assertEqual(response.body, sot.remove_interface(sess, **body)) url = 'routers/IDENTIFIER/remove_router_interface' - sess.put.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.put.assert_called_with(url, json=body) def test_remove_interface_port(self): # Remove port from a router @@ -165,14 +170,79 @@ def test_remove_interface_port(self): response = mock.Mock() response.body = {"subnet_id": "3", "port_id": "3"} response.json = mock.Mock(return_value=response.body) + response.status_code = 200 sess = mock.Mock() sess.put = mock.Mock(return_value=response) body = {"network_id": 3, "enable_snat": True} self.assertEqual(response.body, sot.remove_interface(sess, **body)) url = 'routers/IDENTIFIER/remove_router_interface' - sess.put.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.put.assert_called_with(url, json=body) + + def test_add_interface_4xx(self): + # Neutron may return 4xx, we have to raise if that happens + sot = router.Router(**EXAMPLE) + response = mock.Mock() + msg = '.*borked' + response.body = {'NeutronError': {'message': msg}} + response.json = mock.Mock(return_value=response.body) + response.ok = False + response.status_code = 409 + response.headers = {'content-type': 'application/json'} + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + body = {'subnet_id': '3'} + with testtools.ExpectedException(exceptions.ConflictException, msg): + sot.add_interface(sess, **body) + + def test_remove_interface_4xx(self): + # Neutron may return 4xx for example if a router interface has + # extra routes referring to it as a nexthop + sot = router.Router(**EXAMPLE) + response = mock.Mock() + msg = '.*borked' + response.body = {'NeutronError': {'message': msg}} + response.json = 
mock.Mock(return_value=response.body) + response.ok = False + response.status_code = 409 + response.headers = {'content-type': 'application/json'} + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + body = {'subnet_id': '3'} + with testtools.ExpectedException(exceptions.ConflictException, msg): + sot.remove_interface(sess, **body) + + def test_add_extra_routes(self): + r = router.Router(**EXAMPLE) + response = mock.Mock() + response.headers = {} + json_body = {'router': {}} + response.body = json_body + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + ret = r.add_extra_routes(sess, json_body) + self.assertIsInstance(ret, router.Router) + self.assertIsInstance(ret.routes, list) + url = 'routers/IDENTIFIER/add_extraroutes' + sess.put.assert_called_with(url, json=json_body) + + def test_remove_extra_routes(self): + r = router.Router(**EXAMPLE) + response = mock.Mock() + response.headers = {} + json_body = {'router': {}} + response.body = json_body + response.json = mock.Mock(return_value=response.body) + response.status_code = 200 + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + ret = r.remove_extra_routes(sess, json_body) + self.assertIsInstance(ret, router.Router) + self.assertIsInstance(ret.routes, list) + url = 'routers/IDENTIFIER/remove_extraroutes' + sess.put.assert_called_with(url, json=json_body) def test_add_router_gateway(self): # Add gateway to a router @@ -186,8 +256,7 @@ def test_add_router_gateway(self): self.assertEqual(response.body, sot.add_gateway(sess, **body)) url = 'routers/IDENTIFIER/add_gateway_router' - sess.put.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.put.assert_called_with(url, json=body) def test_remove_router_gateway(self): # Remove gateway to a router @@ -201,5 +270,18 @@ def test_remove_router_gateway(self): self.assertEqual(response.body, sot.remove_gateway(sess, 
**body)) url = 'routers/IDENTIFIER/remove_gateway_router' - sess.put.assert_called_with(url, endpoint_filter=sot.service, - json=body) + sess.put.assert_called_with(url, json=body) + + +class TestL3AgentRouters(base.TestCase): + def test_basic(self): + sot = router.L3AgentRouter() + self.assertEqual('router', sot.resource_key) + self.assertEqual('routers', sot.resources_key) + self.assertEqual('/agents/%(agent_id)s/l3-routers', sot.base_path) + self.assertEqual('l3-router', sot.resource_name) + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_retrieve) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_security_group.py b/openstack/tests/unit/network/v2/test_security_group.py index 2d18edb0c8..b9978a7d19 100644 --- a/openstack/tests/unit/network/v2/test_security_group.py +++ b/openstack/tests/unit/network/v2/test_security_group.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import security_group +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' RULES = [ @@ -21,9 +21,8 @@ "direction": "egress", "remote_ip_prefix": None, "protocol": None, - "ethertype": - "IPv6", - "tenant_id": "4", + "ethertype": "IPv6", + "project_id": "4", "port_range_max": None, "port_range_min": None, "id": "5", @@ -38,7 +37,7 @@ "remote_ip_prefix": None, "protocol": None, "ethertype": "IPv6", - "tenant_id": "4", + "project_id": "4", "port_range_max": None, "port_range_min": None, "id": "6", @@ -54,27 +53,51 @@ 'description': '1', 'id': IDENTIFIER, 'name': '2', + 'stateful': True, 'revision_number': 3, 'security_group_rules': RULES, - 'tenant_id': '4', + 'project_id': '4', 'updated_at': '2016-10-14T12:16:57.233772', + 'tags': ['5'], + 'is_shared': True, } -class TestSecurityGroup(testtools.TestCase): - +class TestSecurityGroup(base.TestCase): def test_basic(self): sot = security_group.SecurityGroup() self.assertEqual('security_group', sot.resource_key) self.assertEqual('security_groups', sot.resources_key) self.assertEqual('/security-groups', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertDictEqual( + { + 'any_tags': 'tags-any', + 'description': 'description', + 'fields': 'fields', + 'id': 'id', + 'is_shared': 'shared', + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'not_any_tags': 'not-tags-any', + 'not_tags': 'not-tags', + 'tenant_id': 'tenant_id', + 'revision_number': 'revision_number', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + 'tags': 'tags', + 'project_id': 'project_id', + 'stateful': 'stateful', + }, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = 
security_group.SecurityGroup(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) @@ -82,8 +105,12 @@ def test_make_it(self): self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['name'], sot.name) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) - self.assertEqual(EXAMPLE['security_group_rules'], - sot.security_group_rules) + self.assertEqual( + EXAMPLE['security_group_rules'], sot.security_group_rules + ) self.assertEqual(dict, type(sot.security_group_rules[0])) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertEqual(EXAMPLE['tags'], sot.tags) + self.assertEqual(EXAMPLE['is_shared'], sot.is_shared) diff --git a/openstack/tests/unit/network/v2/test_security_group_rule.py b/openstack/tests/unit/network/v2/test_security_group_rule.py index 5ad00d91b9..7fc6fbdcbc 100644 --- a/openstack/tests/unit/network/v2/test_security_group_rule.py +++ b/openstack/tests/unit/network/v2/test_security_group_rule.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import security_group_rule +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -28,25 +28,52 @@ 'remote_ip_prefix': '8', 'revision_number': 9, 'security_group_id': '10', - 'tenant_id': '11', - 'updated_at': '12' + 'project_id': '11', + 'updated_at': '12', + 'remote_address_group_id': '13', } -class TestSecurityGroupRule(testtools.TestCase): - +class TestSecurityGroupRule(base.TestCase): def test_basic(self): sot = security_group_rule.SecurityGroupRule() self.assertEqual('security_group_rule', sot.resource_key) self.assertEqual('security_group_rules', sot.resources_key) self.assertEqual('/security-group-rules', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertDictEqual( + { + 'any_tags': 'tags-any', + 'description': 'description', + 'direction': 'direction', + 'id': 'id', + 'ether_type': 'ethertype', + 'limit': 'limit', + 'marker': 'marker', + 'not_any_tags': 'not-tags-any', + 'not_tags': 'not-tags', + 'port_range_max': 'port_range_max', + 'port_range_min': 'port_range_min', + 'tenant_id': 'tenant_id', + 'protocol': 'protocol', + 'remote_group_id': 'remote_group_id', + 'remote_address_group_id': 'remote_address_group_id', + 'remote_ip_prefix': 'remote_ip_prefix', + 'revision_number': 'revision_number', + 'security_group_id': 'security_group_id', + 'sort_dir': 'sort_dir', + 'sort_key': 'sort_key', + 'tags': 'tags', + 'project_id': 'project_id', + }, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = security_group_rule.SecurityGroupRule(**EXAMPLE) self.assertEqual(EXAMPLE['created_at'], sot.created_at) @@ -58,8 +85,12 @@ def test_make_it(self): self.assertEqual(EXAMPLE['port_range_min'], 
sot.port_range_min) self.assertEqual(EXAMPLE['protocol'], sot.protocol) self.assertEqual(EXAMPLE['remote_group_id'], sot.remote_group_id) + self.assertEqual( + EXAMPLE['remote_address_group_id'], sot.remote_address_group_id + ) self.assertEqual(EXAMPLE['remote_ip_prefix'], sot.remote_ip_prefix) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertEqual(EXAMPLE['security_group_id'], sot.security_group_id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/network/v2/test_segment.py b/openstack/tests/unit/network/v2/test_segment.py index 307f7568a2..329151f84c 100644 --- a/openstack/tests/unit/network/v2/test_segment.py +++ b/openstack/tests/unit/network/v2/test_segment.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import segment +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -26,18 +26,16 @@ } -class TestSegment(testtools.TestCase): - +class TestSegment(base.TestCase): def test_basic(self): sot = segment.Segment() self.assertEqual('segment', sot.resource_key) self.assertEqual('segments', sot.resources_key) self.assertEqual('/segments', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_service_profile.py b/openstack/tests/unit/network/v2/test_service_profile.py index 232cab5d9b..f213c17f1e 100644 --- a/openstack/tests/unit/network/v2/test_service_profile.py +++ b/openstack/tests/unit/network/v2/test_service_profile.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import service_profile +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE_WITH_OPTIONAL = { @@ -20,7 +20,7 @@ 'driver': 'neutron_lbaas.drivers.octavia.driver.OctaviaDriver', 'enabled': True, 'metainfo': {'foo': 'bar'}, - 'tenant_id': '5', + 'project_id': '5', } EXAMPLE = { @@ -28,15 +28,15 @@ } -class TestServiceProfile(testtools.TestCase): +class TestServiceProfile(base.TestCase): def test_basic(self): service_profiles = service_profile.ServiceProfile() self.assertEqual('service_profile', service_profiles.resource_key) self.assertEqual('service_profiles', service_profiles.resources_key) self.assertEqual('/service_profiles', service_profiles.base_path) self.assertTrue(service_profiles.allow_create) - self.assertTrue(service_profiles.allow_get) - self.assertTrue(service_profiles.allow_update) + self.assertTrue(service_profiles.allow_fetch) + self.assertTrue(service_profiles.allow_commit) self.assertTrue(service_profiles.allow_delete) self.assertTrue(service_profiles.allow_list) @@ -46,14 +46,20 @@ def test_make_it(self): def test_make_it_with_optional(self): service_profiles = service_profile.ServiceProfile( - **EXAMPLE_WITH_OPTIONAL) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['description'], - service_profiles.description) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['driver'], - service_profiles.driver) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['enabled'], - service_profiles.is_enabled) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['metainfo'], - service_profiles.meta_info) - self.assertEqual(EXAMPLE_WITH_OPTIONAL['tenant_id'], - service_profiles.project_id) + **EXAMPLE_WITH_OPTIONAL + ) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['description'], service_profiles.description + ) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['driver'], service_profiles.driver + ) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['enabled'], service_profiles.is_enabled + ) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['metainfo'], 
service_profiles.meta_info + ) + self.assertEqual( + EXAMPLE_WITH_OPTIONAL['project_id'], service_profiles.project_id + ) diff --git a/openstack/tests/unit/network/v2/test_service_provider.py b/openstack/tests/unit/network/v2/test_service_provider.py index bc946a03df..aa90903a4a 100644 --- a/openstack/tests/unit/network/v2/test_service_provider.py +++ b/openstack/tests/unit/network/v2/test_service_provider.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import service_provider +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -22,17 +22,15 @@ } -class TestServiceProvider(testtools.TestCase): - +class TestServiceProvider(base.TestCase): def test_basic(self): sot = service_provider.ServiceProvider() self.assertEqual('service_providers', sot.resources_key) self.assertEqual('/service-providers', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/network/v2/test_sfc_flow_classifier.py b/openstack/tests/unit/network/v2/test_sfc_flow_classifier.py new file mode 100644 index 0000000000..31789b5ba8 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_sfc_flow_classifier.py @@ -0,0 +1,102 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import sfc_flow_classifier +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "description": "", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "ethertype": "IPv4", + "protocol": 6, + "source_port_range_min": 22, + "source_port_range_max": 2000, + "destination_port_range_min": 80, + "destination_port_range_max": 80, + "source_ip_prefix": None, + "destination_ip_prefix": "22.12.34.45", + "logical_source_port": "uuid1", + "logical_destination_port": "uuid2", + "l7_parameters": None, + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "flow_classifier", +} + + +class TestFlowClassifier(base.TestCase): + def test_basic(self): + sot = sfc_flow_classifier.SfcFlowClassifier() + self.assertEqual('flow_classifier', sot.resource_key) + self.assertEqual('flow_classifiers', sot.resources_key) + self.assertEqual('/sfc/flow_classifiers', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = sfc_flow_classifier.SfcFlowClassifier(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['protocol'], sot.protocol) + self.assertEqual(EXAMPLE['ethertype'], sot.ethertype) + self.assertEqual( + EXAMPLE['source_port_range_min'], sot.source_port_range_min + ) + self.assertEqual( + EXAMPLE['source_port_range_max'], sot.source_port_range_max + ) + self.assertEqual( + EXAMPLE['destination_port_range_min'], + sot.destination_port_range_min, + ) + self.assertEqual( + EXAMPLE['destination_port_range_max'], + sot.destination_port_range_max, + ) + self.assertEqual(EXAMPLE['source_ip_prefix'], sot.source_ip_prefix) + self.assertEqual( + 
EXAMPLE['destination_ip_prefix'], sot.destination_ip_prefix + ) + self.assertEqual( + EXAMPLE['logical_source_port'], sot.logical_source_port + ) + self.assertEqual( + EXAMPLE['logical_destination_port'], sot.logical_destination_port + ) + self.assertEqual(EXAMPLE['l7_parameters'], sot.l7_parameters) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + 'description': 'description', + 'name': 'name', + 'project_id': 'project_id', + 'tenant_id': 'tenant_id', + 'ethertype': 'ethertype', + 'protocol': 'protocol', + 'source_port_range_min': 'source_port_range_min', + 'source_port_range_max': 'source_port_range_max', + 'destination_port_range_min': 'destination_port_range_min', + 'destination_port_range_max': 'destination_port_range_max', + 'logical_source_port': 'logical_source_port', + 'logical_destination_port': 'logical_destination_port', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_sfc_port_chain.py b/openstack/tests/unit/network/v2/test_sfc_port_chain.py new file mode 100644 index 0000000000..fd3ec865a7 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_sfc_port_chain.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import sfc_port_chain +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "description": "", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "port_pair_groups": ["p_group1", "p_group2"], + "flow_classifiers": ["f_classifier1", "f_classifier_2"], + "chain_parameters": {"correlation": "mpls", "symmetric": True}, + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "peers", +} + + +class TestPortChain(base.TestCase): + def test_basic(self): + sot = sfc_port_chain.SfcPortChain() + self.assertEqual('port_chain', sot.resource_key) + self.assertEqual('port_chains', sot.resources_key) + self.assertEqual('/sfc/port_chains', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = sfc_port_chain.SfcPortChain(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['port_pair_groups'], sot.port_pair_groups) + self.assertEqual(EXAMPLE['flow_classifiers'], sot.flow_classifiers) + self.assertEqual(EXAMPLE['chain_parameters'], sot.chain_parameters) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + 'description': 'description', + 'name': 'name', + 'project_id': 'project_id', + 'tenant_id': 'tenant_id', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_sfc_port_pair.py b/openstack/tests/unit/network/v2/test_sfc_port_pair.py new file mode 100644 index 0000000000..ea8257f646 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_sfc_port_pair.py @@ -0,0 +1,67 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use 
this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import sfc_port_pair +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "description": "", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "egress": "d294f042-1736-11ee-821a-7f8301c71f83", + "ingress": "d9908eba-1736-11ee-b77f-1fcc4c520068", + "service_function_parameters": {"correlation": "mpls", "weigjt": 101}, + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "port_pair_1", +} + + +class TestSfcPortPair(base.TestCase): + def test_basic(self): + sot = sfc_port_pair.SfcPortPair() + self.assertEqual('port_pair', sot.resource_key) + self.assertEqual('port_pairs', sot.resources_key) + self.assertEqual('/sfc/port_pairs', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = sfc_port_pair.SfcPortPair(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['egress'], sot.egress) + self.assertEqual(EXAMPLE['ingress'], sot.ingress) + self.assertEqual( + EXAMPLE['service_function_parameters'], + sot.service_function_parameters, + ) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + 'description': 'description', + 
'name': 'name', + 'project_id': 'project_id', + 'tenant_id': 'tenant_id', + 'ingress': 'ingress', + 'egress': 'egress', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_sfc_port_pair_group.py b/openstack/tests/unit/network/v2/test_sfc_port_pair_group.py new file mode 100644 index 0000000000..2dc1df6c58 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_sfc_port_pair_group.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import sfc_port_pair_group +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "description": "", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "port_pairs": ["8d57819a-174d-11ee-97b0-2f370d29c014"], + "port_pair_group_parameters": {}, + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "port_pair_gr", +} + + +class TestSfcPortPairGroup(base.TestCase): + def test_basic(self): + sot = sfc_port_pair_group.SfcPortPairGroup() + self.assertEqual('port_pair_group', sot.resource_key) + self.assertEqual('port_pair_groups', sot.resources_key) + self.assertEqual('/sfc/port_pair_groups', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = sfc_port_pair_group.SfcPortPairGroup(**EXAMPLE) + 
self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['port_pairs'], sot.port_pairs) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual( + EXAMPLE['port_pair_group_parameters'], + sot.port_pair_group_parameters, + ) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + 'description': 'description', + 'name': 'name', + 'project_id': 'project_id', + 'tenant_id': 'tenant_id', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_sfc_service_graph.py b/openstack/tests/unit/network/v2/test_sfc_service_graph.py new file mode 100644 index 0000000000..3eefb54ec7 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_sfc_service_graph.py @@ -0,0 +1,65 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import sfc_service_graph +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "description": "", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "port_chains": { + "0e6b9678-19aa-11ee-97ae-a3cec2c2ac72": [ + "1e19c266-19aa-11ee-8e02-6fa0c9a9832d" + ], + "2a394dc8-19aa-11ee-b87e-7f24d71926f1": [ + "3299fcf6-19aa-11ee-9398-3f8c68c11209" + ], + }, + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "service_graph", +} + + +class TestSfcServiceGraph(base.TestCase): + def test_basic(self): + sot = sfc_service_graph.SfcServiceGraph() + self.assertEqual('service_graph', sot.resource_key) + self.assertEqual('service_graphs', sot.resources_key) + self.assertEqual('/sfc/service_graphs', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = sfc_service_graph.SfcServiceGraph(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['port_chains'], sot.port_chains) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + 'description': 'description', + 'name': 'name', + 'project_id': 'project_id', + 'tenant_id': 'tenant_id', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_subnet.py b/openstack/tests/unit/network/v2/test_subnet.py index f315d4a532..ae4bbf9778 100644 --- a/openstack/tests/unit/network/v2/test_subnet.py +++ b/openstack/tests/unit/network/v2/test_subnet.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.network.v2 import subnet +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -21,6 +21,7 @@ 'created_at': '3', 'description': '4', 'dns_nameservers': ['5'], + 'dns_publish_fixed_ip': True, 'enable_dhcp': True, 'gateway_ip': '6', 'host_routes': ['7'], @@ -34,22 +35,21 @@ 'segment_id': '14', 'service_types': ['15'], 'subnetpool_id': '16', - 'tenant_id': '17', + 'project_id': '17', 'updated_at': '18', + 'use_default_subnetpool': True, } -class TestSubnet(testtools.TestCase): - +class TestSubnet(base.TestCase): def test_basic(self): sot = subnet.Subnet() self.assertEqual('subnet', sot.resource_key) self.assertEqual('subnets', sot.resources_key) self.assertEqual('/subnets', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -60,6 +60,7 @@ def test_make_it(self): self.assertEqual(EXAMPLE['created_at'], sot.created_at) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['dns_nameservers'], sot.dns_nameservers) + self.assertTrue(sot.dns_publish_fixed_ip) self.assertTrue(sot.is_dhcp_enabled) self.assertEqual(EXAMPLE['gateway_ip'], sot.gateway_ip) self.assertEqual(EXAMPLE['host_routes'], sot.host_routes) @@ -73,5 +74,6 @@ def test_make_it(self): self.assertEqual(EXAMPLE['segment_id'], sot.segment_id) self.assertEqual(EXAMPLE['service_types'], sot.service_types) self.assertEqual(EXAMPLE['subnetpool_id'], sot.subnet_pool_id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) + self.assertTrue(sot.use_default_subnet_pool) diff --git a/openstack/tests/unit/network/v2/test_subnet_pool.py 
b/openstack/tests/unit/network/v2/test_subnet_pool.py index 908a037814..8d04f500e6 100644 --- a/openstack/tests/unit/network/v2/test_subnet_pool.py +++ b/openstack/tests/unit/network/v2/test_subnet_pool.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import subnet_pool +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -30,22 +30,20 @@ 'prefixes': ['10', '11'], 'revision_number': 12, 'shared': True, - 'tenant_id': '13', + 'project_id': '13', 'updated_at': '14', } -class TestSubnetpool(testtools.TestCase): - +class TestSubnetpool(base.TestCase): def test_basic(self): sot = subnet_pool.SubnetPool() self.assertEqual('subnetpool', sot.resource_key) self.assertEqual('subnetpools', sot.resources_key) self.assertEqual('/subnetpools', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -53,8 +51,9 @@ def test_make_it(self): sot = subnet_pool.SubnetPool(**EXAMPLE) self.assertEqual(EXAMPLE['address_scope_id'], sot.address_scope_id) self.assertEqual(EXAMPLE['created_at'], sot.created_at) - self.assertEqual(EXAMPLE['default_prefixlen'], - sot.default_prefix_length) + self.assertEqual( + EXAMPLE['default_prefixlen'], sot.default_prefix_length + ) self.assertEqual(EXAMPLE['default_quota'], sot.default_quota) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['id'], sot.id) @@ -66,5 +65,5 @@ def test_make_it(self): self.assertEqual(EXAMPLE['prefixes'], sot.prefixes) self.assertEqual(EXAMPLE['revision_number'], sot.revision_number) self.assertTrue(sot.is_shared) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + 
self.assertEqual(EXAMPLE['project_id'], sot.project_id) self.assertEqual(EXAMPLE['updated_at'], sot.updated_at) diff --git a/openstack/tests/unit/network/v2/test_tap_flow.py b/openstack/tests/unit/network/v2/test_tap_flow.py new file mode 100644 index 0000000000..1eb382a6d7 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_tap_flow.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import tap_flow +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'name': 'my_tap_flow', + 'source_port': '1234', + 'tap_service_id': '4321', + 'id': IDENTIFIER, + 'project_id': '42', +} + + +class TestTapFlow(base.TestCase): + def test_basic(self): + sot = tap_flow.TapFlow() + self.assertEqual('tap_flow', sot.resource_key) + self.assertEqual('tap_flows', sot.resources_key) + self.assertEqual('/taas/tap_flows', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = tap_flow.TapFlow(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['source_port'], sot.source_port) + self.assertEqual(EXAMPLE['tap_service_id'], sot.tap_service_id) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 
'name': 'name', + 'project_id': 'project_id', + 'sort_key': 'sort_key', + 'sort_dir': 'sort_dir', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_tap_mirror.py b/openstack/tests/unit/network/v2/test_tap_mirror.py new file mode 100644 index 0000000000..f7bd92edcc --- /dev/null +++ b/openstack/tests/unit/network/v2/test_tap_mirror.py @@ -0,0 +1,62 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import tap_mirror +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +PORT_ID = 'PORT_ID' +EXAMPLE = { + 'name': 'my_tap_mirror', + 'port_id': PORT_ID, + 'directions': {'IN': 99}, + 'remote_ip': '193.10.10.1', + 'mirror_type': 'erspanv1', + 'id': IDENTIFIER, + 'project_id': '42', +} + + +class TestTapMirror(base.TestCase): + def test_basic(self): + sot = tap_mirror.TapMirror() + self.assertEqual('tap_mirror', sot.resource_key) + self.assertEqual('tap_mirrors', sot.resources_key) + self.assertEqual('/taas/tap_mirrors', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = tap_mirror.TapMirror(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['port_id'], sot.port_id) + self.assertEqual(EXAMPLE['directions'], sot.directions) + self.assertEqual(EXAMPLE['remote_ip'], sot.remote_ip) + 
self.assertEqual(EXAMPLE['mirror_type'], sot.mirror_type) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'project_id': 'project_id', + 'sort_key': 'sort_key', + 'sort_dir': 'sort_dir', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_tap_service.py b/openstack/tests/unit/network/v2/test_tap_service.py new file mode 100644 index 0000000000..b20da6f247 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_tap_service.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import tap_service +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'name': 'my_tap_service', + 'port_id': '1234', + 'id': IDENTIFIER, + 'project_id': '42', +} + + +class TestTapService(base.TestCase): + def test_basic(self): + sot = tap_service.TapService() + self.assertEqual('tap_service', sot.resource_key) + self.assertEqual('tap_services', sot.resources_key) + self.assertEqual('/taas/tap_services', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = tap_service.TapService(**EXAMPLE) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['port_id'], sot.port_id) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'project_id': 'project_id', + 'sort_key': 'sort_key', + 'sort_dir': 'sort_dir', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_trunk.py b/openstack/tests/unit/network/v2/test_trunk.py new file mode 100644 index 0000000000..9c83b50973 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_trunk.py @@ -0,0 +1,104 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +import testtools + +from openstack import exceptions +from openstack.network.v2 import trunk +from openstack.tests.unit import base + +EXAMPLE = { + 'id': 'IDENTIFIER', + 'description': 'Trunk description', + 'name': 'trunk-name', + 'project_id': '2', + 'admin_state_up': True, + 'port_id': 'fake_port_id', + 'status': 'ACTIVE', + 'sub_ports': [ + { + 'port_id': 'subport_port_id', + 'segmentation_id': 1234, + 'segmentation_type': 'vlan', + } + ], +} + + +class TestTrunk(base.TestCase): + def test_basic(self): + sot = trunk.Trunk() + self.assertEqual('trunk', sot.resource_key) + self.assertEqual('trunks', sot.resources_key) + self.assertEqual('/trunks', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = trunk.Trunk(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['admin_state_up'], sot.is_admin_state_up) + self.assertEqual(EXAMPLE['port_id'], sot.port_id) + self.assertEqual(EXAMPLE['status'], sot.status) + self.assertEqual(EXAMPLE['sub_ports'], sot.sub_ports) + + def test_add_subports_4xx(self): + # Neutron may return 4xx for example if a port does not exist + sot = trunk.Trunk(**EXAMPLE) + response = mock.Mock() + msg = '.*borked' + response.body = {'NeutronError': {'message': msg}} + response.json = mock.Mock(return_value=response.body) + response.ok = False + response.status_code = 404 + response.headers = {'content-type': 'application/json'} + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + subports = [ + { + 'port_id': 'abc', + 'segmentation_id': '123', + 'segmentation_type': 'vlan', + } + ] + with 
testtools.ExpectedException(exceptions.NotFoundException, msg): + sot.add_subports(sess, subports) + + def test_delete_subports_4xx(self): + # Neutron may return 4xx for example if a port does not exist + sot = trunk.Trunk(**EXAMPLE) + response = mock.Mock() + msg = '.*borked' + response.body = {'NeutronError': {'message': msg}} + response.json = mock.Mock(return_value=response.body) + response.ok = False + response.status_code = 404 + response.headers = {'content-type': 'application/json'} + sess = mock.Mock() + sess.put = mock.Mock(return_value=response) + subports = [ + { + 'port_id': 'abc', + 'segmentation_id': '123', + 'segmentation_type': 'vlan', + } + ] + with testtools.ExpectedException(exceptions.NotFoundException, msg): + sot.delete_subports(sess, subports) diff --git a/openstack/tests/unit/network/v2/test_vpn_endpoint_group.py b/openstack/tests/unit/network/v2/test_vpn_endpoint_group.py new file mode 100644 index 0000000000..f5a658810e --- /dev/null +++ b/openstack/tests/unit/network/v2/test_vpn_endpoint_group.py @@ -0,0 +1,61 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.network.v2 import vpn_endpoint_group +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "description": "", + "project_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "tenant_id": "4ad57e7ce0b24fca8f12b9834d91079d", + "endpoints": ["10.2.0.0/24", "10.3.0.0/24"], + "type": "cidr", + "id": "6ecd9cf3-ca64-46c7-863f-f2eb1b9e838a", + "name": "peers", +} + + +class TestVpnEndpointGroup(base.TestCase): + def test_basic(self): + sot = vpn_endpoint_group.VpnEndpointGroup() + self.assertEqual('endpoint_group', sot.resource_key) + self.assertEqual('endpoint_groups', sot.resources_key) + self.assertEqual('/vpn/endpoint-groups', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = vpn_endpoint_group.VpnEndpointGroup(**EXAMPLE) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['endpoints'], sot.endpoints) + self.assertEqual(EXAMPLE['type'], sot.type) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + 'description': 'description', + 'name': 'name', + 'project_id': 'project_id', + 'tenant_id': 'tenant_id', + 'type': 'endpoint_type', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_vpn_ikepolicy.py b/openstack/tests/unit/network/v2/test_vpn_ikepolicy.py new file mode 100644 index 0000000000..a521644781 --- /dev/null +++ b/openstack/tests/unit/network/v2/test_vpn_ikepolicy.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import vpn_ike_policy +from openstack.tests.unit import base + + +EXAMPLE = { + "auth_algorithm": "1", + "description": "2", + "encryption_algorithm": "3", + "ike_version": "4", + "lifetime": {'a': 5}, + "name": "5", + "pfs": "6", + "project_id": "7", + "phase1_negotiation_mode": "8", + "units": "9", + "value": 10, +} + + +class TestVpnIkePolicy(base.TestCase): + def test_basic(self): + sot = vpn_ike_policy.VpnIkePolicy() + self.assertEqual('ikepolicy', sot.resource_key) + self.assertEqual('ikepolicies', sot.resources_key) + self.assertEqual('/vpn/ikepolicies', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = vpn_ike_policy.VpnIkePolicy(**EXAMPLE) + self.assertEqual(EXAMPLE['auth_algorithm'], sot.auth_algorithm) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual( + EXAMPLE['encryption_algorithm'], sot.encryption_algorithm + ) + self.assertEqual(EXAMPLE['ike_version'], sot.ike_version) + self.assertEqual(EXAMPLE['lifetime'], sot.lifetime) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['pfs'], sot.pfs) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual( + EXAMPLE['phase1_negotiation_mode'], sot.phase1_negotiation_mode + ) + self.assertEqual(EXAMPLE['units'], sot.units) + self.assertEqual(EXAMPLE['value'], sot.value) diff --git 
a/openstack/tests/unit/network/v2/test_vpn_ipsec_site_connection.py b/openstack/tests/unit/network/v2/test_vpn_ipsec_site_connection.py new file mode 100644 index 0000000000..459f9e5f0a --- /dev/null +++ b/openstack/tests/unit/network/v2/test_vpn_ipsec_site_connection.py @@ -0,0 +1,79 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import vpn_ipsec_site_connection +from openstack.tests.unit import base + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + "admin_state_up": True, + "auth_mode": "1", + "ikepolicy_id": "2", + "vpnservice_id": "3", + "local_ep_group_id": "4", + "peer_address": "5", + "route_mode": "6", + "ipsecpolicy_id": "7", + "peer_id": "8", + "psk": "9", + "description": "10", + "initiator": "11", + "peer_cidrs": ['1', '2'], + "name": "12", + "tenant_id": "13", + "interval": 5, + "mtu": 5, + "peer_ep_group_id": "14", + "dpd": {'a': 5}, + "timeout": 16, + "action": "17", + "local_id": "18", +} + + +class TestVpnIPSecSiteConnection(base.TestCase): + def test_basic(self): + sot = vpn_ipsec_site_connection.VpnIPSecSiteConnection() + self.assertEqual('ipsec_site_connection', sot.resource_key) + self.assertEqual('ipsec_site_connections', sot.resources_key) + self.assertEqual('/vpn/ipsec-site-connections', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = 
vpn_ipsec_site_connection.VpnIPSecSiteConnection(**EXAMPLE) + self.assertTrue(sot.is_admin_state_up) + self.assertEqual(EXAMPLE['auth_mode'], sot.auth_mode) + self.assertEqual(EXAMPLE['ikepolicy_id'], sot.ikepolicy_id) + self.assertEqual(EXAMPLE['vpnservice_id'], sot.vpnservice_id) + self.assertEqual(EXAMPLE['local_ep_group_id'], sot.local_ep_group_id) + self.assertEqual(EXAMPLE['peer_address'], sot.peer_address) + self.assertEqual(EXAMPLE['route_mode'], sot.route_mode) + self.assertEqual(EXAMPLE['ipsecpolicy_id'], sot.ipsecpolicy_id) + self.assertEqual(EXAMPLE['peer_id'], sot.peer_id) + self.assertEqual(EXAMPLE['psk'], sot.psk) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['initiator'], sot.initiator) + self.assertEqual(EXAMPLE['peer_cidrs'], sot.peer_cidrs) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['interval'], sot.interval) + self.assertEqual(EXAMPLE['mtu'], sot.mtu) + self.assertEqual(EXAMPLE['peer_ep_group_id'], sot.peer_ep_group_id) + self.assertEqual(EXAMPLE['dpd'], sot.dpd) + self.assertEqual(EXAMPLE['timeout'], sot.timeout) + self.assertEqual(EXAMPLE['action'], sot.action) + self.assertEqual(EXAMPLE['local_id'], sot.local_id) diff --git a/openstack/tests/unit/network/v2/test_vpn_ipsecpolicy.py b/openstack/tests/unit/network/v2/test_vpn_ipsecpolicy.py new file mode 100644 index 0000000000..dc17cb3b9d --- /dev/null +++ b/openstack/tests/unit/network/v2/test_vpn_ipsecpolicy.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.network.v2 import vpn_ipsec_policy +from openstack.tests.unit import base + + +EXAMPLE = { + "auth_algorithm": "1", + "description": "2", + "encapsulation_mode": "tunnel", + "encryption_algorithm": "3", + "lifetime": {'a': 5}, + "name": "5", + "pfs": "6", + "project_id": "7", + "transform_protocol": "ESP", + "units": "9", + "value": 10, +} + + +class TestVpnIpsecPolicy(base.TestCase): + def test_basic(self): + sot = vpn_ipsec_policy.VpnIpsecPolicy() + self.assertEqual('ipsecpolicy', sot.resource_key) + self.assertEqual('ipsecpolicies', sot.resources_key) + self.assertEqual('/vpn/ipsecpolicies', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = vpn_ipsec_policy.VpnIpsecPolicy(**EXAMPLE) + self.assertEqual(EXAMPLE['auth_algorithm'], sot.auth_algorithm) + self.assertEqual(EXAMPLE['description'], sot.description) + self.assertEqual(EXAMPLE['encapsulation_mode'], sot.encapsulation_mode) + self.assertEqual( + EXAMPLE['encryption_algorithm'], sot.encryption_algorithm + ) + self.assertEqual(EXAMPLE['lifetime'], sot.lifetime) + self.assertEqual(EXAMPLE['name'], sot.name) + self.assertEqual(EXAMPLE['pfs'], sot.pfs) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + self.assertEqual(EXAMPLE['transform_protocol'], sot.transform_protocol) + self.assertEqual(EXAMPLE['units'], sot.units) + self.assertEqual(EXAMPLE['value'], sot.value) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + 'auth_algorithm': 'auth_algorithm', + 'description': 'description', + 'encapsulation_mode': 'encapsulation_mode', + 'encryption_algorithm': 'encryption_algorithm', + 'name': 'name', + 'pfs': 'pfs', + 'project_id': 'project_id', + 'phase1_negotiation_mode': 
'phase1_negotiation_mode', + 'transform_protocol': 'transform_protocol', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/network/v2/test_vpn_service.py b/openstack/tests/unit/network/v2/test_vpn_service.py index a5b54590c1..72b69d3f05 100644 --- a/openstack/tests/unit/network/v2/test_vpn_service.py +++ b/openstack/tests/unit/network/v2/test_vpn_service.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.network.v2 import vpn_service +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -25,26 +25,24 @@ "router_id": "5", "status": "6", "subnet_id": "7", - "tenant_id": "8", + "project_id": "8", } -class TestVPNService(testtools.TestCase): - +class TestVpnService(base.TestCase): def test_basic(self): - sot = vpn_service.VPNService() + sot = vpn_service.VpnService() self.assertEqual('vpnservice', sot.resource_key) self.assertEqual('vpnservices', sot.resources_key) self.assertEqual('/vpn/vpnservices', sot.base_path) - self.assertEqual('network', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = vpn_service.VPNService(**EXAMPLE) + sot = vpn_service.VpnService(**EXAMPLE) self.assertTrue(sot.is_admin_state_up) self.assertEqual(EXAMPLE['description'], sot.description) self.assertEqual(EXAMPLE['external_v4_ip'], sot.external_v4_ip) @@ -54,4 +52,21 @@ def test_make_it(self): self.assertEqual(EXAMPLE['router_id'], sot.router_id) self.assertEqual(EXAMPLE['status'], sot.status) self.assertEqual(EXAMPLE['subnet_id'], sot.subnet_id) - self.assertEqual(EXAMPLE['tenant_id'], sot.project_id) + self.assertEqual(EXAMPLE['project_id'], sot.project_id) + + self.assertDictEqual( + { + "limit": 
"limit", + "marker": "marker", + 'description': 'description', + 'external_v4_ip': 'external_v4_ip', + 'external_v6_ip': 'external_v6_ip', + 'name': 'name', + 'router_id': 'router_id', + 'project_id': 'project_id', + 'tenant_id': 'tenant_id', + 'subnet_id': 'subnet_id', + 'is_admin_state_up': 'admin_state_up', + }, + sot._query_mapping._mapping, + ) diff --git a/openstack/tests/unit/object_store/test_object_store_service.py b/openstack/tests/unit/object_store/test_object_store_service.py deleted file mode 100644 index a2707a1340..0000000000 --- a/openstack/tests/unit/object_store/test_object_store_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.object_store import object_store_service - - -class TestObjectStoreService(testtools.TestCase): - - def test_service(self): - sot = object_store_service.ObjectStoreService() - self.assertEqual('object-store', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/object_store/v1/test_account.py b/openstack/tests/unit/object_store/v1/test_account.py index e0df5390a9..f8d88f5e5b 100644 --- a/openstack/tests/unit/object_store/v1/test_account.py +++ b/openstack/tests/unit/object_store/v1/test_account.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.object_store.v1 import account +from openstack.tests.unit import base CONTAINER_NAME = "mycontainer" @@ -25,32 +24,88 @@ 'x-account-container-count': '678', 'content-type': 'text/plain; charset=utf-8', 'x-account-object-count': '98765', - 'x-timestamp': '1453413555.88937' + 'x-timestamp': '1453413555.88937', } -class TestAccount(testtools.TestCase): +class TestAccount(base.TestCase): + def setUp(self): + super().setUp() + self.endpoint = self.cloud.object_store.get_endpoint() + '/' def test_basic(self): - sot = account.Account.new(**ACCOUNT_EXAMPLE) + sot = account.Account(**ACCOUNT_EXAMPLE) self.assertIsNone(sot.resources_key) self.assertIsNone(sot.id) self.assertEqual('/', sot.base_path) - self.assertEqual('object-store', sot.service.service_type) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_head) - self.assertTrue(sot.allow_retrieve) + self.assertTrue(sot.allow_fetch) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) self.assertFalse(sot.allow_create) 
def test_make_it(self): - sot = account.Account.new(**{'headers': ACCOUNT_EXAMPLE}) + sot = account.Account(**ACCOUNT_EXAMPLE) self.assertIsNone(sot.id) - self.assertEqual(int(ACCOUNT_EXAMPLE['x-account-bytes-used']), - sot.account_bytes_used) - self.assertEqual(int(ACCOUNT_EXAMPLE['x-account-container-count']), - sot.account_container_count) - self.assertEqual(int(ACCOUNT_EXAMPLE['x-account-object-count']), - sot.account_object_count) + self.assertEqual( + int(ACCOUNT_EXAMPLE['x-account-bytes-used']), + sot.account_bytes_used, + ) + self.assertEqual( + int(ACCOUNT_EXAMPLE['x-account-container-count']), + sot.account_container_count, + ) + self.assertEqual( + int(ACCOUNT_EXAMPLE['x-account-object-count']), + sot.account_object_count, + ) self.assertEqual(ACCOUNT_EXAMPLE['x-timestamp'], sot.timestamp) + + def test_set_temp_url_key(self): + sot = account.Account() + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.endpoint, + status_code=204, + validate=dict( + headers={'x-account-meta-temp-url-key': key} + ), + ), + dict( + method='HEAD', + uri=self.endpoint, + headers={'x-account-meta-temp-url-key': key}, + ), + ] + ) + sot.set_temp_url_key(self.cloud.object_store, key) + self.assert_calls() + + def test_set_account_temp_url_key_second(self): + sot = account.Account() + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.endpoint, + status_code=204, + validate=dict( + headers={'x-account-meta-temp-url-key-2': key} + ), + ), + dict( + method='HEAD', + uri=self.endpoint, + headers={'x-account-meta-temp-url-key-2': key}, + ), + ] + ) + sot.set_temp_url_key(self.cloud.object_store, key, secondary=True) + self.assert_calls() diff --git a/openstack/tests/unit/object_store/v1/test_container.py b/openstack/tests/unit/object_store/v1/test_container.py index 587c7e470a..7908ff7fe4 100644 --- a/openstack/tests/unit/object_store/v1/test_container.py +++ 
b/openstack/tests/unit/object_store/v1/test_container.py @@ -10,125 +10,124 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools +import json from openstack.object_store.v1 import container +from openstack.tests.unit import base -CONTAINER_NAME = "mycontainer" - -CONT_EXAMPLE = { - "count": 999, - "bytes": 12345, - "name": CONTAINER_NAME -} - -HEAD_EXAMPLE = { - 'content-length': '346', - 'x-container-object-count': '2', - 'accept-ranges': 'bytes', - 'id': 'tx1878fdc50f9b4978a3fdc-0053c31462', - 'date': 'Sun, 13 Jul 2014 23:21:06 GMT', - 'x-container-read': 'read-settings', - 'x-container-write': 'write-settings', - 'x-container-sync-to': 'sync-to', - 'x-container-sync-key': 'sync-key', - 'x-container-bytes-used': '630666', - 'x-versions-location': 'versions-location', - 'content-type': 'application/json; charset=utf-8', - 'x-timestamp': '1453414055.48672' -} - -LIST_EXAMPLE = [ - { - "count": 999, - "bytes": 12345, - "name": "container1" - }, - { - "count": 888, - "bytes": 54321, - "name": "container2" - } -] - - -class TestContainer(testtools.TestCase): - +class TestContainer(base.TestCase): def setUp(self): - super(TestContainer, self).setUp() - self.resp = mock.Mock() - self.resp.body = {} - self.resp.json = mock.Mock(return_value=self.resp.body) - self.resp.headers = {"X-Trans-Id": "abcdef"} - self.sess = mock.Mock() - self.sess.put = mock.Mock(return_value=self.resp) - self.sess.post = mock.Mock(return_value=self.resp) + super().setUp() + self.container = self.getUniqueString() + self.endpoint = self.cloud.object_store.get_endpoint() + '/' + self.container_endpoint = f'{self.endpoint}{self.container}' + + self.body = { + "count": 2, + "bytes": 630666, + "name": self.container, + } + + self.headers = { + 'x-container-object-count': '2', + 'x-container-read': 'read-settings', + 'x-container-write': 'write-settings', + 'x-container-sync-to': 'sync-to', + 'x-container-sync-key': 
'sync-key', + 'x-container-bytes-used': '630666', + 'x-versions-location': 'versions-location', + 'x-history-location': 'history-location', + 'content-type': 'application/json; charset=utf-8', + 'x-timestamp': '1453414055.48672', + 'x-storage-policy': 'Gold', + } + self.body_plus_headers = dict(self.body, **self.headers) def test_basic(self): - sot = container.Container.new(**CONT_EXAMPLE) + sot = container.Container.new(**self.body) self.assertIsNone(sot.resources_key) - self.assertEqual('name', sot.id_attribute) + self.assertEqual('name', sot._alternate_id()) self.assertEqual('/', sot.base_path) - self.assertEqual('object-store', sot.service.service_type) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_head) + self.assert_no_calls() def test_make_it(self): - sot = container.Container.new(**CONT_EXAMPLE) - self.assertEqual(CONT_EXAMPLE['name'], sot.id) - self.assertEqual(CONT_EXAMPLE['name'], sot.name) - self.assertEqual(CONT_EXAMPLE['count'], sot.count) - self.assertEqual(CONT_EXAMPLE['bytes'], sot.bytes) + sot = container.Container.new(**self.body) + self.assertEqual(self.body['name'], sot.id) + self.assertEqual(self.body['name'], sot.name) + self.assertEqual(self.body['count'], sot.count) + self.assertEqual(self.body['count'], sot.object_count) + self.assertEqual(self.body['bytes'], sot.bytes) + self.assertEqual(self.body['bytes'], sot.bytes_used) + self.assert_no_calls() def test_create_and_head(self): - sot = container.Container(CONT_EXAMPLE) - - # Update container with HEAD data - sot._attrs.update({'headers': HEAD_EXAMPLE}) + sot = container.Container(**self.body_plus_headers) # Attributes from create - self.assertEqual(CONT_EXAMPLE['name'], sot.id) - self.assertEqual(CONT_EXAMPLE['name'], sot.name) - 
self.assertEqual(CONT_EXAMPLE['count'], sot.count) - self.assertEqual(CONT_EXAMPLE['bytes'], sot.bytes) + self.assertEqual(self.body_plus_headers['name'], sot.id) + self.assertEqual(self.body_plus_headers['name'], sot.name) + self.assertEqual(self.body_plus_headers['count'], sot.count) + self.assertEqual(self.body_plus_headers['bytes'], sot.bytes) # Attributes from header - self.assertEqual(int(HEAD_EXAMPLE['x-container-object-count']), - sot.object_count) - self.assertEqual(int(HEAD_EXAMPLE['x-container-bytes-used']), - sot.bytes_used) - self.assertEqual(HEAD_EXAMPLE['x-container-read'], - sot.read_ACL) - self.assertEqual(HEAD_EXAMPLE['x-container-write'], - sot.write_ACL) - self.assertEqual(HEAD_EXAMPLE['x-container-sync-to'], - sot.sync_to) - self.assertEqual(HEAD_EXAMPLE['x-container-sync-key'], - sot.sync_key) - self.assertEqual(HEAD_EXAMPLE['x-versions-location'], - sot.versions_location) - self.assertEqual(HEAD_EXAMPLE['x-timestamp'], sot.timestamp) - - @mock.patch("openstack.resource.Resource.list") - def test_list(self, fake_list): - fake_val = [container.Container.existing(**ex) for ex in LIST_EXAMPLE] - fake_list.return_value = fake_val - - # Since the list method is mocked out, just pass None for the session. 
- response = container.Container.list(None) - - self.assertEqual(len(LIST_EXAMPLE), len(response)) - for item in range(len(response)): - self.assertEqual(container.Container, type(response[item])) - self.assertEqual(LIST_EXAMPLE[item]["name"], response[item].name) - self.assertEqual(LIST_EXAMPLE[item]["count"], response[item].count) - self.assertEqual(LIST_EXAMPLE[item]["bytes"], response[item].bytes) + self.assertEqual( + int(self.body_plus_headers['x-container-object-count']), + sot.object_count, + ) + self.assertEqual( + int(self.body_plus_headers['x-container-bytes-used']), + sot.bytes_used, + ) + self.assertEqual( + self.body_plus_headers['x-container-read'], sot.read_ACL + ) + self.assertEqual( + self.body_plus_headers['x-container-write'], sot.write_ACL + ) + self.assertEqual( + self.body_plus_headers['x-container-sync-to'], sot.sync_to + ) + self.assertEqual( + self.body_plus_headers['x-container-sync-key'], sot.sync_key + ) + self.assertEqual( + self.body_plus_headers['x-versions-location'], + sot.versions_location, + ) + self.assertEqual( + self.body_plus_headers['x-history-location'], sot.history_location + ) + self.assertEqual(self.body_plus_headers['x-timestamp'], sot.timestamp) + self.assertEqual( + self.body_plus_headers['x-storage-policy'], sot.storage_policy + ) + + def test_list(self): + containers = [ + {"count": 999, "bytes": 12345, "name": "container1"}, + {"count": 888, "bytes": 54321, "name": "container2"}, + ] + self.register_uris( + [dict(method='GET', uri=self.endpoint, json=containers)] + ) + + response = container.Container.list(self.cloud.object_store) + + self.assertEqual(len(containers), len(list(response))) + for index, item in enumerate(response): + self.assertEqual(container.Container, type(item)) + self.assertEqual(containers[index]["name"], item.name) + self.assertEqual(containers[index]["count"], item.count) + self.assertEqual(containers[index]["bytes"], item.bytes) + + self.assert_calls() def _test_create_update(self, sot, 
sot_call, sess_method): sot.read_ACL = "some ACL" @@ -137,35 +136,142 @@ def _test_create_update(self, sot, sot_call, sess_method): headers = { "x-container-read": "some ACL", "x-container-write": "another ACL", - "x-detect-content-type": True, - "Accept": "", + "x-detect-content-type": 'True', + "X-Container-Meta-foo": "bar", } - sot_call(self.sess) + self.register_uris( + [ + dict( + method=sess_method, + uri=self.container_endpoint, + json=self.body, + validate=dict(headers=headers), + ), + ] + ) + sot_call(self.cloud.object_store) - url = "/%s" % CONTAINER_NAME - sess_method.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + self.assert_calls() def test_create(self): - sot = container.Container.new(name=CONTAINER_NAME) - self._test_create_update(sot, sot.create, self.sess.put) + sot = container.Container.new( + name=self.container, metadata={'foo': 'bar'} + ) + self._test_create_update(sot, sot.create, 'PUT') + + def test_commit(self): + sot = container.Container.new( + name=self.container, metadata={'foo': 'bar'} + ) + self._test_create_update(sot, sot.commit, 'POST') + + def test_to_dict_recursion(self): + # This test is verifying that circular aliases in a Resource + # do not cause infinite recursion. count is aliased to object_count + # and object_count is aliased to count. 
+ sot = container.Container.new(name=self.container) + sot_dict = sot.to_dict() + self.assertIsNone(sot_dict['count']) + self.assertIsNone(sot_dict['object_count']) + self.assertEqual(sot_dict['id'], self.container) + self.assertEqual(sot_dict['name'], self.container) - def test_update(self): - sot = container.Container.new(name=CONTAINER_NAME) - self._test_create_update(sot, sot.update, self.sess.post) + def test_to_json(self): + sot = container.Container.new(name=self.container) + self.assertEqual( + { + 'bytes': None, + 'bytes_used': None, + 'content_type': None, + 'count': None, + 'id': self.container, + 'if_none_match': None, + 'is_content_type_detected': None, + 'is_newest': None, + 'location': None, + 'name': self.container, + 'object_count': None, + 'read_ACL': None, + 'sync_key': None, + 'sync_to': None, + 'meta_temp_url_key': None, + 'meta_temp_url_key_2': None, + 'timestamp': None, + 'versions_location': None, + 'history_location': None, + 'write_ACL': None, + 'storage_policy': None, + }, + json.loads(json.dumps(sot)), + ) def _test_no_headers(self, sot, sot_call, sess_method): - sot = container.Container.new(name=CONTAINER_NAME) - sot.create(self.sess) - url = "/%s" % CONTAINER_NAME - headers = {'Accept': ''} - self.sess.put.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) + headers = {} + self.register_uris( + [ + dict( + method=sess_method, + uri=self.container_endpoint, + validate=dict(headers=headers), + ) + ] + ) + sot_call(self.cloud.object_store) def test_create_no_headers(self): - sot = container.Container.new(name=CONTAINER_NAME) - self._test_no_headers(sot, sot.create, self.sess.put) + sot = container.Container.new(name=self.container) + self._test_no_headers(sot, sot.create, 'PUT') + self.assert_calls() + + def test_commit_no_headers(self): + sot = container.Container.new(name=self.container) + self._test_no_headers(sot, sot.commit, 'POST') + self.assert_calls() + + def test_set_temp_url_key(self): + sot = 
container.Container.new(name=self.container) + key = self.getUniqueString() + + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={'x-container-meta-temp-url-key': key} + ), + ), + dict( + method='HEAD', + uri=self.container_endpoint, + headers={'x-container-meta-temp-url-key': key}, + ), + ] + ) + sot.set_temp_url_key(self.cloud.object_store, key) + self.assert_calls() + + def test_set_temp_url_key_second(self): + sot = container.Container.new(name=self.container) + key = self.getUniqueString() - def test_update_no_headers(self): - sot = container.Container.new(name=CONTAINER_NAME) - self._test_no_headers(sot, sot.update, self.sess.post) + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={'x-container-meta-temp-url-key-2': key} + ), + ), + dict( + method='HEAD', + uri=self.container_endpoint, + headers={'x-container-meta-temp-url-key-2': key}, + ), + ] + ) + sot.set_temp_url_key(self.cloud.object_store, key, secondary=True) + self.assert_calls() diff --git a/openstack/tests/unit/object_store/v1/test_info.py b/openstack/tests/unit/object_store/v1/test_info.py new file mode 100644 index 0000000000..9551bed308 --- /dev/null +++ b/openstack/tests/unit/object_store/v1/test_info.py @@ -0,0 +1,44 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.object_store.v1 import info +from openstack.tests.unit import base + + +class TestInfo(base.TestCase): + def setUp(self): + super().setUp() + + def test_get_info_url(self): + sot = info.Info() + test_urls = { + 'http://object.cloud.example.com': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/v1': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/v1/': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/swift': 'http://object.cloud.example.com/swift/info', + 'http://object.cloud.example.com/swift/': 'http://object.cloud.example.com/swift/info', + 'http://object.cloud.example.com/v1.0': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/swift/v1.0': 'http://object.cloud.example.com/swift/info', + 'http://object.cloud.example.com/v111': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/v111/test': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/v1/test': 'http://object.cloud.example.com/info', + 'http://object.cloud.example.com/swift/v1.0/test': 'http://object.cloud.example.com/swift/info', + 'http://object.cloud.example.com/v1.0/test': 'http://object.cloud.example.com/info', + 'https://object.cloud.example.com/swift/v1/AUTH_%(tenant_id)s': 'https://object.cloud.example.com/swift/info', + 'https://object.cloud.example.com/swift/v1/AUTH_%(project_id)s': 'https://object.cloud.example.com/swift/info', + 'https://object.cloud.example.com/services/swift/v1/AUTH_%(project_id)s': 'https://object.cloud.example.com/services/swift/info', # noqa: E501 + 'https://object.cloud.example.com/services/swift/v1/AUTH_%(project_id)s/': 'https://object.cloud.example.com/services/swift/info', # noqa: E501 + 'https://object.cloud.example.com/info/v1/AUTH_%(project_id)s/': 'https://object.cloud.example.com/info/info', + } + for uri_k, uri_v in 
test_urls.items(): + self.assertEqual(sot._get_info_url(uri_k), uri_v) diff --git a/openstack/tests/unit/object_store/v1/test_obj.py b/openstack/tests/unit/object_store/v1/test_obj.py index 47a3b95ad4..de30ba1c5f 100644 --- a/openstack/tests/unit/object_store/v1/test_obj.py +++ b/openstack/tests/unit/object_store/v1/test_obj.py @@ -10,14 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools - from openstack.object_store.v1 import obj - - -CONTAINER_NAME = "mycontainer" -OBJECT_NAME = "myobject" +from openstack.tests.unit.cloud import test_object as base_test_object # Object can receive both last-modified in headers and last_modified in # the body. However, originally, only last-modified was handled as an @@ -30,108 +24,162 @@ # attribute which would follow the same pattern. # This example should represent the body values returned by a GET, so the keys # must be underscores. -OBJ_EXAMPLE = { - "hash": "243f87b91224d85722564a80fd3cb1f1", - "last_modified": "2014-07-13T18:41:03.319240", - "bytes": 252466, - "name": OBJECT_NAME, - "content_type": "application/octet-stream" -} - -DICT_EXAMPLE = { - 'container': CONTAINER_NAME, - 'name': OBJECT_NAME, - 'content_type': 'application/octet-stream', - 'headers': { - 'content-length': '252466', - 'accept-ranges': 'bytes', - 'last-modified': 'Sun, 13 Jul 2014 18:41:04 GMT', - 'etag': '243f87b91224d85722564a80fd3cb1f1', - 'x-timestamp': '1453414256.28112', - 'date': 'Thu, 28 Aug 2014 14:41:59 GMT', - 'id': 'tx5fb5ad4f4d0846c6b2bc7-0053ff3fb7', - 'x-delete-at': '1453416226.16744' - } -} - - -class TestObject(testtools.TestCase): + +class TestObject(base_test_object.BaseTestObject): def setUp(self): - super(TestObject, self).setUp() - self.resp = mock.Mock() - self.resp.content = "lol here's some content" - self.resp.headers = {"X-Trans-Id": "abcdef"} - self.sess = mock.Mock() - self.sess.get = mock.Mock(return_value=self.resp) - self.sess.put = 
mock.Mock(return_value=self.resp) - self.sess.post = mock.Mock(return_value=self.resp) + super().setUp() + self.the_data = b'test body' + self.the_data_length = len(self.the_data) + # TODO(mordred) Make the_data be from getUniqueString and then + # have hash and etag be actual md5 sums of that string + self.body = { + "hash": "243f87b91224d85722564a80fd3cb1f1", + "last_modified": "2014-07-13T18:41:03.319240", + "bytes": self.the_data_length, + "name": self.object, + "content_type": "application/octet-stream", + } + self.headers = { + 'Content-Length': str(len(self.the_data)), + 'Content-Type': 'application/octet-stream', + 'Accept-Ranges': 'bytes', + 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', + 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', + 'X-Timestamp': '1481808853.65009', + 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', + 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', + 'X-Static-Large-Object': 'True', + 'X-Object-Meta-Mtime': '1481513709.168512', + 'X-Delete-At': '1453416226.16744', + } def test_basic(self): - sot = obj.Object.new(**OBJ_EXAMPLE) + sot = obj.Object.new(**self.body) + self.assert_no_calls() self.assertIsNone(sot.resources_key) - self.assertEqual("name", sot.id_attribute) + self.assertEqual('name', sot._alternate_id()) self.assertEqual('/%(container)s', sot.base_path) - self.assertEqual('object-store', sot.service.service_type) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) + self.assertTrue(sot.allow_fetch) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) self.assertTrue(sot.allow_head) + self.assertDictEqual( + { + 'filename': 'filename', + 'format': 'format', + 'limit': 'limit', + 'marker': 'marker', + 'multipart_manifest': 'multipart-manifest', + 'prefix': 'prefix', + 'symlink': 'symlink', + 'temp_url_expires': 'temp_url_expires', + 'temp_url_sig': 'temp_url_sig', + }, + sot._query_mapping._mapping, + ) + def 
test_new(self): - sot = obj.Object.new(container=CONTAINER_NAME, name=OBJECT_NAME) - self.assertEqual(OBJECT_NAME, sot.name) - self.assertEqual(CONTAINER_NAME, sot.container) + sot = obj.Object.new(container=self.container, name=self.object) + self.assert_no_calls() + self.assertEqual(self.object, sot.name) + self.assertEqual(self.container, sot.container) - def test_head(self): - sot = obj.Object.existing(**DICT_EXAMPLE) + def test_from_body(self): + sot = obj.Object.existing(container=self.container, **self.body) + self.assert_no_calls() # Attributes from header - self.assertEqual(DICT_EXAMPLE['container'], sot.container) - headers = DICT_EXAMPLE['headers'] - self.assertEqual(headers['content-length'], sot.content_length) - self.assertEqual(headers['accept-ranges'], sot.accept_ranges) - self.assertEqual(headers['last-modified'], sot.last_modified_at) - self.assertEqual(headers['etag'], sot.etag) - self.assertEqual(headers['x-timestamp'], sot.timestamp) - self.assertEqual(headers['content-type'], sot.content_type) - self.assertEqual(headers['x-delete-at'], sot.delete_at) - - def test_get(self): - sot = obj.Object.new(container=CONTAINER_NAME, name=OBJECT_NAME) - sot.is_newest = True - sot.if_match = {"who": "what"} - - rv = sot.get(self.sess) - - url = "%s/%s" % (CONTAINER_NAME, OBJECT_NAME) - # TODO(thowe): Should allow filtering bug #1488269 - # headers = { - # "x-newest": True, - # "if-match": {"who": "what"} - # } - headers = {'Accept': 'bytes'} - self.sess.get.assert_called_with(url, endpoint_filter=sot.service, - headers=headers) - self.assertEqual(self.resp.content, rv) - - def _test_create(self, method, data, accept): - sot = obj.Object.new(container=CONTAINER_NAME, name=OBJECT_NAME, - data=data) + self.assertEqual(self.container, sot.container) + self.assertEqual(int(self.body['bytes']), sot.content_length) + self.assertEqual(self.body['last_modified'], sot.last_modified_at) + self.assertEqual(self.body['hash'], sot.etag) + 
self.assertEqual(self.body['content_type'], sot.content_type) + + def test_from_headers(self): + sot = obj.Object.existing(container=self.container, **self.headers) + self.assert_no_calls() + + # Attributes from header + self.assertEqual(self.container, sot.container) + self.assertEqual( + int(self.headers['Content-Length']), sot.content_length + ) + self.assertEqual(self.headers['Accept-Ranges'], sot.accept_ranges) + self.assertEqual(self.headers['Last-Modified'], sot.last_modified_at) + self.assertEqual(self.headers['Etag'], sot.etag) + self.assertEqual(self.headers['X-Timestamp'], sot.timestamp) + self.assertEqual(self.headers['Content-Type'], sot.content_type) + self.assertEqual(self.headers['X-Delete-At'], sot.delete_at) + + # Verify that we also properly process lowcased headers + # All headers are processed in _base._set_metadata therefore invoke it + # here directly + sot._set_metadata(headers={"x-object-meta-foo": "bar"}) + self.assert_no_calls() + + # Attributes from header + self.assertEqual("bar", sot.metadata["foo"]) + + def test_download(self): + headers = { + 'X-Newest': 'True', + 'If-Match': self.headers['Etag'], + 'Accept': '*/*', + } + self.register_uris( + [ + dict( + method='GET', + uri=self.object_endpoint, + headers=self.headers, + content=self.the_data, + validate=dict(headers=headers), + ) + ] + ) + sot = obj.Object.new(container=self.container, name=self.object) sot.is_newest = True - headers = {"x-newest": True, "Accept": ""} + # if_match is a list type, but we're passing a string. This tests + # the up-conversion works properly. 
+ sot.if_match = self.headers['Etag'] - rv = sot.create(self.sess) + rv = sot.download(self.cloud.object_store) - url = "%s/%s" % (CONTAINER_NAME, OBJECT_NAME) - method.assert_called_with(url, endpoint_filter=sot.service, data=data, - headers=headers) - self.assertEqual(self.resp.headers, rv.get_headers()) + self.assertEqual(self.the_data, rv) + + self.assert_calls() + + def _test_create(self, method, data): + sot = obj.Object.new( + container=self.container, + name=self.object, + data=data, + metadata={'foo': 'bar'}, + ) + sot.is_newest = True + sent_headers = {"x-newest": 'True', "X-Object-Meta-foo": "bar"} + self.register_uris( + [ + dict( + method=method, + uri=self.object_endpoint, + headers=self.headers, + validate=dict(headers=sent_headers), + ) + ] + ) + + rv = sot.create(self.cloud.object_store) + self.assertEqual(rv.etag, self.headers['Etag']) + + self.assert_calls() def test_create_data(self): - self._test_create(self.sess.put, "data", "bytes") + self._test_create('PUT', self.the_data) def test_create_no_data(self): - self._test_create(self.sess.post, None, None) + self._test_create('PUT', None) diff --git a/openstack/tests/unit/object_store/v1/test_proxy.py b/openstack/tests/unit/object_store/v1/test_proxy.py index 5fa85c76bc..58797971dd 100644 --- a/openstack/tests/unit/object_store/v1/test_proxy.py +++ b/openstack/tests/unit/object_store/v1/test_proxy.py @@ -10,55 +10,98 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import six +from hashlib import sha1 +import random +import string +import tempfile +import time +from unittest import mock + +import requests_mock +from testscenarios import load_tests_apply_scenarios as load_tests # noqa -from openstack.object_store.v1 import _proxy from openstack.object_store.v1 import account from openstack.object_store.v1 import container from openstack.object_store.v1 import obj +from openstack.tests.unit.cloud import test_object as base_test_object from openstack.tests.unit import test_proxy_base +class FakeResponse: + def __init__(self, response, status_code=200, headers=None): + self.body = response + self.status_code = status_code + self.headers = headers if headers else {} + + def json(self): + return self.body + + class TestObjectStoreProxy(test_proxy_base.TestProxyBase): + kwargs_to_path_args = False def setUp(self): - super(TestObjectStoreProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) + super().setUp() + self.proxy = self.cloud.object_store + self.container = self.getUniqueString() + self.endpoint = self.cloud.object_store.get_endpoint() + '/' + self.container_endpoint = f'{self.endpoint}{self.container}' def test_account_metadata_get(self): - self.verify_head(self.proxy.get_account_metadata, account.Account) + self.verify_head( + self.proxy.get_account_metadata, account.Account, method_args=[] + ) def test_container_metadata_get(self): - self.verify_head(self.proxy.get_container_metadata, - container.Container, value="container") + self.verify_head( + self.proxy.get_container_metadata, + container.Container, + method_args=["container"], + ) def test_container_delete(self): - self.verify_delete(self.proxy.delete_container, - container.Container, False) + self.verify_delete( + self.proxy.delete_container, container.Container, False + ) def test_container_delete_ignore(self): - self.verify_delete(self.proxy.delete_container, - container.Container, True) + self.verify_delete( + self.proxy.delete_container, 
container.Container, True + ) def test_container_create_attrs(self): - self.verify_create(self.proxy.create_container, container.Container) + self.verify_create( + self.proxy.create_container, + container.Container, + method_args=['container_name'], + expected_args=[], + expected_kwargs={'name': 'container_name', "x": 1, "y": 2, "z": 3}, + ) def test_object_metadata_get(self): - self.verify_head(self.proxy.get_object_metadata, obj.Object, - value="object", container="container") + self._verify( + "openstack.proxy.Proxy._head", + self.proxy.get_object_metadata, + method_args=['object'], + method_kwargs={'container': 'container'}, + expected_args=[obj.Object, 'object'], + expected_kwargs={'container': 'container'}, + ) def _test_object_delete(self, ignore): - expected_kwargs = {"path_args": {"container": "name"}} - expected_kwargs["ignore_missing"] = ignore - - self._verify2("openstack.proxy.BaseProxy._delete", - self.proxy.delete_object, - method_args=["resource"], - method_kwargs={"container": "name", - "ignore_missing": ignore}, - expected_args=[obj.Object, "resource"], - expected_kwargs=expected_kwargs) + expected_kwargs = { + "ignore_missing": ignore, + "container": "name", + } + + self._verify( + "openstack.proxy.Proxy._delete", + self.proxy.delete_object, + method_args=["resource"], + method_kwargs=expected_kwargs, + expected_args=[obj.Object, "resource"], + expected_kwargs=expected_kwargs, + ) def test_object_delete(self): self._test_object_delete(False) @@ -67,211 +110,617 @@ def test_object_delete_ignore(self): self._test_object_delete(True) def test_object_create_attrs(self): - path_args = {"path_args": {"container": "name"}} - method_kwargs = {"name": "test", "data": "data", "container": "name"} - - expected_kwargs = path_args.copy() - expected_kwargs.update(method_kwargs) - expected_kwargs.pop("container") - - self._verify2("openstack.proxy.BaseProxy._create", - self.proxy.upload_object, - method_kwargs=method_kwargs, - expected_args=[obj.Object], - 
expected_kwargs=expected_kwargs) + kwargs = { + "name": "test", + "data": "data", + "container": "name", + "metadata": {}, + } + + self._verify( + "openstack.proxy.Proxy._create", + self.proxy.upload_object, + method_kwargs=kwargs, + expected_args=[obj.Object], + expected_kwargs=kwargs, + ) def test_object_create_no_container(self): - self.assertRaises(ValueError, self.proxy.upload_object) + self.assertRaises(TypeError, self.proxy.upload_object) def test_object_get(self): - self.verify_get(self.proxy.get_object, obj.Object, - value=["object"], container="container") - - -class Test_containers(TestObjectStoreProxy): - - def setUp(self): - super(Test_containers, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - self.containers_body = [] - for i in range(3): - self.containers_body.append({six.text_type("name"): - six.text_type("container%d" % i)}) - -# @httpretty.activate -# def test_all_containers(self): -# self.stub_url(httpretty.GET, -# path=[container.Container.base_path], -# responses=[httpretty.Response( -# body=json.dumps(self.containers_body), -# status=200, content_type="application/json"), -# httpretty.Response(body=json.dumps([]), -# status=200, content_type="application/json")]) -# -# count = 0 -# for actual, expected in zip(self.proxy.containers(), -# self.containers_body): -# self.assertEqual(expected, actual) -# count += 1 -# self.assertEqual(len(self.containers_body), count) - -# @httpretty.activate -# def test_containers_limited(self): -# limit = len(self.containers_body) + 1 -# limit_param = "?limit=%d" % limit -# -# self.stub_url(httpretty.GET, -# path=[container.Container.base_path + limit_param], -# json=self.containers_body) -# -# count = 0 -# for actual, expected in zip(self.proxy.containers(limit=limit), -# self.containers_body): -# self.assertEqual(actual, expected) -# count += 1 -# -# self.assertEqual(len(self.containers_body), count) -# # Since we've chosen a limit larger than the body, only one request -# # should be made, so it 
should be the last one. -# self.assertIn(limit_param, httpretty.last_request().path) - -# @httpretty.activate -# def test_containers_with_marker(self): -# marker = six.text_type("container2") -# marker_param = "marker=%s" % marker -# -# self.stub_url(httpretty.GET, -# path=[container.Container.base_path + "?" + -# marker_param], -# json=self.containers_body) -# -# count = 0 -# for actual, expected in zip(self.proxy.containers(marker=marker), -# self.containers_body): -# # Make sure the marker made it into the actual request. -# self.assertIn(marker_param, httpretty.last_request().path) -# self.assertEqual(expected, actual) -# count += 1 -# -# self.assertEqual(len(self.containers_body), count) -# -# # Since we have to make one request beyond the end, because no -# # limit was provided, make sure the last container appears as -# # the marker in this last request. -# self.assertIn(self.containers_body[-1]["name"], -# httpretty.last_request().path) - - -class Test_objects(TestObjectStoreProxy): - - def setUp(self): - super(Test_objects, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - self.container_name = six.text_type("my_container") - - self.objects_body = [] - for i in range(3): - self.objects_body.append({six.text_type("name"): - six.text_type("object%d" % i)}) - - # Returned object bodies have their container inserted. 
- self.returned_objects = [] - for ob in self.objects_body: - ob[six.text_type("container")] = self.container_name - self.returned_objects.append(ob) - self.assertEqual(len(self.objects_body), len(self.returned_objects)) - -# @httpretty.activate -# def test_all_objects(self): -# self.stub_url(httpretty.GET, -# path=[obj.Object.base_path % -# {"container": self.container_name}], -# responses=[httpretty.Response( -# body=json.dumps(self.objects_body), -# status=200, content_type="application/json"), -# httpretty.Response(body=json.dumps([]), -# status=200, content_type="application/json")]) -# -# count = 0 -# for actual, expected in zip(self.proxy.objects(self.container_name), -# self.returned_objects): -# self.assertEqual(expected, actual) -# count += 1 -# self.assertEqual(len(self.returned_objects), count) - -# @httpretty.activate -# def test_objects_limited(self): -# limit = len(self.objects_body) + 1 -# limit_param = "?limit=%d" % limit -# -# self.stub_url(httpretty.GET, -# path=[obj.Object.base_path % -# {"container": self.container_name} + limit_param], -# json=self.objects_body) -# -# count = 0 -# for actual, expected in zip(self.proxy.objects(self.container_name, -# limit=limit), -# self.returned_objects): -# self.assertEqual(expected, actual) -# count += 1 -# -# self.assertEqual(len(self.returned_objects), count) -# # Since we've chosen a limit larger than the body, only one request -# # should be made, so it should be the last one. -# self.assertIn(limit_param, httpretty.last_request().path) - -# @httpretty.activate -# def test_objects_with_marker(self): -# marker = six.text_type("object2") -# # marker_param = "marker=%s" % marker -# -# self.stub_url(httpretty.GET, -# path=[obj.Object.base_path % -# {"container": self.container_name} + "?" 
+ -# marker_param], -# json=self.objects_body) -# -# count = 0 -# for actual, expected in zip(self.proxy.objects(self.container_name, -# marker=marker), -# self.returned_objects): -# # Make sure the marker made it into the actual request. -# self.assertIn(marker_param, httpretty.last_request().path) -# self.assertEqual(expected, actual) -# count += 1 -# -# self.assertEqual(len(self.returned_objects), count) -# -# # Since we have to make one request beyond the end, because no -# # limit was provided, make sure the last container appears as -# # the marker in this last request. -# self.assertIn(self.returned_objects[-1]["name"], -# httpretty.last_request().path) - - -class Test_download_object(TestObjectStoreProxy): - - @mock.patch("openstack.object_store.v1._proxy.Proxy.get_object") - def test_download(self, mock_get): - the_data = "here's some data" - mock_get.return_value = the_data - ob = mock.Mock() - - fake_open = mock.mock_open() - file_path = "blarga/somefile" - with mock.patch("openstack.object_store.v1._proxy.open", - fake_open, create=True): - self.proxy.download_object(ob, container="tainer", path=file_path) - - fake_open.assert_called_once_with(file_path, "w") - fake_handle = fake_open() - fake_handle.write.assert_called_once_with(the_data) - - -class Test_copy_object(TestObjectStoreProxy): + with requests_mock.Mocker() as m: + m.get(f"{self.endpoint}container/object", text="data") + res = self.proxy.get_object("object", container="container") + self.assertIsNone(res.data) + + def test_object_get_write_file(self): + with requests_mock.Mocker() as m: + m.get(f"{self.endpoint}container/object", text="data") + with tempfile.NamedTemporaryFile() as f: + self.proxy.get_object( + "object", container="container", outfile=f.name + ) + dt = open(f.name).read() + self.assertEqual(dt, "data") + + def test_object_get_remember_content(self): + with requests_mock.Mocker() as m: + m.get(f"{self.endpoint}container/object", text="data") + res = self.proxy.get_object( + 
"object", container="container", remember_content=True + ) + self.assertEqual(res.data, "data") + + def test_set_temp_url_key(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.endpoint, + status_code=204, + validate=dict( + headers={'x-account-meta-temp-url-key': key} + ), + ), + dict( + method='HEAD', + uri=self.endpoint, + headers={'x-account-meta-temp-url-key': key}, + ), + ] + ) + self.proxy.set_account_temp_url_key(key) + self.assert_calls() + + def test_set_account_temp_url_key_second(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.endpoint, + status_code=204, + validate=dict( + headers={'x-account-meta-temp-url-key-2': key} + ), + ), + dict( + method='HEAD', + uri=self.endpoint, + headers={'x-account-meta-temp-url-key-2': key}, + ), + ] + ) + self.proxy.set_account_temp_url_key(key, secondary=True) + self.assert_calls() + + def test_set_container_temp_url_key(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={'x-container-meta-temp-url-key': key} + ), + ), + dict( + method='HEAD', + uri=self.container_endpoint, + headers={'x-container-meta-temp-url-key': key}, + ), + ] + ) + self.proxy.set_container_temp_url_key(self.container, key) + self.assert_calls() + + def test_set_container_temp_url_key_second(self): + key = 'super-secure-key' + + self.register_uris( + [ + dict( + method='POST', + uri=self.container_endpoint, + status_code=204, + validate=dict( + headers={'x-container-meta-temp-url-key-2': key} + ), + ), + dict( + method='HEAD', + uri=self.container_endpoint, + headers={'x-container-meta-temp-url-key-2': key}, + ), + ] + ) + self.proxy.set_container_temp_url_key( + self.container, key, secondary=True + ) + self.assert_calls() def test_copy_object(self): self.assertRaises(NotImplementedError, self.proxy.copy_object) + + def 
test_file_segment(self): + file_size = 4200 + content = ''.join( + random.choice(string.ascii_uppercase + string.digits) + for _ in range(file_size) + ).encode('latin-1') + self.imagefile = tempfile.NamedTemporaryFile(delete=False) + self.imagefile.write(content) + self.imagefile.close() + + segments = self.proxy._get_file_segments( + endpoint='test_container/test_image', + filename=self.imagefile.name, + file_size=file_size, + segment_size=1000, + ) + self.assertEqual(len(segments), 5) + segment_content = b'' + for index, (name, segment) in enumerate(segments.items()): + self.assertEqual( + f'test_container/test_image/{index:0>6}', + name, + ) + segment_content += segment.read() + self.assertEqual(content, segment_content) + + +class TestDownloadObject(base_test_object.BaseTestObject): + def setUp(self): + super().setUp() + self.the_data = b'test body' + self.register_uris( + [ + dict( + method='GET', + uri=self.object_endpoint, + headers={ + 'Content-Length': str(len(self.the_data)), + 'Content-Type': 'application/octet-stream', + 'Accept-Ranges': 'bytes', + 'Last-Modified': 'Thu, 15 Dec 2016 13:34:14 GMT', + 'Etag': '"b5c454b44fbd5344793e3fb7e3850768"', + 'X-Timestamp': '1481808853.65009', + 'X-Trans-Id': 'tx68c2a2278f0c469bb6de1-005857ed80dfw1', + 'Date': 'Mon, 19 Dec 2016 14:24:00 GMT', + 'X-Static-Large-Object': 'True', + 'X-Object-Meta-Mtime': '1481513709.168512', + }, + content=self.the_data, + ) + ] + ) + + def test_download(self): + data = self.cloud.object_store.download_object( + self.object, container=self.container + ) + + self.assertEqual(data, self.the_data) + self.assert_calls() + + def test_stream(self): + chunk_size = 2 + for index, chunk in enumerate( + self.cloud.object_store.stream_object( + self.object, container=self.container, chunk_size=chunk_size + ) + ): + chunk_len = len(chunk) + start = index * chunk_size + end = start + chunk_len + self.assertLessEqual(chunk_len, chunk_size) + self.assertEqual(chunk, self.the_data[start:end]) + 
self.assert_calls() + + +class TestExtractName(TestObjectStoreProxy): + scenarios = [ + ('discovery', dict(url='/', parts=['account'])), + ('endpoints', dict(url='/endpoints', parts=['endpoints'])), + ( + 'container', + dict(url='/AUTH_123/container_name', parts=['container']), + ), + ('object', dict(url='/container_name/object_name', parts=['object'])), + ( + 'object_long', + dict( + url='/v1/AUTH_123/cnt/path/deep/object_name', parts=['object'] + ), + ), + ] + + def test_extract_name(self): + results = self.proxy._extract_name(self.url, project_id='123') + self.assertEqual(self.parts, results) + + +class TestTempURL(TestObjectStoreProxy): + expires_iso8601_format = '%Y-%m-%dT%H:%M:%SZ' + short_expires_iso8601_format = '%Y-%m-%d' + time_errmsg = ( + 'time must either be a whole number or in specific ISO 8601 format.' + ) + path_errmsg = 'path must be full path to an object e.g. /v1/a/c/o' + url = '/v1/AUTH_account/c/o' + seconds = 3600 + key = 'correcthorsebatterystaple' + method = 'GET' + expected_url = url + ( + '?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) + expected_body = '\n'.join( + [ + method, + '1400003600', + url, + ] + ).encode('utf-8') + + @mock.patch('hmac.HMAC') + @mock.patch('time.time', return_value=1400000000) + def test_generate_temp_url(self, time_mock, hmac_mock): + hmac_mock().hexdigest.return_value = 'temp_url_signature' + url = self.proxy.generate_temp_url( + self.url, self.seconds, self.method, temp_url_key=self.key + ) + key = self.key + if not isinstance(key, bytes): + key = key.encode('utf-8') + self.assertEqual(url, self.expected_url) + self.assertEqual( + hmac_mock.mock_calls, + [ + mock.call(), + mock.call(key, self.expected_body, sha1), + mock.call().hexdigest(), + ], + ) + self.assertIsInstance(url, type(self.url)) + + @mock.patch('hmac.HMAC') + @mock.patch('time.time', return_value=1400000000) + def test_generate_temp_url_ip_range(self, time_mock, hmac_mock): + hmac_mock().hexdigest.return_value = 
'temp_url_signature' + ip_ranges = [ + '1.2.3.4', + '1.2.3.4/24', + '2001:db8::', + b'1.2.3.4', + b'1.2.3.4/24', + b'2001:db8::', + ] + path = '/v1/AUTH_account/c/o/' + expected_url = path + ( + '?temp_url_sig=temp_url_signature' + '&temp_url_expires=1400003600' + '&temp_url_ip_range=' + ) + for ip_range in ip_ranges: + hmac_mock.reset_mock() + url = self.proxy.generate_temp_url( + path, + self.seconds, + self.method, + temp_url_key=self.key, + ip_range=ip_range, + ) + key = self.key + if not isinstance(key, bytes): + key = key.encode('utf-8') + + if isinstance(ip_range, bytes): + ip_range_expected_url = expected_url + ip_range.decode('utf-8') + expected_body = '\n'.join( + [ + 'ip=' + ip_range.decode('utf-8'), + self.method, + '1400003600', + path, + ] + ).encode('utf-8') + else: + ip_range_expected_url = expected_url + ip_range + expected_body = '\n'.join( + [ + 'ip=' + ip_range, + self.method, + '1400003600', + path, + ] + ).encode('utf-8') + + self.assertEqual(url, ip_range_expected_url) + + self.assertEqual( + hmac_mock.mock_calls, + [ + mock.call(key, expected_body, sha1), + mock.call().hexdigest(), + ], + ) + self.assertIsInstance(url, type(path)) + + @mock.patch('hmac.HMAC') + def test_generate_temp_url_iso8601_argument(self, hmac_mock): + hmac_mock().hexdigest.return_value = 'temp_url_signature' + url = self.proxy.generate_temp_url( + self.url, + '2014-05-13T17:53:20Z', + self.method, + temp_url_key=self.key, + ) + self.assertEqual(url, self.expected_url) + + # Don't care about absolute arg. 
+ url = self.proxy.generate_temp_url( + self.url, + '2014-05-13T17:53:20Z', + self.method, + temp_url_key=self.key, + absolute=True, + ) + self.assertEqual(url, self.expected_url) + + lt = time.localtime() + expires = time.strftime(self.expires_iso8601_format[:-1], lt) + + if not isinstance(self.expected_url, str): + expected_url = self.expected_url.replace( + b'1400003600', + bytes(str(int(time.mktime(lt))), encoding='ascii'), + ) + else: + expected_url = self.expected_url.replace( + '1400003600', str(int(time.mktime(lt))) + ) + url = self.proxy.generate_temp_url( + self.url, expires, self.method, temp_url_key=self.key + ) + self.assertEqual(url, expected_url) + + expires = time.strftime(self.short_expires_iso8601_format, lt) + lt = time.strptime(expires, self.short_expires_iso8601_format) + + if not isinstance(self.expected_url, str): + expected_url = self.expected_url.replace( + b'1400003600', + bytes(str(int(time.mktime(lt))), encoding='ascii'), + ) + else: + expected_url = self.expected_url.replace( + '1400003600', str(int(time.mktime(lt))) + ) + url = self.proxy.generate_temp_url( + self.url, expires, self.method, temp_url_key=self.key + ) + self.assertEqual(url, expected_url) + + @mock.patch('hmac.HMAC') + @mock.patch('time.time', return_value=1400000000) + def test_generate_temp_url_iso8601_output(self, time_mock, hmac_mock): + hmac_mock().hexdigest.return_value = 'temp_url_signature' + url = self.proxy.generate_temp_url( + self.url, + self.seconds, + self.method, + temp_url_key=self.key, + iso8601=True, + ) + key = self.key + if not isinstance(key, bytes): + key = key.encode('utf-8') + + expires = time.strftime( + self.expires_iso8601_format, time.gmtime(1400003600) + ) + if not isinstance(self.url, str): + self.assertTrue(url.endswith(bytes(expires, 'utf-8'))) + else: + self.assertTrue(url.endswith(expires)) + self.assertEqual( + hmac_mock.mock_calls, + [ + mock.call(), + mock.call(key, self.expected_body, sha1), + mock.call().hexdigest(), + ], + ) + 
self.assertIsInstance(url, type(self.url)) + + @mock.patch('hmac.HMAC') + @mock.patch('time.time', return_value=1400000000) + def test_generate_temp_url_prefix(self, time_mock, hmac_mock): + hmac_mock().hexdigest.return_value = 'temp_url_signature' + prefixes = ['', 'o', 'p0/p1/'] + for p in prefixes: + hmac_mock.reset_mock() + path = '/v1/AUTH_account/c/' + p + expected_url = path + ( + '?temp_url_sig=temp_url_signature' + '&temp_url_expires=1400003600' + '&temp_url_prefix=' + p + ) + expected_body = '\n'.join( + [ + self.method, + '1400003600', + 'prefix:' + path, + ] + ).encode('utf-8') + url = self.proxy.generate_temp_url( + path, + self.seconds, + self.method, + prefix=True, + temp_url_key=self.key, + ) + key = self.key + if not isinstance(key, bytes): + key = key.encode('utf-8') + self.assertEqual(url, expected_url) + self.assertEqual( + hmac_mock.mock_calls, + [ + mock.call(key, expected_body, sha1), + mock.call().hexdigest(), + ], + ) + + self.assertIsInstance(url, type(path)) + + def test_generate_temp_url_invalid_path(self): + self.assertRaisesRegex( + ValueError, + 'path must be representable as UTF-8', + self.proxy.generate_temp_url, + b'/v1/a/c/\xff', + self.seconds, + self.method, + temp_url_key=self.key, + ) + + @mock.patch('hmac.HMAC.hexdigest', return_value="temp_url_signature") + def test_generate_absolute_expiry_temp_url(self, hmac_mock): + if isinstance(self.expected_url, bytes): + expected_url = self.expected_url.replace( + b'1400003600', b'2146636800' + ) + else: + expected_url = self.expected_url.replace( + '1400003600', '2146636800' + ) + url = self.proxy.generate_temp_url( + self.url, + 2146636800, + self.method, + absolute=True, + temp_url_key=self.key, + ) + self.assertEqual(url, expected_url) + + def test_generate_temp_url_bad_time(self): + for bad_time in [ + 'not_an_int', + -1, + 1.1, + '-1', + '1.1', + '2015-05', + '2015-05-01T01:00', + ]: + self.assertRaisesRegex( + ValueError, + self.time_errmsg, + self.proxy.generate_temp_url, + 
self.url, + bad_time, + self.method, + temp_url_key=self.key, + ) + + def test_generate_temp_url_bad_path(self): + for bad_path in [ + '/v1/a/c', + 'v1/a/c/o', + 'blah/v1/a/c/o', + '/v1//c/o', + '/v1/a/c/', + '/v1/a/c', + ]: + self.assertRaisesRegex( + ValueError, + self.path_errmsg, + self.proxy.generate_temp_url, + bad_path, + 60, + self.method, + temp_url_key=self.key, + ) + + +class TestTempURLUnicodePathAndKey(TestTempURL): + url = '/v1/\u00e4/c/\u00f3' + key = 'k\u00e9y' + expected_url = ( + f'{url}?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) + expected_body = '\n'.join( + [ + 'GET', + '1400003600', + url, + ] + ).encode('utf-8') + + +class TestTempURLUnicodePathBytesKey(TestTempURL): + url = '/v1/\u00e4/c/\u00f3' + key = 'k\u00e9y'.encode() + expected_url = ( + f'{url}?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) + expected_body = '\n'.join( + [ + 'GET', + '1400003600', + url, + ] + ).encode('utf-8') + + +class TestTempURLBytesPathUnicodeKey(TestTempURL): + url = '/v1/\u00e4/c/\u00f3'.encode() + key = 'k\u00e9y' + expected_url = url + ( + b'?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) + expected_body = b'\n'.join( + [ + b'GET', + b'1400003600', + url, + ] + ) + + +class TestTempURLBytesPathAndKey(TestTempURL): + url = '/v1/\u00e4/c/\u00f3'.encode() + key = 'k\u00e9y'.encode() + expected_url = url + ( + b'?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) + expected_body = b'\n'.join( + [ + b'GET', + b'1400003600', + url, + ] + ) + + +class TestTempURLBytesPathAndNonUtf8Key(TestTempURL): + url = '/v1/\u00e4/c/\u00f3'.encode() + key = b'k\xffy' + expected_url = url + ( + b'?temp_url_sig=temp_url_signature&temp_url_expires=1400003600' + ) + expected_body = b'\n'.join( + [ + b'GET', + b'1400003600', + url, + ] + ) diff --git a/openstack/tests/unit/orchestration/test_orchestration_service.py b/openstack/tests/unit/orchestration/test_orchestration_service.py deleted file mode 100644 index 
9d0840add6..0000000000 --- a/openstack/tests/unit/orchestration/test_orchestration_service.py +++ /dev/null @@ -1,29 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.orchestration import orchestration_service - - -class TestOrchestrationService(testtools.TestCase): - - def test_service(self): - sot = orchestration_service.OrchestrationService() - self.assertEqual('orchestration', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v1', sot.valid_versions[0].module) - self.assertEqual('v1', sot.valid_versions[0].path) - self.assertTrue(sot.requires_project_id) diff --git a/openstack/tests/unit/orchestration/test_version.py b/openstack/tests/unit/orchestration/test_version.py index 9943fece03..a8b91383ef 100644 --- a/openstack/tests/unit/orchestration/test_version.py +++ b/openstack/tests/unit/orchestration/test_version.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.orchestration import version +from openstack.tests.unit import base + IDENTIFIER = 'IDENTIFIER' EXAMPLE = { @@ -22,22 +22,20 @@ } -class TestVersion(testtools.TestCase): - +class TestVersion(base.TestCase): def test_basic(self): sot = version.Version() self.assertEqual('version', sot.resource_key) self.assertEqual('versions', sot.resources_key) self.assertEqual('/', sot.base_path) - self.assertEqual('orchestration', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) def test_make_it(self): - sot = version.Version(EXAMPLE) + sot = version.Version(**EXAMPLE) self.assertEqual(EXAMPLE['id'], sot.id) self.assertEqual(EXAMPLE['links'], sot.links) self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/orchestration/v1/hello_world.yaml b/openstack/tests/unit/orchestration/v1/hello_world.yaml new file mode 100644 index 0000000000..77651d203c --- /dev/null +++ b/openstack/tests/unit/orchestration/v1/hello_world.yaml @@ -0,0 +1,44 @@ +# +# Minimal HOT template defining a single compute server. 
+# +heat_template_version: 2013-05-23 + +description: > + Minimal HOT template for stack + +parameters: + key_name: + type: string + description: Name of an existing key pair to use for the server + constraints: + - custom_constraint: nova.keypair + flavor: + type: string + description: Flavor for the server to be created + default: m1.small + constraints: + - custom_constraint: nova.flavor + image: + type: string + description: Image ID or image name to use for the server + constraints: + - custom_constraint: glance.image + network: + type: string + description: Network used by the server + +resources: + server: + type: OS::Nova::Server + properties: + key_name: { get_param: key_name } + image: { get_param: image } + flavor: { get_param: flavor } + networks: [{network: {get_param: network} }] + metadata: + message: {get_file: helloworld.txt} + +outputs: + server_networks: + description: The networks of the deployed server + value: { get_attr: [server, networks] } diff --git a/openstack/tests/unit/orchestration/v1/helloworld.txt b/openstack/tests/unit/orchestration/v1/helloworld.txt new file mode 100644 index 0000000000..e965047ad7 --- /dev/null +++ b/openstack/tests/unit/orchestration/v1/helloworld.txt @@ -0,0 +1 @@ +Hello diff --git a/openstack/tests/unit/orchestration/v1/test_proxy.py b/openstack/tests/unit/orchestration/v1/test_proxy.py index 0da8050de2..c9321d3097 100644 --- a/openstack/tests/unit/orchestration/v1/test_proxy.py +++ b/openstack/tests/unit/orchestration/v1/test_proxy.py @@ -10,8 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import six +from unittest import mock + +from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack import exceptions from openstack.orchestration.v1 import _proxy @@ -19,34 +20,153 @@ from openstack.orchestration.v1 import software_config as sc from openstack.orchestration.v1 import software_deployment as sd from openstack.orchestration.v1 import stack +from openstack.orchestration.v1 import stack_environment +from openstack.orchestration.v1 import stack_event +from openstack.orchestration.v1 import stack_files +from openstack.orchestration.v1 import stack_template from openstack.orchestration.v1 import template -from openstack.tests.unit import test_proxy_base2 +from openstack import proxy +from openstack.tests.unit import test_proxy_base -class TestOrchestrationProxy(test_proxy_base2.TestProxyBase): +class TestOrchestrationProxy(test_proxy_base.TestProxyBase): def setUp(self): - super(TestOrchestrationProxy, self).setUp() + super().setUp() self.proxy = _proxy.Proxy(self.session) + +class TestOrchestrationStack(TestOrchestrationProxy): def test_create_stack(self): self.verify_create(self.proxy.create_stack, stack.Stack) def test_create_stack_preview(self): - method_kwargs = {"preview": True, "x": 1, "y": 2, "z": 3} - self.verify_create(self.proxy.create_stack, stack.StackPreview, - method_kwargs=method_kwargs) + self.verify_create( + self.proxy.create_stack, + stack.Stack, + method_kwargs={"preview": True, "x": 1, "y": 2, "z": 3}, + expected_kwargs={"x": 1, "y": 2, "z": 3}, + ) def test_find_stack(self): - self.verify_find(self.proxy.find_stack, stack.Stack) + self.verify_find( + self.proxy.find_stack, + stack.Stack, + expected_kwargs={'resolve_outputs': True}, + ) + # mock_method="openstack.proxy.Proxy._find" + # test_method=self.proxy.find_stack + # method_kwargs = { + # 'resolve_outputs': False, + # 'ignore_missing': False + # } + # method_args=["name_or_id"] + # self._verify( + # mock_method, test_method, + # 
method_args=method_args, + # method_kwargs=method_kwargs, + # expected_args=[stack.Stack, "name_or_id"], + # expected_kwargs=method_kwargs, + # expected_result="result") + # + # method_kwargs = { + # 'resolve_outputs': True, + # 'ignore_missing': True + # } + # self._verify( + # mock_method, test_method, + # method_args=method_args, + # method_kwargs=method_kwargs, + # expected_args=[stack.Stack, "name_or_id"], + # expected_kwargs=method_kwargs, + # expected_result="result") def test_stacks(self): - self.verify_list(self.proxy.stacks, stack.Stack, paginated=False) + self.verify_list(self.proxy.stacks, stack.Stack) def test_get_stack(self): - self.verify_get(self.proxy.get_stack, stack.Stack) + self.verify_get( + self.proxy.get_stack, + stack.Stack, + method_kwargs={'resolve_outputs': False}, + expected_kwargs={'resolve_outputs': False}, + ) + self.verify_get_overrided( + self.proxy, stack.Stack, 'openstack.orchestration.v1.stack.Stack' + ) def test_update_stack(self): - self.verify_update(self.proxy.update_stack, stack.Stack) + self._verify( + 'openstack.orchestration.v1.stack.Stack.commit', + self.proxy.update_stack, + expected_result='result', + method_args=['stack'], + method_kwargs={'preview': False}, + expected_args=[self.proxy, False], + ) + + def test_update_stack_preview(self): + self._verify( + 'openstack.orchestration.v1.stack.Stack.commit', + self.proxy.update_stack, + expected_result='result', + method_args=['stack'], + method_kwargs={'preview': True}, + expected_args=[self.proxy, True], + ) + + def test_abandon_stack(self): + self._verify( + 'openstack.orchestration.v1.stack.Stack.abandon', + self.proxy.abandon_stack, + expected_result='result', + method_args=['stack'], + expected_args=[self.proxy], + ) + + @mock.patch.object(stack.Stack, 'find') + def test_export_stack_with_identity(self, mock_find): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + mock_find.return_value = stk + + self._verify( + 
'openstack.orchestration.v1.stack.Stack.export', + self.proxy.export_stack, + method_args=['IDENTITY'], + expected_args=[self.proxy], + ) + mock_find.assert_called_once_with( + mock.ANY, 'IDENTITY', ignore_missing=False + ) + + def test_export_stack_with_object(self): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + + self._verify( + 'openstack.orchestration.v1.stack.Stack.export', + self.proxy.export_stack, + method_args=[stk], + expected_args=[self.proxy], + ) + + def test_suspend_stack(self): + self._verify( + 'openstack.orchestration.v1.stack.Stack.suspend', + self.proxy.suspend_stack, + method_args=['stack'], + expected_args=[self.proxy], + ) + + def test_resume_stack(self): + self._verify( + 'openstack.orchestration.v1.stack.Stack.resume', + self.proxy.resume_stack, + method_args=['stack'], + expected_args=[self.proxy], + ) def test_delete_stack(self): self.verify_delete(self.proxy.delete_stack, stack.Stack, False) @@ -61,7 +181,7 @@ def test_check_stack_with_stack_object(self, mock_check): res = self.proxy.check_stack(stk) self.assertIsNone(res) - mock_check.assert_called_once_with(self.proxy.session) + mock_check.assert_called_once_with(self.proxy) @mock.patch.object(stack.Stack, 'existing') def test_check_stack_with_stack_ID(self, mock_stack): @@ -72,18 +192,136 @@ def test_check_stack_with_stack_ID(self, mock_stack): self.assertIsNone(res) mock_stack.assert_called_once_with(id='FAKE_ID') - stk.check.assert_called_once_with(self.proxy.session) + stk.check.assert_called_once_with(self.proxy) + + +class TestOrchestrationStackEnvironment(TestOrchestrationProxy): + @mock.patch.object(stack.Stack, 'find') + def test_get_stack_environment_with_stack_identity(self, mock_find): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + mock_find.return_value = stk + + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_stack_environment, + 
method_args=['IDENTITY'], + expected_args=[stack_environment.StackEnvironment], + expected_kwargs={ + 'requires_id': False, + 'stack_name': stack_name, + 'stack_id': stack_id, + }, + ) + mock_find.assert_called_once_with( + mock.ANY, 'IDENTITY', ignore_missing=False + ) + + def test_get_stack_environment_with_stack_object(self): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_stack_environment, + method_args=[stk], + expected_args=[stack_environment.StackEnvironment], + expected_kwargs={ + 'requires_id': False, + 'stack_name': stack_name, + 'stack_id': stack_id, + }, + ) + + +class TestOrchestrationStackFiles(TestOrchestrationProxy): + @mock.patch.object(stack_files.StackFiles, 'fetch') + @mock.patch.object(stack.Stack, 'find') + def test_get_stack_files_with_stack_identity(self, mock_find, mock_fetch): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + mock_find.return_value = stk + mock_fetch.return_value = {'file': 'content'} + + res = self.proxy.get_stack_files('IDENTITY') + + self.assertEqual({'file': 'content'}, res) + mock_find.assert_called_once_with( + mock.ANY, 'IDENTITY', ignore_missing=False + ) + mock_fetch.assert_called_once_with(self.proxy) + + @mock.patch.object(stack_files.StackFiles, 'fetch') + def test_get_stack_files_with_stack_object(self, mock_fetch): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + mock_fetch.return_value = {'file': 'content'} + + res = self.proxy.get_stack_files(stk) + + self.assertEqual({'file': 'content'}, res) + mock_fetch.assert_called_once_with(self.proxy) + + +class TestOrchestrationStackTemplate(TestOrchestrationProxy): + @mock.patch.object(stack.Stack, 'find') + def test_get_stack_template_with_stack_identity(self, mock_find): + stack_id = '1234' + stack_name = 'test_stack' + stk = 
stack.Stack(id=stack_id, name=stack_name) + mock_find.return_value = stk + + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_stack_template, + method_args=['IDENTITY'], + expected_args=[stack_template.StackTemplate], + expected_kwargs={ + 'requires_id': False, + 'stack_name': stack_name, + 'stack_id': stack_id, + }, + ) + mock_find.assert_called_once_with( + mock.ANY, 'IDENTITY', ignore_missing=False + ) + + def test_get_stack_template_with_stack_object(self): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + + self._verify( + 'openstack.proxy.Proxy._get', + self.proxy.get_stack_template, + method_args=[stk], + expected_args=[stack_template.StackTemplate], + expected_kwargs={ + 'requires_id': False, + 'stack_name': stack_name, + 'stack_id': stack_id, + }, + ) + +class TestOrchestrationResource(TestOrchestrationProxy): @mock.patch.object(stack.Stack, 'find') def test_resources_with_stack_object(self, mock_find): stack_id = '1234' stack_name = 'test_stack' stk = stack.Stack(id=stack_id, name=stack_name) - self.verify_list(self.proxy.resources, resource.Resource, - paginated=False, method_args=[stk], - expected_kwargs={'stack_name': stack_name, - 'stack_id': stack_id}) + self.verify_list( + self.proxy.resources, + resource.Resource, + method_args=[stk], + expected_args=[], + expected_kwargs={'stack_name': stack_name, 'stack_id': stack_id}, + ) self.assertEqual(0, mock_find.call_count) @@ -94,65 +332,84 @@ def test_resources_with_stack_name(self, mock_find): stk = stack.Stack(id=stack_id, name=stack_name) mock_find.return_value = stk - self.verify_list(self.proxy.resources, resource.Resource, - paginated=False, method_args=[stack_id], - expected_kwargs={'stack_name': stack_name, - 'stack_id': stack_id}) + self.verify_list( + self.proxy.resources, + resource.Resource, + method_args=[stack_id], + expected_args=[], + expected_kwargs={'stack_name': stack_name, 'stack_id': stack_id}, + ) - 
mock_find.assert_called_once_with(mock.ANY, stack_id, - ignore_missing=False) + mock_find.assert_called_once_with( + mock.ANY, stack_id, ignore_missing=False + ) @mock.patch.object(stack.Stack, 'find') @mock.patch.object(resource.Resource, 'list') def test_resources_stack_not_found(self, mock_list, mock_find): stack_name = 'test_stack' - mock_find.side_effect = exceptions.ResourceNotFound( - 'No stack found for test_stack') + mock_find.side_effect = exceptions.NotFoundException( + 'No stack found for test_stack' + ) - ex = self.assertRaises(exceptions.ResourceNotFound, - self.proxy.resources, stack_name) - self.assertEqual('ResourceNotFound: No stack found for test_stack', - six.text_type(ex)) + ex = self.assertRaises( + exceptions.NotFoundException, self.proxy.resources, stack_name + ) + self.assertEqual('No stack found for test_stack', str(ex)) + +class TestOrchestrationSoftwareConfig(TestOrchestrationProxy): def test_create_software_config(self): - self.verify_create(self.proxy.create_software_config, - sc.SoftwareConfig) + self.verify_create( + self.proxy.create_software_config, sc.SoftwareConfig + ) def test_software_configs(self): - self.verify_list(self.proxy.software_configs, sc.SoftwareConfig, - paginated=True) + self.verify_list(self.proxy.software_configs, sc.SoftwareConfig) def test_get_software_config(self): self.verify_get(self.proxy.get_software_config, sc.SoftwareConfig) def test_delete_software_config(self): - self.verify_delete(self.proxy.delete_software_config, - sc.SoftwareConfig, True) - self.verify_delete(self.proxy.delete_software_config, - sc.SoftwareConfig, False) + self.verify_delete( + self.proxy.delete_software_config, sc.SoftwareConfig, True + ) + self.verify_delete( + self.proxy.delete_software_config, sc.SoftwareConfig, False + ) + +class TestOrchestrationSoftwareDeployment(TestOrchestrationProxy): def test_create_software_deployment(self): - self.verify_create(self.proxy.create_software_deployment, - sd.SoftwareDeployment) + 
self.verify_create( + self.proxy.create_software_deployment, sd.SoftwareDeployment + ) def test_software_deployments(self): - self.verify_list(self.proxy.software_deployments, - sd.SoftwareDeployment, paginated=False) + self.verify_list( + self.proxy.software_deployments, sd.SoftwareDeployment + ) def test_get_software_deployment(self): - self.verify_get(self.proxy.get_software_deployment, - sd.SoftwareDeployment) + self.verify_get( + self.proxy.get_software_deployment, sd.SoftwareDeployment + ) def test_update_software_deployment(self): - self.verify_update(self.proxy.update_software_deployment, - sd.SoftwareDeployment) + self.verify_update( + self.proxy.update_software_deployment, sd.SoftwareDeployment + ) def test_delete_software_deployment(self): - self.verify_delete(self.proxy.delete_software_deployment, - sd.SoftwareDeployment, True) - self.verify_delete(self.proxy.delete_software_deployment, - sd.SoftwareDeployment, False) + self.verify_delete( + self.proxy.delete_software_deployment, sd.SoftwareDeployment, True + ) + self.verify_delete( + self.proxy.delete_software_deployment, sd.SoftwareDeployment, False + ) + +class TestOrchestrationTemplate(TestOrchestrationProxy): @mock.patch.object(template.Template, 'validate') def test_validate_template(self, mock_validate): tmpl = mock.Mock() @@ -163,13 +420,139 @@ def test_validate_template(self, mock_validate): res = self.proxy.validate_template(tmpl, env, tmpl_url, ignore_errors) mock_validate.assert_called_once_with( - self.proxy.session, tmpl, environment=env, template_url=tmpl_url, - ignore_errors=ignore_errors) + self.proxy, + tmpl, + environment=env, + template_url=tmpl_url, + ignore_errors=ignore_errors, + ) self.assertEqual(mock_validate.return_value, res) + def test_validate_template_no_env(self): + tmpl = "openstack/tests/unit/orchestration/v1/hello_world.yaml" + + res = self.proxy.read_env_and_templates(tmpl) + + self.assertIsInstance(res, dict) + self.assertIsInstance(res["files"], dict) + def 
test_validate_template_invalid_request(self): - err = self.assertRaises(exceptions.InvalidRequest, - self.proxy.validate_template, - None, template_url=None) - self.assertEqual("'template_url' must be specified when template is " - "None", six.text_type(err)) + err = self.assertRaises( + exceptions.InvalidRequest, + self.proxy.validate_template, + None, + template_url=None, + ) + self.assertEqual( + "'template_url' must be specified when template is None", + str(err), + ) + + +class TestExtractName(TestOrchestrationProxy): + scenarios = [ + ('stacks', dict(url='/stacks', parts=['stacks'])), + ('name_id', dict(url='/stacks/name/id', parts=['stack'])), + ('identity', dict(url='/stacks/id', parts=['stack'])), + ( + 'preview', + dict(url='/stacks/name/preview', parts=['stack', 'preview']), + ), + ( + 'stack_act', + dict(url='/stacks/name/id/preview', parts=['stack', 'preview']), + ), + ( + 'stack_subres', + dict( + url='/stacks/name/id/resources', parts=['stack', 'resources'] + ), + ), + ( + 'stack_subres_id', + dict( + url='/stacks/name/id/resources/id', parts=['stack', 'resource'] + ), + ), + ( + 'stack_subres_id_act', + dict( + url='/stacks/name/id/resources/id/action', + parts=['stack', 'resource', 'action'], + ), + ), + ( + 'event', + dict( + url='/stacks/ignore/ignore/resources/ignore/events/id', + parts=['stack', 'resource', 'event'], + ), + ), + ( + 'sd_metadata', + dict( + url='/software_deployments/metadata/ignore', + parts=['software_deployment', 'metadata'], + ), + ), + ] + + def test_extract_name(self): + results = self.proxy._extract_name(self.url) + self.assertEqual(self.parts, results) + + +class TestOrchestrationStackEvents(TestOrchestrationProxy): + def test_stack_events_with_stack_object(self): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + + self._verify( + 'openstack.proxy.Proxy._list', + self.proxy.stack_events, + method_args=[stk], + expected_args=[stack_event.StackEvent], + expected_kwargs={ + 
'stack_name': stack_name, + 'stack_id': stack_id, + }, + ) + + @mock.patch.object(proxy.Proxy, '_get') + def test_stack_events_with_stack_id(self, mock_get): + stack_id = '1234' + stack_name = 'test_stack' + stk = stack.Stack(id=stack_id, name=stack_name) + mock_get.return_value = stk + + self._verify( + 'openstack.proxy.Proxy._list', + self.proxy.stack_events, + method_args=[stk], + expected_args=[stack_event.StackEvent], + expected_kwargs={ + 'stack_name': stack_name, + 'stack_id': stack_id, + }, + ) + + def test_stack_events_with_resource_name(self): + stack_id = '1234' + stack_name = 'test_stack' + resource_name = 'id' + base_path = '/stacks/%(stack_name)s/%(stack_id)s/resources/%(resource_name)s/events' # noqa: E501 + stk = stack.Stack(id=stack_id, name=stack_name) + + self._verify( + 'openstack.proxy.Proxy._list', + self.proxy.stack_events, + method_args=[stk, resource_name], + expected_args=[stack_event.StackEvent], + expected_kwargs={ + 'stack_name': stack_name, + 'stack_id': stack_id, + 'resource_name': resource_name, + 'base_path': base_path, + }, + ) diff --git a/openstack/tests/unit/orchestration/v1/test_resource.py b/openstack/tests/unit/orchestration/v1/test_resource.py index b4080ac3f7..964d8b9d83 100644 --- a/openstack/tests/unit/orchestration/v1/test_resource.py +++ b/openstack/tests/unit/orchestration/v1/test_resource.py @@ -10,21 +10,17 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.orchestration.v1 import resource +from openstack.tests.unit import base FAKE_ID = '32e39358-2422-4ad0-a1b5-dd60696bf564' FAKE_NAME = 'test_stack' FAKE = { - 'links': [{ - 'href': 'http://res_link', - 'rel': 'self' - }, { - 'href': 'http://stack_link', - 'rel': 'stack' - }], + 'links': [ + {'href': 'http://res_link', 'rel': 'self'}, + {'href': 'http://stack_link', 'rel': 'stack'}, + ], 'logical_resource_id': 'the_resource', 'name': 'the_resource', 'physical_resource_id': '9f38ab5a-37c8-4e40-9702-ce27fc5f6954', @@ -36,18 +32,17 @@ } -class TestResource(testtools.TestCase): - +class TestResource(base.TestCase): def test_basic(self): sot = resource.Resource() self.assertEqual('resource', sot.resource_key) self.assertEqual('resources', sot.resources_key) - self.assertEqual('/stacks/%(stack_name)s/%(stack_id)s/resources', - sot.base_path) - self.assertEqual('orchestration', sot.service.service_type) + self.assertEqual( + '/stacks/%(stack_name)s/%(stack_id)s/resources', sot.base_path + ) self.assertFalse(sot.allow_create) self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -56,8 +51,9 @@ def test_make_it(self): self.assertEqual(FAKE['links'], sot.links) self.assertEqual(FAKE['logical_resource_id'], sot.logical_resource_id) self.assertEqual(FAKE['name'], sot.name) - self.assertEqual(FAKE['physical_resource_id'], - sot.physical_resource_id) + self.assertEqual( + FAKE['physical_resource_id'], sot.physical_resource_id + ) self.assertEqual(FAKE['required_by'], sot.required_by) self.assertEqual(FAKE['resource_type'], sot.resource_type) self.assertEqual(FAKE['status'], sot.status) diff --git a/openstack/tests/unit/orchestration/v1/test_software_config.py b/openstack/tests/unit/orchestration/v1/test_software_config.py index aa752a7ba4..3b4aa3fd2d 100644 --- 
a/openstack/tests/unit/orchestration/v1/test_software_config.py +++ b/openstack/tests/unit/orchestration/v1/test_software_config.py @@ -10,9 +10,8 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack.orchestration.v1 import software_config +from openstack.tests.unit import base FAKE_ID = 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf' @@ -29,17 +28,15 @@ } -class TestSoftwareConfig(testtools.TestCase): - +class TestSoftwareConfig(base.TestCase): def test_basic(self): sot = software_config.SoftwareConfig() self.assertEqual('software_config', sot.resource_key) self.assertEqual('software_configs', sot.resources_key) self.assertEqual('/software_configs', sot.base_path) - self.assertEqual('orchestration', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) diff --git a/openstack/tests/unit/orchestration/v1/test_software_deployment.py b/openstack/tests/unit/orchestration/v1/test_software_deployment.py index cb03ac0fd9..8a67105e5a 100644 --- a/openstack/tests/unit/orchestration/v1/test_software_deployment.py +++ b/openstack/tests/unit/orchestration/v1/test_software_deployment.py @@ -10,9 +10,9 @@ # License for the specific language governing permissions and limitations # under the License. 
-import testtools - from openstack.orchestration.v1 import software_deployment +from openstack.tests.unit import base + FAKE = { 'id': 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf', @@ -29,17 +29,15 @@ } -class TestSoftwareDeployment(testtools.TestCase): - +class TestSoftwareDeployment(base.TestCase): def test_basic(self): sot = software_deployment.SoftwareDeployment() self.assertEqual('software_deployment', sot.resource_key) self.assertEqual('software_deployments', sot.resources_key) self.assertEqual('/software_deployments', sot.base_path) - self.assertEqual('orchestration', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) @@ -50,8 +48,9 @@ def test_make_it(self): self.assertEqual(FAKE['config_id'], sot.config_id) self.assertEqual(FAKE['creation_time'], sot.created_at) self.assertEqual(FAKE['server_id'], sot.server_id) - self.assertEqual(FAKE['stack_user_project_id'], - sot.stack_user_project_id) + self.assertEqual( + FAKE['stack_user_project_id'], sot.stack_user_project_id + ) self.assertEqual(FAKE['input_values'], sot.input_values) self.assertEqual(FAKE['output_values'], sot.output_values) self.assertEqual(FAKE['status'], sot.status) diff --git a/openstack/tests/unit/orchestration/v1/test_stack.py b/openstack/tests/unit/orchestration/v1/test_stack.py index 13ed0d2928..5b9c72e900 100644 --- a/openstack/tests/unit/orchestration/v1/test_stack.py +++ b/openstack/tests/unit/orchestration/v1/test_stack.py @@ -10,32 +10,35 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import six -import testtools +from unittest import mock from openstack import exceptions from openstack.orchestration.v1 import stack -from openstack import resource2 as resource - +from openstack import resource +from openstack.tests.unit import base +from openstack.tests.unit import test_resource FAKE_ID = 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf' FAKE_NAME = 'test_stack' FAKE = { 'capabilities': '1', 'creation_time': '2015-03-09T12:15:57.233772', + 'deletion_time': '2015-03-09T12:15:57.233772', 'description': '3', 'disable_rollback': True, + 'environment': {'var1': 'val1'}, + 'environment_files': [], + 'files': {'file1': 'content'}, + 'files_container': 'dummy_container', 'id': FAKE_ID, - 'links': [{ - 'href': 'stacks/%s/%s' % (FAKE_NAME, FAKE_ID), - 'rel': 'self'}], + 'links': [{'href': f'stacks/{FAKE_NAME}/{FAKE_ID}', 'rel': 'self'}], 'notification_topics': '7', 'outputs': '8', 'parameters': {'OS::stack_id': '9'}, 'name': FAKE_NAME, 'status': '11', 'status_reason': '12', + 'tags': ['FOO', 'bar:1'], 'template_description': '13', 'template_url': 'http://www.example.com/wordpress.yaml', 'timeout_mins': '14', @@ -44,44 +47,131 @@ FAKE_CREATE_RESPONSE = { 'stack': { 'id': FAKE_ID, - 'links': [{ - 'href': 'stacks/%s/%s' % (FAKE_NAME, FAKE_ID), - 'rel': 'self'}]} + 'links': [{'href': f'stacks/{FAKE_NAME}/{FAKE_ID}', 'rel': 'self'}], + } +} +FAKE_UPDATE_PREVIEW_RESPONSE = { + 'unchanged': [ + { + 'updated_time': 'datetime', + 'resource_name': '', + 'physical_resource_id': '{resource id or }', + 'resource_action': 'CREATE', + 'resource_status': 'COMPLETE', + 'resource_status_reason': '', + 'resource_type': 'restype', + 'stack_identity': '{stack_id}', + 'stack_name': '{stack_name}', + } + ], + 'updated': [ + { + 'updated_time': 'datetime', + 'resource_name': '', + 'physical_resource_id': '{resource id or }', + 'resource_action': 'CREATE', + 'resource_status': 'COMPLETE', + 'resource_status_reason': '', + 'resource_type': 'restype', + 'stack_identity': 
'{stack_id}', + 'stack_name': '{stack_name}', + } + ], + 'replaced': [ + { + 'updated_time': 'datetime', + 'resource_name': '', + 'physical_resource_id': '{resource id or }', + 'resource_action': 'CREATE', + 'resource_status': 'COMPLETE', + 'resource_status_reason': '', + 'resource_type': 'restype', + 'stack_identity': '{stack_id}', + 'stack_name': '{stack_name}', + } + ], + 'added': [ + { + 'updated_time': 'datetime', + 'resource_name': '', + 'physical_resource_id': '{resource id or }', + 'resource_action': 'CREATE', + 'resource_status': 'COMPLETE', + 'resource_status_reason': '', + 'resource_type': 'restype', + 'stack_identity': '{stack_id}', + 'stack_name': '{stack_name}', + } + ], + 'deleted': [ + { + 'updated_time': 'datetime', + 'resource_name': '', + 'physical_resource_id': '{resource id or }', + 'resource_action': 'CREATE', + 'resource_status': 'COMPLETE', + 'resource_status_reason': '', + 'resource_type': 'restype', + 'stack_identity': '{stack_id}', + 'stack_name': '{stack_name}', + } + ], } -class TestStack(testtools.TestCase): - +class TestStack(base.TestCase): def test_basic(self): sot = stack.Stack() self.assertEqual('stack', sot.resource_key) self.assertEqual('stacks', sot.resources_key) self.assertEqual('/stacks', sot.base_path) - self.assertEqual('orchestration', sot.service.service_type) self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertTrue(sot.allow_update) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) self.assertTrue(sot.allow_delete) self.assertTrue(sot.allow_list) + self.assertDictEqual( + { + 'action': 'action', + 'any_tags': 'tags-any', + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'not_any_tags': 'not-tags-any', + 'not_tags': 'not-tags', + 'owner_id': 'owner_id', + 'project_id': 'tenant_id', + 'status': 'status', + 'tags': 'tags', + 'username': 'username', + }, + sot._query_mapping._mapping, + ) + def test_make_it(self): sot = stack.Stack(**FAKE) 
self.assertEqual(FAKE['capabilities'], sot.capabilities) self.assertEqual(FAKE['creation_time'], sot.created_at) + self.assertEqual(FAKE['deletion_time'], sot.deleted_at) self.assertEqual(FAKE['description'], sot.description) + self.assertEqual(FAKE['environment'], sot.environment) + self.assertEqual(FAKE['environment_files'], sot.environment_files) + self.assertEqual(FAKE['files'], sot.files) + self.assertEqual(FAKE['files_container'], sot.files_container) self.assertTrue(sot.is_rollback_disabled) self.assertEqual(FAKE['id'], sot.id) self.assertEqual(FAKE['links'], sot.links) - self.assertEqual(FAKE['notification_topics'], - sot.notification_topics) + self.assertEqual(FAKE['notification_topics'], sot.notification_topics) self.assertEqual(FAKE['outputs'], sot.outputs) self.assertEqual(FAKE['parameters'], sot.parameters) self.assertEqual(FAKE['name'], sot.name) self.assertEqual(FAKE['status'], sot.status) - self.assertEqual(FAKE['status_reason'], - sot.status_reason) - self.assertEqual(FAKE['template_description'], - sot.template_description) + self.assertEqual(FAKE['status_reason'], sot.status_reason) + self.assertEqual(FAKE['tags'], sot.tags) + self.assertEqual( + FAKE['template_description'], sot.template_description + ) self.assertEqual(FAKE['template_url'], sot.template_url) self.assertEqual(FAKE['timeout_mins'], sot.timeout_mins) self.assertEqual(FAKE['updated_time'], sot.updated_at) @@ -89,62 +179,184 @@ def test_make_it(self): @mock.patch.object(resource.Resource, 'create') def test_create(self, mock_create): sess = mock.Mock() - sot = stack.Stack(FAKE) + sot = stack.Stack() res = sot.create(sess) - mock_create.assert_called_once_with(sess, prepend_key=False) + mock_create.assert_called_once_with(sess, False) self.assertEqual(mock_create.return_value, res) - @mock.patch.object(resource.Resource, 'update') - def test_update(self, mock_update): - sess = mock.Mock() - sot = stack.Stack(FAKE) - - res = sot.update(sess) - - 
mock_update.assert_called_once_with(sess, prepend_key=False, - has_body=False) - self.assertEqual(mock_update.return_value, res) - def test_check(self): sess = mock.Mock() sot = stack.Stack(**FAKE) sot._action = mock.Mock() + sot._action.side_effect = [ + test_resource.FakeResponse(None, 200, None), + exceptions.BadRequestException(message='oops'), + exceptions.NotFoundException(message='oops'), + ] body = {'check': ''} sot.check(sess) - sot._action.assert_called_with(sess, body) - @mock.patch.object(resource.Resource, 'get') - def test_get(self, mock_get): + self.assertRaises(exceptions.BadRequestException, sot.check, sess) + self.assertRaises(exceptions.NotFoundException, sot.check, sess) + + def test_fetch(self): sess = mock.Mock() + sess.default_microversion = None sot = stack.Stack(**FAKE) - deleted_stack = mock.Mock(id=FAKE_ID, status='DELETE_COMPLETE') - normal_stack = mock.Mock(status='CREATE_COMPLETE') - mock_get.side_effect = [ - normal_stack, + + sess.get = mock.Mock() + sess.get.side_effect = [ + test_resource.FakeResponse( + {'stack': {'stack_status': 'CREATE_COMPLETE'}}, 200 + ), + test_resource.FakeResponse( + {'stack': {'stack_status': 'CREATE_COMPLETE'}}, 200 + ), exceptions.NotFoundException(message='oops'), - deleted_stack, + test_resource.FakeResponse( + {'stack': {'stack_status': 'DELETE_COMPLETE'}}, 200 + ), ] - self.assertEqual(normal_stack, sot.get(sess)) - ex = self.assertRaises(exceptions.NotFoundException, sot.get, sess) - self.assertEqual('NotFoundException: oops', six.text_type(ex)) - ex = self.assertRaises(exceptions.NotFoundException, sot.get, sess) - self.assertEqual('NotFoundException: No stack found for %s' % FAKE_ID, - six.text_type(ex)) + self.assertEqual(sot, sot.fetch(sess)) + sess.get.assert_called_with( + f'stacks/{sot.id}', + microversion=None, + skip_cache=False, + ) + sot.fetch(sess, resolve_outputs=False) + sess.get.assert_called_with( + f'stacks/{sot.id}?resolve_outputs=False', + microversion=None, + skip_cache=False, + 
) + ex = self.assertRaises(exceptions.NotFoundException, sot.fetch, sess) + self.assertEqual('oops', str(ex)) + ex = self.assertRaises(exceptions.NotFoundException, sot.fetch, sess) + self.assertEqual(f'No stack found for {FAKE_ID}', str(ex)) + def test_abandon(self): + sess = mock.Mock() + sess.default_microversion = None -class TestStackPreview(testtools.TestCase): + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.headers = {} + mock_response.json.return_value = {} + sess.delete = mock.Mock(return_value=mock_response) + sot = stack.Stack(**FAKE) - def test_basic(self): - sot = stack.StackPreview() + sot.abandon(sess) - self.assertEqual('/stacks/preview', sot.base_path) - self.assertTrue(sot.allow_create) - self.assertFalse(sot.allow_list) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) + sess.delete.assert_called_with( + f'stacks/{FAKE_NAME}/{FAKE_ID}/abandon', + ) + + def test_export(self): + sess = mock.Mock() + sess.default_microversion = None + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.headers = {} + mock_response.json.return_value = {} + sess.get = mock.Mock(return_value=mock_response) + sot = stack.Stack(**FAKE) + + sot.export(sess) + + sess.get.assert_called_with( + f'stacks/{FAKE_NAME}/{FAKE_ID}/export', + ) + + def test_commit(self): + sess = mock.Mock() + sess.default_microversion = None + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.headers = {} + mock_response.json.return_value = {} + sess.put = mock.Mock(return_value=mock_response) + sot = stack.Stack(**FAKE) + body = sot._body.dirty.copy() + + sot.commit(sess) + + sess.put.assert_called_with( + f'/stacks/{FAKE_NAME}/{FAKE_ID}', + headers={}, + microversion=None, + json=body, + ) + + def test_commit_preview(self): + sess = mock.Mock() + sess.default_microversion = None + + mock_response = mock.Mock() + mock_response.status_code = 200 + 
mock_response.headers = {} + mock_response.json.return_value = FAKE_UPDATE_PREVIEW_RESPONSE.copy() + sess.put = mock.Mock(return_value=mock_response) + sot = stack.Stack(**FAKE) + body = sot._body.dirty.copy() + + ret = sot.commit(sess, preview=True) + + sess.put.assert_called_with( + f'stacks/{FAKE_NAME}/{FAKE_ID}/preview', + headers={}, + microversion=None, + json=body, + ) + + self.assertEqual(FAKE_UPDATE_PREVIEW_RESPONSE['added'], ret.added) + self.assertEqual(FAKE_UPDATE_PREVIEW_RESPONSE['deleted'], ret.deleted) + self.assertEqual( + FAKE_UPDATE_PREVIEW_RESPONSE['replaced'], ret.replaced + ) + self.assertEqual( + FAKE_UPDATE_PREVIEW_RESPONSE['unchanged'], ret.unchanged + ) + self.assertEqual(FAKE_UPDATE_PREVIEW_RESPONSE['updated'], ret.updated) + + def test_suspend(self): + sess = mock.Mock() + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.headers = {} + mock_response.json.return_value = {} + sess.post = mock.Mock(return_value=mock_response) + url = f"stacks/{FAKE_ID}/actions" + body = {"suspend": None} + sot = stack.Stack(**FAKE) + + res = sot.suspend(sess) + + self.assertIsNone(res) + sess.post.assert_called_with(url, json=body, microversion=None) + + def test_resume(self): + sess = mock.Mock() + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.headers = {} + mock_response.json.return_value = {} + sess.post = mock.Mock(return_value=mock_response) + url = f"stacks/{FAKE_ID}/actions" + + body = {"resume": None} + + sot = stack.Stack(**FAKE) + + res = sot.resume(sess) + + self.assertIsNone(res) + sess.post.assert_called_with(url, json=body, microversion=None) diff --git a/openstack/tests/unit/orchestration/v1/test_stack_environment.py b/openstack/tests/unit/orchestration/v1/test_stack_environment.py new file mode 100644 index 0000000000..5c7291f9f3 --- /dev/null +++ b/openstack/tests/unit/orchestration/v1/test_stack_environment.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 
(the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.orchestration.v1 import stack_environment as se +from openstack.tests.unit import base + + +FAKE = { + 'encrypted_param_names': ['n1', 'n2'], + 'event_sinks': {'s1': 'v1'}, + 'parameters': {'key_name': {'type': 'string'}}, + 'parameter_defaults': {'p1': 'def1'}, + 'resource_registry': {'resources': {'type1': 'type2'}}, +} + + +class TestStackTemplate(base.TestCase): + def test_basic(self): + sot = se.StackEnvironment() + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = se.StackEnvironment(**FAKE) + self.assertEqual( + FAKE['encrypted_param_names'], sot.encrypted_param_names + ) + self.assertEqual(FAKE['event_sinks'], sot.event_sinks) + self.assertEqual(FAKE['parameters'], sot.parameters) + self.assertEqual(FAKE['parameter_defaults'], sot.parameter_defaults) + self.assertEqual(FAKE['resource_registry'], sot.resource_registry) diff --git a/openstack/tests/unit/orchestration/v1/test_stack_event.py b/openstack/tests/unit/orchestration/v1/test_stack_event.py new file mode 100644 index 0000000000..913792fee2 --- /dev/null +++ b/openstack/tests/unit/orchestration/v1/test_stack_event.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.orchestration.v1 import stack_event +from openstack.tests.unit import base + + +FAKE_ID = 'ce8ae86c-9810-4cb1-8888-7fb53bc523bf' +FAKE_NAME = 'test_stack' +FAKE = { + 'event_time': '2015-03-09T12:15:57.233772', + 'id': FAKE_ID, + 'links': [{'href': f'stacks/{FAKE_NAME}/{FAKE_ID}', 'rel': 'self'}], + 'logical_resource_id': 'my_test_group', + 'physical_resource_id': 'my_test_group', + 'resource_name': 'my_test_resource', + 'resource_status': 'CREATE_IN_PROGRESS', + 'resource_status_reason': 'state changed', +} + + +class TestStackEvent(base.TestCase): + def test_basic(self): + sot = stack_event.StackEvent() + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = stack_event.StackEvent(**FAKE) + self.assertEqual(FAKE['event_time'], sot.event_time) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['links'], sot.links) + self.assertEqual(FAKE['logical_resource_id'], sot.logical_resource_id) + self.assertEqual( + FAKE['physical_resource_id'], sot.physical_resource_id + ) + self.assertEqual(FAKE['resource_name'], sot.resource_name) + self.assertEqual(FAKE['resource_status'], sot.resource_status) + self.assertEqual( + FAKE['resource_status_reason'], sot.resource_status_reason + ) diff --git a/openstack/tests/unit/orchestration/v1/test_stack_files.py b/openstack/tests/unit/orchestration/v1/test_stack_files.py new file mode 100644 index 0000000000..6b4527f60d --- /dev/null +++ 
b/openstack/tests/unit/orchestration/v1/test_stack_files.py @@ -0,0 +1,56 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from unittest import mock + +from openstack.orchestration.v1 import stack_files as sf +from openstack import resource +from openstack.tests.unit import base + +FAKE = {'stack_id': 'ID', 'stack_name': 'NAME'} + + +class TestStackFiles(base.TestCase): + def test_basic(self): + sot = sf.StackFiles() + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = sf.StackFiles(**FAKE) + self.assertEqual(FAKE['stack_id'], sot.stack_id) + self.assertEqual(FAKE['stack_name'], sot.stack_name) + + @mock.patch.object(resource.Resource, '_prepare_request') + def test_get(self, mock_prepare_request): + resp = mock.Mock() + resp.json = mock.Mock(return_value={'file': 'file-content'}) + + sess = mock.Mock() + sess.get = mock.Mock(return_value=resp) + + sot = sf.StackFiles(**FAKE) + + req = mock.MagicMock() + req.url = '/stacks/{stack_name}/{stack_id}/files'.format( + stack_name=FAKE['stack_name'], + stack_id=FAKE['stack_id'], + ) + mock_prepare_request.return_value = req + + files = sot.fetch(sess) + + sess.get.assert_called_once_with(req.url) + self.assertEqual({'file': 'file-content'}, files) diff --git a/openstack/tests/unit/orchestration/v1/test_stack_template.py 
b/openstack/tests/unit/orchestration/v1/test_stack_template.py new file mode 100644 index 0000000000..1a9ab8e94e --- /dev/null +++ b/openstack/tests/unit/orchestration/v1/test_stack_template.py @@ -0,0 +1,75 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import copy + +from openstack.orchestration.v1 import stack_template +from openstack.tests.unit import base + + +FAKE = { + 'description': 'template description', + 'heat_template_version': '2014-10-16', + 'parameters': {'key_name': {'type': 'string'}}, + 'resources': {'resource1': {'type': 'ResourceType'}}, + 'conditions': {'cd1': True}, + 'outputs': {'key1': 'value1'}, +} + + +class TestStackTemplate(base.TestCase): + def test_basic(self): + sot = stack_template.StackTemplate() + self.assertFalse(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + + def test_make_it(self): + sot = stack_template.StackTemplate(**FAKE) + self.assertEqual(FAKE['description'], sot.description) + self.assertEqual( + FAKE['heat_template_version'], sot.heat_template_version + ) + self.assertEqual(FAKE['outputs'], sot.outputs) + self.assertEqual(FAKE['parameters'], sot.parameters) + self.assertEqual(FAKE['resources'], sot.resources) + self.assertEqual(FAKE['conditions'], sot.conditions) + + def test_to_dict(self): + fake_sot = copy.deepcopy(FAKE) + fake_sot['parameter_groups'] = [ + { + "description": "server parameters", + 
"parameters": ["key_name", "image_id"], + "label": "server_parameters", + } + ] + fake_sot['location'] = None + fake_sot['id'] = None + fake_sot['name'] = None + + for temp_version in [ + '2016-10-14', + '2017-02-24', + '2017-02-24', + '2017-09-01', + '2018-03-02', + 'newton', + 'ocata', + 'pike', + 'queens', + ]: + fake_sot['heat_template_version'] = temp_version + sot = stack_template.StackTemplate(**fake_sot) + self.assertEqual(fake_sot, sot.to_dict()) diff --git a/openstack/tests/unit/orchestration/v1/test_template.py b/openstack/tests/unit/orchestration/v1/test_template.py index c92b0ae799..27bb183763 100644 --- a/openstack/tests/unit/orchestration/v1/test_template.py +++ b/openstack/tests/unit/orchestration/v1/test_template.py @@ -10,35 +10,25 @@ # License for the specific language governing permissions and limitations # under the License. -import mock -import testtools +from unittest import mock from openstack.orchestration.v1 import template -from openstack import resource2 as resource - +from openstack import resource +from openstack.tests.unit import base FAKE = { 'Description': 'Blah blah', - 'Parameters': { - 'key_name': { - 'type': 'string' - } - }, - 'ParameterGroups': [{ - 'label': 'Group 1', - 'parameters': ['key_name'] - }] + 'Parameters': {'key_name': {'type': 'string'}}, + 'ParameterGroups': [{'label': 'Group 1', 'parameters': ['key_name']}], } -class TestTemplate(testtools.TestCase): - +class TestTemplate(base.TestCase): def test_basic(self): sot = template.Template() - self.assertEqual('orchestration', sot.service.service_type) self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) self.assertFalse(sot.allow_delete) self.assertFalse(sot.allow_list) @@ -57,8 +47,7 @@ def test_validate(self, mock_translate): sot.validate(sess, tmpl) - sess.post.assert_called_once_with( - '/validate', endpoint_filter=sot.service, json=body) + 
sess.post.assert_called_once_with('/validate', json=body) mock_translate.assert_called_once_with(sess.post.return_value) @mock.patch.object(resource.Resource, '_translate_response') @@ -71,8 +60,7 @@ def test_validate_with_env(self, mock_translate): sot.validate(sess, tmpl, environment=env) - sess.post.assert_called_once_with( - '/validate', endpoint_filter=sot.service, json=body) + sess.post.assert_called_once_with('/validate', json=body) mock_translate.assert_called_once_with(sess.post.return_value) @mock.patch.object(resource.Resource, '_translate_response') @@ -84,8 +72,7 @@ def test_validate_with_template_url(self, mock_translate): sot.validate(sess, None, template_url=template_url) - sess.post.assert_called_once_with( - '/validate', endpoint_filter=sot.service, json=body) + sess.post.assert_called_once_with('/validate', json=body) mock_translate.assert_called_once_with(sess.post.return_value) @mock.patch.object(resource.Resource, '_translate_response') @@ -98,6 +85,6 @@ def test_validate_with_ignore_errors(self, mock_translate): sot.validate(sess, tmpl, ignore_errors='123,456') sess.post.assert_called_once_with( - '/validate?ignore_errors=123%2C456', - endpoint_filter=sot.service, json=body) + '/validate?ignore_errors=123%2C456', json=body + ) mock_translate.assert_called_once_with(sess.post.return_value) diff --git a/openstack/tests/unit/placement/__init__.py b/openstack/tests/unit/placement/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/placement/v1/__init__.py b/openstack/tests/unit/placement/v1/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/placement/v1/test_proxy.py b/openstack/tests/unit/placement/v1/test_proxy.py new file mode 100644 index 0000000000..5dcdec11d6 --- /dev/null +++ b/openstack/tests/unit/placement/v1/test_proxy.py @@ -0,0 +1,165 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in 
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.placement.v1 import _proxy +from openstack.placement.v1 import resource_class +from openstack.placement.v1 import resource_provider +from openstack.placement.v1 import resource_provider_inventory +from openstack.tests.unit import test_proxy_base as test_proxy_base + + +class TestPlacementProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestPlacementResourceClass(TestPlacementProxy): + def test_resource_class_create(self): + self.verify_create( + self.proxy.create_resource_class, + resource_class.ResourceClass, + ) + + def test_resource_class_delete(self): + self.verify_delete( + self.proxy.delete_resource_class, + resource_class.ResourceClass, + False, + ) + + def test_resource_class_update(self): + self.verify_update( + self.proxy.update_resource_class, + resource_class.ResourceClass, + False, + ) + + def test_resource_class_get(self): + self.verify_get( + self.proxy.get_resource_class, + resource_class.ResourceClass, + ) + + def test_resource_classes(self): + self.verify_list( + self.proxy.resource_classes, + resource_class.ResourceClass, + ) + + +class TestPlacementResourceProvider(TestPlacementProxy): + def test_resource_provider_create(self): + self.verify_create( + self.proxy.create_resource_provider, + resource_provider.ResourceProvider, + ) + + def test_resource_provider_delete(self): + self.verify_delete( + self.proxy.delete_resource_provider, + resource_provider.ResourceProvider, + False, + ) + + def 
test_resource_provider_update(self): + self.verify_update( + self.proxy.update_resource_provider, + resource_provider.ResourceProvider, + False, + ) + + def test_resource_provider_get(self): + self.verify_get( + self.proxy.get_resource_provider, + resource_provider.ResourceProvider, + ) + + def test_resource_providers(self): + self.verify_list( + self.proxy.resource_providers, + resource_provider.ResourceProvider, + ) + + def test_resource_provider_set_aggregates(self): + self._verify( + 'openstack.placement.v1.resource_provider.ResourceProvider.set_aggregates', + self.proxy.set_resource_provider_aggregates, + method_args=['value', 'a', 'b'], + expected_args=[self.proxy], + expected_kwargs={'aggregates': ('a', 'b')}, + ) + + def test_resource_provider_get_aggregates(self): + self._verify( + 'openstack.placement.v1.resource_provider.ResourceProvider.fetch_aggregates', + self.proxy.get_resource_provider_aggregates, + method_args=['value'], + expected_args=[self.proxy], + ) + + +class TestPlacementResourceProviderInventory(TestPlacementProxy): + def test_resource_provider_inventory_create(self): + self.verify_create( + self.proxy.create_resource_provider_inventory, + resource_provider_inventory.ResourceProviderInventory, + method_kwargs={ + 'resource_provider': 'test_id', + 'resource_class': 'CUSTOM_FOO', + 'total': 20, + }, + expected_kwargs={ + 'resource_provider_id': 'test_id', + 'resource_class': 'CUSTOM_FOO', + 'total': 20, + }, + ) + + def test_resource_provider_inventory_delete(self): + self.verify_delete( + self.proxy.delete_resource_provider_inventory, + resource_provider_inventory.ResourceProviderInventory, + ignore_missing=False, + method_kwargs={'resource_provider': 'test_id'}, + expected_kwargs={'resource_provider_id': 'test_id'}, + ) + + def test_resource_provider_inventory_update(self): + self.verify_update( + self.proxy.update_resource_provider_inventory, + resource_provider_inventory.ResourceProviderInventory, + method_kwargs={ + 'resource_provider': 
'test_id', + 'resource_provider_generation': 1, + }, + expected_kwargs={ + 'resource_provider_id': 'test_id', + 'resource_provider_generation': 1, + }, + ) + + def test_resource_provider_inventory_get(self): + self.verify_get( + self.proxy.get_resource_provider_inventory, + resource_provider_inventory.ResourceProviderInventory, + method_kwargs={'resource_provider': 'test_id'}, + expected_kwargs={'resource_provider_id': 'test_id'}, + ) + + def test_resource_provider_inventories(self): + self.verify_list( + self.proxy.resource_provider_inventories, + resource_provider_inventory.ResourceProviderInventory, + method_kwargs={'resource_provider': 'test_id'}, + expected_kwargs={'resource_provider_id': 'test_id'}, + ) diff --git a/openstack/tests/unit/placement/v1/test_resource_class.py b/openstack/tests/unit/placement/v1/test_resource_class.py new file mode 100644 index 0000000000..09e1f0e17d --- /dev/null +++ b/openstack/tests/unit/placement/v1/test_resource_class.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.placement.v1 import resource_class as rc +from openstack.tests.unit import base + +FAKE = { + 'name': 'CUSTOM_FPGA', +} + + +class TestResourceClass(base.TestCase): + def test_basic(self): + sot = rc.ResourceClass() + self.assertEqual(None, sot.resource_key) + self.assertEqual('resource_classes', sot.resources_key) + self.assertEqual('/resource_classes', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_patch) + + self.assertDictEqual( + {'limit': 'limit', 'marker': 'marker'}, sot._query_mapping._mapping + ) + + def test_make_it(self): + sot = rc.ResourceClass(**FAKE) + self.assertEqual(FAKE['name'], sot.id) + self.assertEqual(FAKE['name'], sot.name) diff --git a/openstack/tests/unit/placement/v1/test_resource_provider.py b/openstack/tests/unit/placement/v1/test_resource_provider.py new file mode 100644 index 0000000000..a63bcb22ec --- /dev/null +++ b/openstack/tests/unit/placement/v1/test_resource_provider.py @@ -0,0 +1,57 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.placement.v1 import resource_provider as rp +from openstack.tests.unit import base + +FAKE = { + 'uuid': '751cd30a-df22-4ef8-b028-67c1c5aeddc3', + 'name': 'fake-name', + 'parent_provider_uuid': '9900cc2d-88e8-429d-927a-182adf1577b0', +} + + +class TestResourceProvider(base.TestCase): + def test_basic(self): + sot = rp.ResourceProvider() + self.assertEqual(None, sot.resource_key) + self.assertEqual('resource_providers', sot.resources_key) + self.assertEqual('/resource_providers', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_patch) + + self.assertDictEqual( + { + 'limit': 'limit', + 'marker': 'marker', + 'name': 'name', + 'member_of': 'member_of', + 'resources': 'resources', + 'in_tree': 'in_tree', + 'required': 'required', + 'id': 'uuid', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = rp.ResourceProvider(**FAKE) + self.assertEqual(FAKE['uuid'], sot.id) + self.assertEqual(FAKE['name'], sot.name) + self.assertEqual( + FAKE['parent_provider_uuid'], + sot.parent_provider_id, + ) diff --git a/openstack/tests/unit/placement/v1/test_resource_provider_inventory.py b/openstack/tests/unit/placement/v1/test_resource_provider_inventory.py new file mode 100644 index 0000000000..1194ca81a5 --- /dev/null +++ b/openstack/tests/unit/placement/v1/test_resource_provider_inventory.py @@ -0,0 +1,51 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.placement.v1 import resource_provider_inventory +from openstack.tests.unit import base + +FAKE = { + 'allocation_ratio': 1.0, + 'max_unit': 35, + 'min_unit': 1, + 'reserved': 0, + 'step_size': 1, + 'total': 35, +} + + +class TestResourceProviderInventory(base.TestCase): + def test_basic(self): + sot = resource_provider_inventory.ResourceProviderInventory() + self.assertIsNone(sot.resource_key) + self.assertIsNone(sot.resources_key) + self.assertEqual( + '/resource_providers/%(resource_provider_id)s/inventories', + sot.base_path, + ) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_patch) + + self.assertDictEqual({}, sot._query_mapping._mapping) + + def test_make_it(self): + sot = resource_provider_inventory.ResourceProviderInventory(**FAKE) + self.assertEqual(FAKE['allocation_ratio'], sot.allocation_ratio) + self.assertEqual(FAKE['max_unit'], sot.max_unit) + self.assertEqual(FAKE['min_unit'], sot.min_unit) + self.assertEqual(FAKE['reserved'], sot.reserved) + self.assertEqual(FAKE['step_size'], sot.step_size) + self.assertEqual(FAKE['total'], sot.total) diff --git a/openstack/tests/unit/placement/v1/test_trait.py b/openstack/tests/unit/placement/v1/test_trait.py new file mode 100644 index 0000000000..777a6d7da7 --- /dev/null +++ b/openstack/tests/unit/placement/v1/test_trait.py @@ -0,0 +1,42 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.placement.v1 import trait as _trait +from openstack.tests.unit import base + +FAKE = { + 'name': 'CUSTOM_FOO', +} + + +class TestResourceClass(base.TestCase): + def test_basic(self): + sot = _trait.Trait() + self.assertEqual(None, sot.resource_key) + self.assertEqual(None, sot.resources_key) + self.assertEqual('/traits', sot.base_path) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertTrue(sot.allow_delete) + self.assertTrue(sot.allow_list) + self.assertFalse(sot.allow_patch) + + self.assertDictEqual( + {'name': 'name', 'associated': 'associated'}, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = _trait.Trait(**FAKE) + self.assertEqual(FAKE['name'], sot.id) + self.assertEqual(FAKE['name'], sot.name) diff --git a/openstack/tests/unit/shared_file_system/__init__.py b/openstack/tests/unit/shared_file_system/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/shared_file_system/v2/__init__.py b/openstack/tests/unit/shared_file_system/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/shared_file_system/v2/test_availability_zone.py b/openstack/tests/unit/shared_file_system/v2/test_availability_zone.py new file mode 100644 index 0000000000..377a28121c --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_availability_zone.py @@ -0,0 +1,37 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in 
compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.shared_file_system.v2 import availability_zone as az +from openstack.tests.unit import base + +IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c' +EXAMPLE = { + "id": IDENTIFIER, + "name": "nova", + "created_at": "2021-01-21T20:13:55.000000", + "updated_at": None, +} + + +class TestAvailabilityZone(base.TestCase): + def test_basic(self): + az_resource = az.AvailabilityZone() + self.assertEqual('availability_zones', az_resource.resources_key) + self.assertEqual('/availability-zones', az_resource.base_path) + self.assertTrue(az_resource.allow_list) + + def test_make_availability_zone(self): + az_resource = az.AvailabilityZone(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], az_resource.id) + self.assertEqual(EXAMPLE['name'], az_resource.name) + self.assertEqual(EXAMPLE['created_at'], az_resource.created_at) + self.assertEqual(EXAMPLE['updated_at'], az_resource.updated_at) diff --git a/openstack/tests/unit/shared_file_system/v2/test_limit.py b/openstack/tests/unit/shared_file_system/v2/test_limit.py new file mode 100644 index 0000000000..864e4a9148 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_limit.py @@ -0,0 +1,89 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.shared_file_system.v2 import limit +from openstack.tests.unit import base + +EXAMPLE = { + "totalShareNetworksUsed": 0, + "maxTotalShareGigabytes": 1000, + "maxTotalShareNetworks": 10, + "totalSharesUsed": 0, + "totalShareGigabytesUsed": 0, + "totalShareSnapshotsUsed": 0, + "maxTotalShares": 50, + "totalSnapshotGigabytesUsed": 0, + "maxTotalSnapshotGigabytes": 1000, + "maxTotalShareSnapshots": 50, + "maxTotalShareReplicas": 100, + "maxTotalReplicaGigabytes": 1000, + "totalShareReplicasUsed": 0, + "totalReplicaGigabytesUsed": 0, +} + + +class TestLimit(base.TestCase): + def test_basic(self): + limits = limit.Limit() + self.assertEqual('limits', limits.resources_key) + self.assertEqual('/limits', limits.base_path) + self.assertTrue(limits.allow_list) + self.assertFalse(limits.allow_fetch) + self.assertFalse(limits.allow_create) + self.assertFalse(limits.allow_commit) + self.assertFalse(limits.allow_delete) + self.assertFalse(limits.allow_head) + + def test_make_limits(self): + limits = limit.Limit(**EXAMPLE) + self.assertEqual( + EXAMPLE['totalShareNetworksUsed'], limits.totalShareNetworksUsed + ) + self.assertEqual( + EXAMPLE['maxTotalShareGigabytes'], limits.maxTotalShareGigabytes + ) + self.assertEqual( + EXAMPLE['maxTotalShareNetworks'], limits.maxTotalShareNetworks + ) + self.assertEqual(EXAMPLE['totalSharesUsed'], limits.totalSharesUsed) + self.assertEqual( + EXAMPLE['totalShareGigabytesUsed'], limits.totalShareGigabytesUsed + ) + self.assertEqual( + EXAMPLE['totalShareSnapshotsUsed'], limits.totalShareSnapshotsUsed + ) + 
self.assertEqual(EXAMPLE['maxTotalShares'], limits.maxTotalShares) + self.assertEqual( + EXAMPLE['totalSnapshotGigabytesUsed'], + limits.totalSnapshotGigabytesUsed, + ) + self.assertEqual( + EXAMPLE['maxTotalSnapshotGigabytes'], + limits.maxTotalSnapshotGigabytes, + ) + self.assertEqual( + EXAMPLE['maxTotalShareSnapshots'], limits.maxTotalShareSnapshots + ) + self.assertEqual( + EXAMPLE['maxTotalShareReplicas'], limits.maxTotalShareReplicas + ) + self.assertEqual( + EXAMPLE['maxTotalReplicaGigabytes'], + limits.maxTotalReplicaGigabytes, + ) + self.assertEqual( + EXAMPLE['totalShareReplicasUsed'], limits.totalShareReplicasUsed + ) + self.assertEqual( + EXAMPLE['totalReplicaGigabytesUsed'], + limits.totalReplicaGigabytesUsed, + ) diff --git a/openstack/tests/unit/shared_file_system/v2/test_proxy.py b/openstack/tests/unit/shared_file_system/v2/test_proxy.py new file mode 100644 index 0000000000..9643e42338 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_proxy.py @@ -0,0 +1,595 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from openstack.shared_file_system.v2 import _proxy +from openstack.shared_file_system.v2 import limit +from openstack.shared_file_system.v2 import resource_locks +from openstack.shared_file_system.v2 import share +from openstack.shared_file_system.v2 import share_access_rule +from openstack.shared_file_system.v2 import share_group +from openstack.shared_file_system.v2 import share_group_snapshot +from openstack.shared_file_system.v2 import share_instance +from openstack.shared_file_system.v2 import share_network +from openstack.shared_file_system.v2 import share_network_subnet +from openstack.shared_file_system.v2 import share_snapshot +from openstack.shared_file_system.v2 import share_snapshot_instance +from openstack.shared_file_system.v2 import storage_pool +from openstack.shared_file_system.v2 import user_message +from openstack.tests.unit import test_proxy_base + + +class TestSharedFileSystemProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + +class TestSharedFileSystemShare(TestSharedFileSystemProxy): + def test_shares(self): + self.verify_list(self.proxy.shares, share.Share) + + def test_shares_detailed(self): + self.verify_list( + self.proxy.shares, + share.Share, + method_kwargs={"details": True, "query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_shares_not_detailed(self): + self.verify_list( + self.proxy.shares, + share.Share, + method_kwargs={"details": False, "query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_share_get(self): + self.verify_get(self.proxy.get_share, share.Share) + + def test_share_find(self): + self.verify_find(self.proxy.find_share, share.Share) + + def test_share_delete(self): + self.verify_delete(self.proxy.delete_share, share.Share, False) + + def test_share_delete_ignore(self): + self.verify_delete(self.proxy.delete_share, share.Share, True) + + def test_share_create(self): + 
self.verify_create(self.proxy.create_share, share.Share) + + def test_share_update(self): + self.verify_update(self.proxy.update_share, share.Share) + + def test_share_resize_extend(self): + mock_share = share.Share(size=10, id='fakeId') + self.proxy._get = mock.Mock(return_value=mock_share) + + self._verify( + "openstack.shared_file_system.v2.share." + "Share.extend_share", + self.proxy.resize_share, + method_args=['fakeId', 20], + expected_args=[self.proxy, 20, False], + ) + + def test_share_resize_shrink(self): + mock_share = share.Share(size=30, id='fakeId') + self.proxy._get = mock.Mock(return_value=mock_share) + + self._verify( + "openstack.shared_file_system.v2.share." + "Share.shrink_share", + self.proxy.resize_share, + method_args=['fakeId', 20], + expected_args=[self.proxy, 20], + ) + + def test_share_instances(self): + self.verify_list( + self.proxy.share_instances, share_instance.ShareInstance + ) + + def test_share_instance_get(self): + self.verify_get( + self.proxy.get_share_instance, share_instance.ShareInstance + ) + + def test_share_instance_reset(self): + self._verify( + "openstack.shared_file_system.v2.share_instance." + + "ShareInstance.reset_status", + self.proxy.reset_share_instance_status, + method_args=['id', 'available'], + expected_args=[self.proxy, 'available'], + ) + + def test_share_instance_delete(self): + self._verify( + "openstack.shared_file_system.v2.share_instance." 
+ + "ShareInstance.force_delete", + self.proxy.delete_share_instance, + method_args=['id'], + expected_args=[self.proxy], + ) + + @mock.patch("openstack.resource.wait_for_status") + def test_wait_for(self, mock_wait): + mock_resource = mock.Mock() + mock_wait.return_value = mock_resource + + self.proxy.wait_for_status(mock_resource, 'ACTIVE') + + mock_wait.assert_called_once_with( + self.proxy, mock_resource, 'ACTIVE', None, 2, None, 'status', None + ) + + +class TestSharedFileSystemStoragePool(TestSharedFileSystemProxy): + def test_storage_pools(self): + self.verify_list(self.proxy.storage_pools, storage_pool.StoragePool) + + def test_storage_pool_detailed(self): + self.verify_list( + self.proxy.storage_pools, + storage_pool.StoragePool, + method_kwargs={"details": True, "backend": "alpha"}, + expected_kwargs={"backend": "alpha"}, + ) + + def test_storage_pool_not_detailed(self): + self.verify_list( + self.proxy.storage_pools, + storage_pool.StoragePool, + method_kwargs={"details": False, "backend": "alpha"}, + expected_kwargs={"backend": "alpha"}, + ) + + +class TestSharedFileSystemShareMetadata(TestSharedFileSystemProxy): + def test_get_share_metadata(self): + self._verify( + "openstack.shared_file_system.v2.share.Share.fetch_metadata", + self.proxy.get_share_metadata, + method_args=["share_id"], + expected_args=[self.proxy], + expected_result=share.Share( + id="share_id", metadata={"key": "value"} + ), + ) + + def test_get_share_metadata_item(self): + self._verify( + "openstack.shared_file_system.v2.share.Share.get_metadata_item", + self.proxy.get_share_metadata_item, + method_args=["share_id", "key"], + expected_args=[self.proxy, "key"], + expected_result=share.Share( + id="share_id", metadata={"key": "value"} + ), + ) + + def test_create_share_metadata(self): + metadata = {"foo": "bar", "newFoo": "newBar"} + self._verify( + "openstack.shared_file_system.v2.share.Share.set_metadata", + self.proxy.create_share_metadata, + method_args=["share_id"], + 
method_kwargs=metadata, + expected_args=[self.proxy], + expected_kwargs={"metadata": metadata}, + expected_result=share.Share(id="share_id", metadata=metadata), + ) + + def test_update_share_metadata(self): + metadata = {"foo": "bar", "newFoo": "newBar"} + replace = True + self._verify( + "openstack.shared_file_system.v2.share.Share.set_metadata", + self.proxy.update_share_metadata, + method_args=["share_id", metadata, replace], + expected_args=[self.proxy], + expected_kwargs={"metadata": metadata, "replace": replace}, + expected_result=share.Share(id="share_id", metadata=metadata), + ) + + def test_delete_share_metadata(self): + self._verify( + "openstack.shared_file_system.v2.share.Share.delete_metadata_item", + self.proxy.delete_share_metadata, + expected_result=None, + method_args=["share_id", ["key"]], + expected_args=[self.proxy, "key"], + ) + + +class TestUserMessageProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_user_messages(self): + self.verify_list(self.proxy.user_messages, user_message.UserMessage) + + def test_user_messages_queried(self): + self.verify_list( + self.proxy.user_messages, + user_message.UserMessage, + method_kwargs={"action_id": "1"}, + expected_kwargs={"action_id": "1"}, + ) + + def test_user_message_get(self): + self.verify_get(self.proxy.get_user_message, user_message.UserMessage) + + def test_delete_user_message(self): + self.verify_delete( + self.proxy.delete_user_message, user_message.UserMessage, False + ) + + def test_delete_user_message_true(self): + self.verify_delete( + self.proxy.delete_user_message, user_message.UserMessage, True + ) + + def test_limit(self): + self.verify_list(self.proxy.limits, limit.Limit) + + +class TestShareSnapshotResource(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_share_snapshots(self): + self.verify_list( + self.proxy.share_snapshots, 
share_snapshot.ShareSnapshot + ) + + def test_share_snapshots_detailed(self): + self.verify_list( + self.proxy.share_snapshots, + share_snapshot.ShareSnapshot, + method_kwargs={"details": True, "name": "my_snapshot"}, + expected_kwargs={"name": "my_snapshot"}, + ) + + def test_share_snapshots_not_detailed(self): + self.verify_list( + self.proxy.share_snapshots, + share_snapshot.ShareSnapshot, + method_kwargs={"details": False, "name": "my_snapshot"}, + expected_kwargs={"name": "my_snapshot"}, + ) + + def test_share_snapshot_get(self): + self.verify_get( + self.proxy.get_share_snapshot, share_snapshot.ShareSnapshot + ) + + def test_share_snapshot_delete(self): + self.verify_delete( + self.proxy.delete_share_snapshot, + share_snapshot.ShareSnapshot, + False, + ) + + def test_share_snapshot_delete_ignore(self): + self.verify_delete( + self.proxy.delete_share_snapshot, + share_snapshot.ShareSnapshot, + True, + ) + + def test_share_snapshot_create(self): + self.verify_create( + self.proxy.create_share_snapshot, share_snapshot.ShareSnapshot + ) + + def test_share_snapshot_update(self): + self.verify_update( + self.proxy.update_share_snapshot, share_snapshot.ShareSnapshot + ) + + @mock.patch("openstack.resource.wait_for_delete") + def test_wait_for_delete(self, mock_wait): + mock_resource = mock.Mock() + mock_wait.return_value = mock_resource + + self.proxy.wait_for_delete(mock_resource) + + mock_wait.assert_called_once_with( + self.proxy, mock_resource, 2, 120, None + ) + + +class TestShareSnapshotInstanceResource(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_share_snapshot_instances(self): + self.verify_list( + self.proxy.share_snapshot_instances, + share_snapshot_instance.ShareSnapshotInstance, + ) + + def test_share_snapshot_instance_detailed(self): + self.verify_list( + self.proxy.share_snapshot_instances, + share_snapshot_instance.ShareSnapshotInstance, + method_kwargs={"details": True, 
"query": {'snapshot_id': 'fake'}}, + expected_kwargs={"query": {'snapshot_id': 'fake'}}, + ) + + def test_share_snapshot_instance_not_detailed(self): + self.verify_list( + self.proxy.share_snapshot_instances, + share_snapshot_instance.ShareSnapshotInstance, + method_kwargs={"details": False, "query": {'snapshot_id': 'fake'}}, + expected_kwargs={"query": {'snapshot_id': 'fake'}}, + ) + + def test_share_snapshot_instance_get(self): + self.verify_get( + self.proxy.get_share_snapshot_instance, + share_snapshot_instance.ShareSnapshotInstance, + ) + + +class TestShareNetworkResource(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_share_networks(self): + self.verify_list(self.proxy.share_networks, share_network.ShareNetwork) + + def test_share_networks_detailed(self): + self.verify_list( + self.proxy.share_networks, + share_network.ShareNetwork, + method_kwargs={"details": True, "name": "my_net"}, + expected_kwargs={"name": "my_net"}, + ) + + def test_share_networks_not_detailed(self): + self.verify_list( + self.proxy.share_networks, + share_network.ShareNetwork, + method_kwargs={"details": False, "name": "my_net"}, + expected_kwargs={"name": "my_net"}, + ) + + def test_share_network_get(self): + self.verify_get( + self.proxy.get_share_network, share_network.ShareNetwork + ) + + def test_share_network_delete(self): + self.verify_delete( + self.proxy.delete_share_network, share_network.ShareNetwork, False + ) + + def test_share_network_delete_ignore(self): + self.verify_delete( + self.proxy.delete_share_network, share_network.ShareNetwork, True + ) + + def test_share_network_create(self): + self.verify_create( + self.proxy.create_share_network, share_network.ShareNetwork + ) + + def test_share_network_update(self): + self.verify_update( + self.proxy.update_share_network, share_network.ShareNetwork + ) + + +class TestShareNetworkSubnetResource(test_proxy_base.TestProxyBase): + def setUp(self): + 
super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_share_network_subnets(self): + self.verify_list( + self.proxy.share_network_subnets, + share_network_subnet.ShareNetworkSubnet, + method_args=["test_share"], + expected_args=[], + expected_kwargs={"share_network_id": "test_share"}, + ) + + def test_share_network_subnet_get(self): + self.verify_get( + self.proxy.get_share_network_subnet, + share_network_subnet.ShareNetworkSubnet, + method_args=["fake_network_id", "fake_sub_network_id"], + expected_args=['fake_sub_network_id'], + expected_kwargs={'share_network_id': 'fake_network_id'}, + ) + + def test_share_network_subnet_create(self): + self.verify_create( + self.proxy.create_share_network_subnet, + share_network_subnet.ShareNetworkSubnet, + method_args=["fake_network_id"], + method_kwargs={"p1": "v1"}, + expected_args=[], + expected_kwargs={ + "share_network_id": "fake_network_id", + "p1": "v1", + }, + ) + + def test_share_network_subnet_delete(self): + self.verify_delete( + self.proxy.delete_share_network_subnet, + share_network_subnet.ShareNetworkSubnet, + False, + method_args=["fake_network_id", "fake_sub_network_id"], + expected_args=["fake_sub_network_id"], + expected_kwargs={'share_network_id': 'fake_network_id'}, + ) + + +class TestAccessRuleProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_access_rules(self): + self.verify_list( + self.proxy.access_rules, + share_access_rule.ShareAccessRule, + method_args=["test_share"], + expected_args=[], + expected_kwargs={"share_id": "test_share"}, + ) + + def test_access_rules_get(self): + self.verify_get( + self.proxy.get_access_rule, share_access_rule.ShareAccessRule + ) + + def test_access_rules_create(self): + self.verify_create( + self.proxy.create_access_rule, + share_access_rule.ShareAccessRule, + method_args=["share_id"], + expected_args=[], + ) + + def test_access_rules_delete(self): + self._verify( + 
"openstack.shared_file_system.v2.share_access_rule.ShareAccessRule.delete", + self.proxy.delete_access_rule, + method_args=[ + 'access_id', + 'share_id', + ], + expected_args=[self.proxy], + expected_kwargs={'unrestrict': False}, + ) + + +class TestResourceLocksProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_list_resource_locks(self): + self.verify_list( + self.proxy.resource_locks, resource_locks.ResourceLock + ) + + def test_resource_lock_get(self): + self.verify_get( + self.proxy.get_resource_lock, resource_locks.ResourceLock + ) + + def test_resource_lock_delete(self): + self.verify_delete( + self.proxy.delete_resource_lock, resource_locks.ResourceLock, False + ) + + def test_resource_lock_delete_ignore(self): + self.verify_delete( + self.proxy.delete_resource_lock, resource_locks.ResourceLock, True + ) + + def test_resource_lock_create(self): + self.verify_create( + self.proxy.create_resource_lock, resource_locks.ResourceLock + ) + + def test_resource_lock_update(self): + self.verify_update( + self.proxy.update_resource_lock, resource_locks.ResourceLock + ) + + +class TestShareGroupResource(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_share_groups(self): + self.verify_list(self.proxy.share_groups, share_group.ShareGroup) + + def test_share_groups_query(self): + self.verify_list( + self.proxy.share_groups, + share_group.ShareGroup, + method_kwargs={"query": 1}, + expected_kwargs={"query": 1}, + ) + + def test_share_group_get(self): + self.verify_get(self.proxy.get_share_group, share_group.ShareGroup) + + def test_share_group_find(self): + self.verify_find(self.proxy.find_share_group, share_group.ShareGroup) + + def test_share_group_delete(self): + self.verify_delete( + self.proxy.delete_share_group, share_group.ShareGroup, False + ) + + def test_share_group_delete_ignore(self): + self.verify_delete( + 
self.proxy.delete_share_group, share_group.ShareGroup, True + ) + + def test_share_group_create(self): + self.verify_create( + self.proxy.create_share_group, share_group.ShareGroup + ) + + def test_share_group_update(self): + self.verify_update( + self.proxy.update_share_group, share_group.ShareGroup + ) + + def test_share_group_snapshots(self): + self.verify_list( + self.proxy.share_group_snapshots, + share_group_snapshot.ShareGroupSnapshot, + ) + + def test_share_group_snapshot_get(self): + self.verify_get( + self.proxy.get_share_group_snapshot, + share_group_snapshot.ShareGroupSnapshot, + ) + + def test_share_group_snapshot_update(self): + self.verify_update( + self.proxy.update_share_group_snapshot, + share_group_snapshot.ShareGroupSnapshot, + ) + + def test_share_group_snapshot_delete(self): + self.verify_delete( + self.proxy.delete_share_group_snapshot, + share_group_snapshot.ShareGroupSnapshot, + False, + ) + + def test_share_group_snapshot_delete_ignore(self): + self.verify_delete( + self.proxy.delete_share_group_snapshot, + share_group_snapshot.ShareGroupSnapshot, + True, + ) diff --git a/openstack/tests/unit/shared_file_system/v2/test_quota_class_set.py b/openstack/tests/unit/shared_file_system/v2/test_quota_class_set.py new file mode 100644 index 0000000000..daf2bb12fc --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_quota_class_set.py @@ -0,0 +1,99 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack.shared_file_system.v2 import quota_class_set +from openstack.tests.unit import base + +EXAMPLE = { + "share_groups": 50, + "gigabytes": 1000, + "share_group_snapshots": 50, + "snapshots": 50, + "snapshot_gigabytes": 1000, + "shares": 50, + "id": "default", + "share_networks": 10, + "share_replicas": 100, + "replica_gigabytes": 1000, + "per_share_gigabytes": -1, + "backups": 50, + "backup_gigabytes": 1000, +} + + +class TestQuotaClassSet(base.TestCase): + def test_basic(self): + _quota_class_set = quota_class_set.QuotaClassSet() + + self.assertEqual('/quota-class-sets', _quota_class_set.base_path) + self.assertTrue(_quota_class_set.allow_fetch) + self.assertTrue(_quota_class_set.allow_commit) + self.assertFalse(_quota_class_set.allow_create) + self.assertFalse(_quota_class_set.allow_delete) + self.assertFalse(_quota_class_set.allow_list) + self.assertFalse(_quota_class_set.allow_head) + + def test_get_quota_class_set(self): + _quota_class_set = quota_class_set.QuotaClassSet(**EXAMPLE) + self.assertEqual( + EXAMPLE['share_groups'], _quota_class_set.share_groups + ) + self.assertEqual(EXAMPLE['gigabytes'], _quota_class_set.gigabytes) + self.assertEqual( + EXAMPLE['share_group_snapshots'], + _quota_class_set.share_group_snapshots, + ) + self.assertEqual(EXAMPLE['snapshots'], _quota_class_set.snapshots) + self.assertEqual( + EXAMPLE['snapshot_gigabytes'], _quota_class_set.snapshot_gigabytes + ) + self.assertEqual(EXAMPLE['shares'], _quota_class_set.shares) + self.assertEqual(EXAMPLE['id'], _quota_class_set.id) + self.assertEqual( + EXAMPLE['share_networks'], _quota_class_set.share_networks + ) + self.assertEqual( + EXAMPLE['share_replicas'], _quota_class_set.share_replicas + ) + self.assertEqual( + EXAMPLE['replica_gigabytes'], _quota_class_set.replica_gigabytes + ) + self.assertEqual( + EXAMPLE['per_share_gigabytes'], + _quota_class_set.per_share_gigabytes, + ) + self.assertEqual(EXAMPLE['backups'], _quota_class_set.backups) + self.assertEqual( + 
EXAMPLE['backup_gigabytes'], _quota_class_set.backup_gigabytes + ) + + def test_update_quota_class_set(self): + _quota_class_set = quota_class_set.QuotaClassSet(**EXAMPLE) + updated_attributes = { + "share_groups": 100, + "gigabytes": 2000, + "share_group_snapshots": 100, + } + _quota_class_set._update(**updated_attributes) + + self.assertEqual( + updated_attributes['share_groups'], _quota_class_set.share_groups + ) + self.assertEqual( + updated_attributes['gigabytes'], _quota_class_set.gigabytes + ) + self.assertEqual( + updated_attributes['share_group_snapshots'], + _quota_class_set.share_group_snapshots, + ) + self.assertEqual(EXAMPLE['snapshots'], _quota_class_set.snapshots) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share.py b/openstack/tests/unit/shared_file_system/v2/test_share.py new file mode 100644 index 0000000000..08b830fd75 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share.py @@ -0,0 +1,257 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.shared_file_system.v2 import share +from openstack.tests.unit import base + + +IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c' +EXAMPLE = { + "id": IDENTIFIER, + "size": 2, + "availability_zone": "manila-zone-1", + "created_at": "2021-02-11T17:38:00.000000", + "status": "available", + "name": None, + "description": None, + "project_id": "d19444eb73af4b37bc0794532ef6fc50", + "snapshot_id": None, + "share_network_id": None, + "share_protocol": "NFS", + "metadata": {}, + "share_type": "cbb18bb7-cc97-477a-b64b-ed7c7f2a1c67", + "volume_type": "default", + "is_public": False, + "is_snapshot_supported": True, + "task_state": None, + "share_type_name": "default", + "access_rules_status": "active", + "replication_type": None, + "is_replicated": False, + "user_id": "6c262cab98de42c2afc4cfccbefc50c7", + "is_creating_new_share_from_snapshot_supported": True, + "is_reverting_to_snapshot_supported": True, + "share_group_id": None, + "source_share_group_snapshot_member_id": None, + "is_mounting_snapshot_supported": True, + "progress": "100%", + "share_server_id": None, + "host": "new@denver#lvm-single-pool", +} + + +class TestShares(base.TestCase): + def test_basic(self): + shares_resource = share.Share() + self.assertEqual('shares', shares_resource.resources_key) + self.assertEqual('/shares', shares_resource.base_path) + self.assertTrue(shares_resource.allow_list) + self.assertTrue(shares_resource.allow_create) + self.assertTrue(shares_resource.allow_fetch) + self.assertTrue(shares_resource.allow_commit) + self.assertTrue(shares_resource.allow_delete) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "project_id": "project_id", + "name": "name", + "status": "status", + "share_server_id": "share_server_id", + "metadata": "metadata", + "share_type_id": "share_type_id", + "snapshot_id": "snapshot_id", + "host": "host", + "share_network_id": "share_network_id", + 
"is_public": "is_public", + "share_group_id": "share_group_id", + "export_location_id": "export_location_id", + "export_location_path": "export_location_path", + "offset": "offset", + "sort_key": "sort_key", + "sort_dir": "sort_dir", + "all_projects": "all_tenants", + }, + shares_resource._query_mapping._mapping, + ) + + def test_make_shares(self): + shares_resource = share.Share(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], shares_resource.id) + self.assertEqual(EXAMPLE['size'], shares_resource.size) + self.assertEqual( + EXAMPLE['availability_zone'], shares_resource.availability_zone + ) + self.assertEqual(EXAMPLE['created_at'], shares_resource.created_at) + self.assertEqual(EXAMPLE['status'], shares_resource.status) + self.assertEqual(EXAMPLE['name'], shares_resource.name) + self.assertEqual(EXAMPLE['description'], shares_resource.description) + self.assertEqual(EXAMPLE['project_id'], shares_resource.project_id) + self.assertEqual(EXAMPLE['snapshot_id'], shares_resource.snapshot_id) + self.assertEqual( + EXAMPLE['share_network_id'], shares_resource.share_network_id + ) + self.assertEqual( + EXAMPLE['share_protocol'], shares_resource.share_protocol + ) + self.assertEqual(EXAMPLE['metadata'], shares_resource.metadata) + self.assertEqual(EXAMPLE['share_type'], shares_resource.share_type) + self.assertEqual(EXAMPLE['is_public'], shares_resource.is_public) + self.assertEqual( + EXAMPLE['is_snapshot_supported'], + shares_resource.is_snapshot_supported, + ) + self.assertEqual(EXAMPLE['task_state'], shares_resource.task_state) + self.assertEqual( + EXAMPLE['share_type_name'], shares_resource.share_type_name + ) + self.assertEqual( + EXAMPLE['access_rules_status'], shares_resource.access_rules_status + ) + self.assertEqual( + EXAMPLE['replication_type'], shares_resource.replication_type + ) + self.assertEqual( + EXAMPLE['is_replicated'], shares_resource.is_replicated + ) + self.assertEqual(EXAMPLE['user_id'], shares_resource.user_id) + self.assertEqual( + 
EXAMPLE['is_creating_new_share_from_snapshot_supported'], + (shares_resource.is_creating_new_share_from_snapshot_supported), + ) + self.assertEqual( + EXAMPLE['is_reverting_to_snapshot_supported'], + shares_resource.is_reverting_to_snapshot_supported, + ) + self.assertEqual( + EXAMPLE['share_group_id'], shares_resource.share_group_id + ) + self.assertEqual( + EXAMPLE['source_share_group_snapshot_member_id'], + shares_resource.source_share_group_snapshot_member_id, + ) + self.assertEqual( + EXAMPLE['is_mounting_snapshot_supported'], + shares_resource.is_mounting_snapshot_supported, + ) + self.assertEqual(EXAMPLE['progress'], shares_resource.progress) + self.assertEqual( + EXAMPLE['share_server_id'], shares_resource.share_server_id + ) + self.assertEqual(EXAMPLE['host'], shares_resource.host) + + +class TestShareActions(TestShares): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.status_code = 202 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = '3.0' + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_shrink_share(self): + sot = share.Share(**EXAMPLE) + microversion = sot._get_microversion(self.sess) + + self.assertIsNone(sot.shrink_share(self.sess, new_size=1)) + + url = f'shares/{IDENTIFIER}/action' + body = {"shrink": {"new_size": 1}} + headers = {'Accept': ''} + + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion=microversion + ) + + def test_extend_share(self): + sot = share.Share(**EXAMPLE) + microversion = sot._get_microversion(self.sess) + + self.assertIsNone(sot.extend_share(self.sess, new_size=3)) + + url = f'shares/{IDENTIFIER}/action' + body = {"extend": {"new_size": 3}} + headers = {'Accept': ''} + + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion=microversion + ) + + 
def test_revert_to_snapshot(self): + sot = share.Share(**EXAMPLE) + microversion = sot._get_microversion(self.sess) + + self.assertIsNone(sot.revert_to_snapshot(self.sess, "fake_id")) + + url = f'shares/{IDENTIFIER}/action' + body = {"revert": {"snapshot_id": "fake_id"}} + headers = {'Accept': ''} + + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion=microversion + ) + + def test_manage_share(self): + sot = share.Share() + + self.resp.headers = {} + self.resp.json = mock.Mock( + return_value={"share": {"name": "test_share", "size": 1}} + ) + + export_path = ( + "10.254.0.5:/shares/share-42033c24-0261-424f-abda-4fef2f6dbfd5." + ) + params = {"name": "test_share"} + res = sot.manage( + self.sess, + sot["share_protocol"], + export_path, + sot["host"], + **params, + ) + + self.assertEqual(res.name, "test_share") + self.assertEqual(res.size, 1) + + jsonDict = { + "share": { + "protocol": sot["share_protocol"], + "export_path": export_path, + "service_host": sot["host"], + "name": "test_share", + } + } + + self.sess.post.assert_called_once_with("shares/manage", json=jsonDict) + + def test_unmanage_share(self): + sot = share.Share(**EXAMPLE) + microversion = sot._get_microversion(self.sess) + + self.assertIsNone(sot.unmanage(self.sess)) + + url = f'shares/{IDENTIFIER}/action' + body = {'unmanage': None} + + self.sess.post.assert_called_with( + url, json=body, headers={'Accept': ''}, microversion=microversion + ) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_access_rule.py b/openstack/tests/unit/shared_file_system/v2/test_share_access_rule.py new file mode 100644 index 0000000000..5699b62cc1 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_access_rule.py @@ -0,0 +1,53 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.shared_file_system.v2 import share_access_rule +from openstack.tests.unit import base + +EXAMPLE = { + "access_level": "rw", + "state": "error", + "id": "507bf114-36f2-4f56-8cf4-857985ca87c1", + "share_id": "fb213952-2352-41b4-ad7b-2c4c69d13eef", + "access_type": "cert", + "access_to": "example.com", + "access_key": None, + "created_at": "2021-09-12T02:01:04.000000", + "updated_at": "2021-09-12T02:01:04.000000", + "metadata": {"key1": "value1", "key2": "value2"}, +} + + +class TestShareAccessRule(base.TestCase): + def test_basic(self): + rules_resource = share_access_rule.ShareAccessRule() + self.assertEqual('access_list', rules_resource.resources_key) + self.assertEqual('/share-access-rules', rules_resource.base_path) + self.assertTrue(rules_resource.allow_list) + + self.assertDictEqual( + {"limit": "limit", "marker": "marker", "share_id": "share_id"}, + rules_resource._query_mapping._mapping, + ) + + def test_make_share_access_rules(self): + rules_resource = share_access_rule.ShareAccessRule(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], rules_resource.id) + self.assertEqual(EXAMPLE['access_level'], rules_resource.access_level) + self.assertEqual(EXAMPLE['state'], rules_resource.state) + self.assertEqual(EXAMPLE['id'], rules_resource.id) + self.assertEqual(EXAMPLE['access_type'], rules_resource.access_type) + self.assertEqual(EXAMPLE['access_to'], rules_resource.access_to) + self.assertEqual(EXAMPLE['access_key'], rules_resource.access_key) + self.assertEqual(EXAMPLE['created_at'], rules_resource.created_at) + 
self.assertEqual(EXAMPLE['updated_at'], rules_resource.updated_at) + self.assertEqual(EXAMPLE['metadata'], rules_resource.metadata) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_export_locations.py b/openstack/tests/unit/shared_file_system/v2/test_share_export_locations.py new file mode 100644 index 0000000000..be467566eb --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_export_locations.py @@ -0,0 +1,47 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import share_export_locations as el +from openstack.tests.unit import base + + +IDENTIFIER = '08a87d37-5ca2-4308-86c5-cba06d8d796c' +EXAMPLE = { + "id": "f87589cb-f4bc-4a9b-b481-ab701206eb85", + "path": ( + "199.19.213.225:/opt/stack/data/manila/mnt/" + "share-6ba490c5-5225-4c3b-9982-14b8f475c6d9" + ), + "preferred": False, + "share_instance_id": "6ba490c5-5225-4c3b-9982-14b8f475c6d9", + "is_admin_only": False, +} + + +class TestShareExportLocations(base.TestCase): + def test_basic(self): + export = el.ShareExportLocation() + self.assertEqual('export_locations', export.resources_key) + self.assertEqual( + '/shares/%(share_id)s/export_locations', export.base_path + ) + self.assertTrue(export.allow_list) + + def test_share_export_locations(self): + export = el.ShareExportLocation(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], export.id) + self.assertEqual(EXAMPLE['path'], export.path) + self.assertEqual(EXAMPLE['preferred'], export.is_preferred) + self.assertEqual( + EXAMPLE['share_instance_id'], export.share_instance_id + ) + self.assertEqual(EXAMPLE['is_admin_only'], export.is_admin) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_group.py b/openstack/tests/unit/shared_file_system/v2/test_share_group.py new file mode 100644 index 0000000000..f75976ff89 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_group.py @@ -0,0 +1,81 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import share_group +from openstack.tests.unit import base + + +EXAMPLE = { + "status": "creating", + "description": None, + "links": "[]", + "availability_zone": None, + "source_share_group_snapshot_id": None, + "share_network_id": None, + "share_server_id": None, + "host": None, + "share_group_type_id": "89861c2a-10bf-4013-bdd4-3d020466aee4", + "consistent_snapshot_support": None, + "id": "f9c1f80c-2392-4e34-bd90-fc89cdc5bf93", + "name": None, + "created_at": "2021-06-03T19:20:33.974421", + "project_id": "e23850eeb91d4fa3866af634223e454c", + "share_types": ["ecd11f4c-d811-4471-b656-c755c77e02ba"], +} + + +class TestShareGroups(base.TestCase): + def test_basic(self): + share_groups = share_group.ShareGroup() + self.assertEqual('share_groups', share_groups.resources_key) + self.assertEqual('/share-groups', share_groups.base_path) + self.assertTrue(share_groups.allow_list) + self.assertTrue(share_groups.allow_fetch) + self.assertTrue(share_groups.allow_create) + self.assertTrue(share_groups.allow_commit) + self.assertTrue(share_groups.allow_delete) + self.assertFalse(share_groups.allow_head) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "share_group_id": "share_group_id", + }, + share_groups._query_mapping._mapping, + ) + + def test_make_share_groups(self): + share_group_res = share_group.ShareGroup(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], share_group_res.id) + self.assertEqual(EXAMPLE['status'], share_group_res.status) + self.assertEqual( + EXAMPLE['availability_zone'], share_group_res.availability_zone + ) + self.assertEqual(EXAMPLE['description'], share_group_res.description) + self.assertEqual( + EXAMPLE['source_share_group_snapshot_id'], + share_group_res.share_group_snapshot_id, + ) + self.assertEqual( + EXAMPLE['share_network_id'], share_group_res.share_network_id + ) + self.assertEqual( + EXAMPLE['share_group_type_id'], share_group_res.share_group_type_id + ) + self.assertEqual( + 
EXAMPLE['consistent_snapshot_support'], + share_group_res.consistent_snapshot_support, + ) + self.assertEqual(EXAMPLE['created_at'], share_group_res.created_at) + self.assertEqual(EXAMPLE['project_id'], share_group_res.project_id) + self.assertEqual(EXAMPLE['share_types'], share_group_res.share_types) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_group_snapshot.py b/openstack/tests/unit/shared_file_system/v2/test_share_group_snapshot.py new file mode 100644 index 0000000000..92880f57c6 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_group_snapshot.py @@ -0,0 +1,106 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.shared_file_system.v2 import share_group_snapshot +from openstack.tests.unit import base + +IDENTIFIER = '38152b6d-e1b5-465f-91bc-20bca4676a2a' +EXAMPLE = { + "id": IDENTIFIER, + "name": "snapshot_1", + "created_at": "2021-10-24T19:36:49.555325", + "status": "available", + "description": "first snapshot of sg-1", + "project_id": "7343d2f7770b4eb6a7bc33f44dcee1e0", + "share_group_id": "fb41512f-7c49-4304-afb1-66573c7feb14", +} + + +class TestShareGroupSnapshot(base.TestCase): + def test_basic(self): + share_group_snapshots = share_group_snapshot.ShareGroupSnapshot() + self.assertEqual( + 'share_group_snapshot', share_group_snapshots.resource_key + ) + self.assertEqual( + 'share_group_snapshots', share_group_snapshots.resources_key + ) + self.assertEqual( + '/share-group-snapshots', share_group_snapshots.base_path + ) + self.assertTrue(share_group_snapshots.allow_create) + self.assertTrue(share_group_snapshots.allow_fetch) + self.assertTrue(share_group_snapshots.allow_commit) + self.assertTrue(share_group_snapshots.allow_delete) + self.assertTrue(share_group_snapshots.allow_list) + self.assertFalse(share_group_snapshots.allow_head) + + def test_make_share_groups(self): + share_group_snapshots = share_group_snapshot.ShareGroupSnapshot( + **EXAMPLE + ) + self.assertEqual(EXAMPLE['id'], share_group_snapshots.id) + self.assertEqual(EXAMPLE['name'], share_group_snapshots.name) + self.assertEqual( + EXAMPLE['created_at'], share_group_snapshots.created_at + ) + self.assertEqual(EXAMPLE['status'], share_group_snapshots.status) + self.assertEqual( + EXAMPLE['description'], share_group_snapshots.description + ) + self.assertEqual( + EXAMPLE['project_id'], share_group_snapshots.project_id + ) + self.assertEqual( + EXAMPLE['share_group_id'], share_group_snapshots.share_group_id + ) + + +class TestShareGroupSnapshotActions(TestShareGroupSnapshot): + def setUp(self): + super(TestShareGroupSnapshot, 
self).setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = '3.0' + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_reset_status(self): + sot = share_group_snapshot.ShareGroupSnapshot(**EXAMPLE) + self.assertIsNone(sot.reset_status(self.sess, 'available')) + url = f'share-group-snapshots/{IDENTIFIER}/action' + body = {"reset_status": {"status": 'available'}} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, + json=body, + headers=headers, + microversion=self.sess.default_microversion, + ) + + def test_get_members(self): + sot = share_group_snapshot.ShareGroupSnapshot(**EXAMPLE) + sot.get_members(self.sess) + url = f'share-group-snapshots/{IDENTIFIER}/members' + headers = {'Accept': ''} + self.sess.get.assert_called_with( + url, + headers=headers, + microversion=self.sess.default_microversion, + ) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_instance.py b/openstack/tests/unit/shared_file_system/v2/test_share_instance.py new file mode 100644 index 0000000000..cb461ffa83 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_instance.py @@ -0,0 +1,121 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from unittest import mock + +from keystoneauth1 import adapter + +from openstack.shared_file_system.v2 import share_instance +from openstack.tests.unit import base + +IDENTIFIER = "75559a8b-c90c-42a7-bda2-edbe86acfb7b" + +EXAMPLE = { + "status": "available", + "progress": "100%", + "share_id": "d94a8548-2079-4be0-b21c-0a887acd31ca", + "availability_zone": "nova", + "replica_state": None, + "created_at": "2015-09-07T08:51:34.000000", + "cast_rules_to_readonly": False, + "share_network_id": "713df749-aac0-4a54-af52-10f6c991e80c", + "share_server_id": "ba11930a-bf1a-4aa7-bae4-a8dfbaa3cc73", + "host": "manila2@generic1#GENERIC1", + "access_rules_status": "active", + "id": IDENTIFIER, +} + + +class TestShareInstances(base.TestCase): + def test_basic(self): + share_instance_resource = share_instance.ShareInstance() + self.assertEqual( + 'share_instances', share_instance_resource.resources_key + ) + self.assertEqual('/share_instances', share_instance_resource.base_path) + self.assertTrue(share_instance_resource.allow_list) + self.assertFalse(share_instance_resource.allow_create) + self.assertTrue(share_instance_resource.allow_fetch) + self.assertFalse(share_instance_resource.allow_commit) + self.assertFalse(share_instance_resource.allow_delete) + + def test_make_share_instances(self): + share_instance_resource = share_instance.ShareInstance(**EXAMPLE) + self.assertEqual(EXAMPLE['status'], share_instance_resource.status) + self.assertEqual(EXAMPLE['progress'], share_instance_resource.progress) + self.assertEqual(EXAMPLE['share_id'], share_instance_resource.share_id) + self.assertEqual( + EXAMPLE['availability_zone'], + share_instance_resource.availability_zone, + ) + self.assertEqual( + EXAMPLE['replica_state'], share_instance_resource.replica_state + ) + self.assertEqual( + EXAMPLE['created_at'], share_instance_resource.created_at + ) + self.assertEqual( + EXAMPLE['cast_rules_to_readonly'], + share_instance_resource.cast_rules_to_readonly, + ) + self.assertEqual( + 
EXAMPLE['share_network_id'], + share_instance_resource.share_network_id, + ) + self.assertEqual( + EXAMPLE['share_server_id'], share_instance_resource.share_server_id + ) + self.assertEqual(EXAMPLE['host'], share_instance_resource.host) + self.assertEqual( + EXAMPLE['access_rules_status'], + share_instance_resource.access_rules_status, + ) + self.assertEqual(EXAMPLE['id'], share_instance_resource.id) + + +class TestShareInstanceActions(TestShareInstances): + def setUp(self): + super().setUp() + self.resp = mock.Mock() + self.resp.body = None + self.resp.status_code = 200 + self.resp.json = mock.Mock(return_value=self.resp.body) + self.sess = mock.Mock(spec=adapter.Adapter) + self.sess.default_microversion = '3.0' + self.sess.post = mock.Mock(return_value=self.resp) + self.sess._get_connection = mock.Mock(return_value=self.cloud) + + def test_reset_status(self): + sot = share_instance.ShareInstance(**EXAMPLE) + microversion = sot._get_microversion(self.sess) + + self.assertIsNone(sot.reset_status(self.sess, 'active')) + + url = f'share_instances/{IDENTIFIER}/action' + body = {"reset_status": {"status": 'active'}} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion=microversion + ) + + def test_force_delete(self): + sot = share_instance.ShareInstance(**EXAMPLE) + microversion = sot._get_microversion(self.sess) + + self.assertIsNone(sot.force_delete(self.sess)) + + url = f'share_instances/{IDENTIFIER}/action' + body = {'force_delete': None} + headers = {'Accept': ''} + self.sess.post.assert_called_with( + url, json=body, headers=headers, microversion=microversion + ) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_network.py b/openstack/tests/unit/shared_file_system/v2/test_share_network.py new file mode 100644 index 0000000000..4bd76bd4ab --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_network.py @@ -0,0 +1,63 @@ +# Licensed under the Apache License, Version 2.0 
(the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.shared_file_system.v2 import share_network +from openstack.tests.unit import base + +IDENTIFIER = '6e1821be-c494-4f62-8301-5dcd19f4d615' +EXAMPLE = { + "id": IDENTIFIER, + "project_id": "4b8184eddd6b429a93231c056ae9cd12", + "name": "my_share_net", + "description": "My share network", + "created_at": "2021-06-10T10:11:17.291981", + "updated_at": None, + "share_network_subnets": [], +} + + +class TestShareNetwork(base.TestCase): + def test_basic(self): + networks = share_network.ShareNetwork() + self.assertEqual('share_networks', networks.resources_key) + self.assertEqual('/share-networks', networks.base_path) + self.assertTrue(networks.allow_list) + self.assertTrue(networks.allow_create) + self.assertTrue(networks.allow_fetch) + self.assertTrue(networks.allow_commit) + self.assertTrue(networks.allow_delete) + self.assertFalse(networks.allow_head) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "project_id": "project_id", + "created_since": "created_since", + "created_before": "created_before", + "offset": "offset", + "security_service_id": "security_service_id", + "all_projects": "all_tenants", + "name": "name", + "description": "description", + }, + networks._query_mapping._mapping, + ) + + def test_share_network(self): + networks = share_network.ShareNetwork(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], networks.id) + self.assertEqual(EXAMPLE['name'], networks.name) + self.assertEqual(EXAMPLE['project_id'], networks.project_id) + 
self.assertEqual(EXAMPLE['description'], networks.description) + self.assertEqual(EXAMPLE['created_at'], networks.created_at) + self.assertEqual(EXAMPLE['updated_at'], networks.updated_at) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_network_subnet.py b/openstack/tests/unit/shared_file_system/v2/test_share_network_subnet.py new file mode 100644 index 0000000000..702ab2a969 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_network_subnet.py @@ -0,0 +1,72 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import share_network_subnet as SNS +from openstack.tests.unit import base + +IDENTIFIER = '9cd5a59f-4d22-496f-8b1a-ea4860c24d39' +EXAMPLE = { + "id": IDENTIFIER, + "availability_zone": None, + "share_network_id": "652ef887-b805-4328-b65a-b88c64cb69ec", + "share_network_name": None, + "created_at": "2021-02-24T02:45:59.000000", + "segmentation_id": None, + "neutron_subnet_id": None, + "updated_at": None, + "neutron_net_id": None, + "ip_version": None, + "cidr": None, + "network_type": None, + "mtu": None, + "gateway": None, +} + + +class TestShareNetworkSubnet(base.TestCase): + def test_basic(self): + SNS_resource = SNS.ShareNetworkSubnet() + self.assertEqual('share_network_subnets', SNS_resource.resources_key) + self.assertEqual( + '/share-networks/%(share_network_id)s/subnets', + SNS_resource.base_path, + ) + self.assertTrue(SNS_resource.allow_list) + + def test_make_share_network_subnet(self): + SNS_resource = SNS.ShareNetworkSubnet(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], SNS_resource.id) + self.assertEqual( + EXAMPLE['availability_zone'], SNS_resource.availability_zone + ) + self.assertEqual( + EXAMPLE['share_network_id'], SNS_resource.share_network_id + ) + self.assertEqual( + EXAMPLE['share_network_name'], SNS_resource.share_network_name + ) + self.assertEqual(EXAMPLE['created_at'], SNS_resource.created_at) + self.assertEqual( + EXAMPLE['segmentation_id'], SNS_resource.segmentation_id + ) + self.assertEqual( + EXAMPLE['neutron_subnet_id'], SNS_resource.neutron_subnet_id + ) + self.assertEqual(EXAMPLE['updated_at'], SNS_resource.updated_at) + self.assertEqual( + EXAMPLE['neutron_net_id'], SNS_resource.neutron_net_id + ) + self.assertEqual(EXAMPLE['ip_version'], SNS_resource.ip_version) + self.assertEqual(EXAMPLE['cidr'], SNS_resource.cidr) + self.assertEqual(EXAMPLE['network_type'], SNS_resource.network_type) + self.assertEqual(EXAMPLE['mtu'], SNS_resource.mtu) + self.assertEqual(EXAMPLE['gateway'], 
SNS_resource.gateway) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_snapshot.py b/openstack/tests/unit/shared_file_system/v2/test_share_snapshot.py new file mode 100644 index 0000000000..3a6608a186 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_snapshot.py @@ -0,0 +1,60 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.shared_file_system.v2 import share_snapshot +from openstack.tests.unit import base + + +EXAMPLE = { + "status": "creating", + "share_id": "406ea93b-32e9-4907-a117-148b3945749f", + "user_id": "5c7bdb6eb0504d54a619acf8375c08ce", + "name": "snapshot_share1", + "created_at": "2021-06-07T11:50:39.756808", + "description": "Here is a snapshot of share Share1", + "share_proto": "NFS", + "share_size": 1, + "id": "6d221c1d-0200-461e-8d20-24b4776b9ddb", + "project_id": "cadd7139bc3148b8973df097c0911016", + "size": 1, +} + + +class TestShareSnapshot(base.TestCase): + def test_basic(self): + snapshot_resource = share_snapshot.ShareSnapshot() + self.assertEqual('snapshots', snapshot_resource.resources_key) + self.assertEqual('/snapshots', snapshot_resource.base_path) + self.assertTrue(snapshot_resource.allow_list) + + self.assertDictEqual( + { + "limit": "limit", + "marker": "marker", + "snapshot_id": "snapshot_id", + }, + snapshot_resource._query_mapping._mapping, + ) + + def test_make_share_snapshot(self): + snapshot_resource = share_snapshot.ShareSnapshot(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], 
snapshot_resource.id) + self.assertEqual(EXAMPLE['share_id'], snapshot_resource.share_id) + self.assertEqual(EXAMPLE['user_id'], snapshot_resource.user_id) + self.assertEqual(EXAMPLE['created_at'], snapshot_resource.created_at) + self.assertEqual(EXAMPLE['status'], snapshot_resource.status) + self.assertEqual(EXAMPLE['name'], snapshot_resource.name) + self.assertEqual(EXAMPLE['description'], snapshot_resource.description) + self.assertEqual(EXAMPLE['share_proto'], snapshot_resource.share_proto) + self.assertEqual(EXAMPLE['share_size'], snapshot_resource.share_size) + self.assertEqual(EXAMPLE['project_id'], snapshot_resource.project_id) + self.assertEqual(EXAMPLE['size'], snapshot_resource.size) diff --git a/openstack/tests/unit/shared_file_system/v2/test_share_snapshot_instance.py b/openstack/tests/unit/shared_file_system/v2/test_share_snapshot_instance.py new file mode 100644 index 0000000000..708b4f346c --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_share_snapshot_instance.py @@ -0,0 +1,51 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.shared_file_system.v2 import share_snapshot_instance +from openstack.tests.unit import base + +EXAMPLE = { + "status": "available", + "share_id": "618599ab-09a1-432d-973a-c102564c7fec", + "share_instance_id": "8edff0cb-e5ce-4bab-aa99-afe02ed6a76a", + "snapshot_id": "d447de19-a6d3-40b3-ae9f-895c86798924", + "progress": "100%", + "created_at": "2021-06-04T00:44:52.000000", + "id": "275516e8-c998-4e78-a41e-7dd3a03e71cd", + "provider_location": "/path/to/fake...", + "updated_at": "2017-06-04T00:44:54.000000", +} + + +class TestShareSnapshotInstances(base.TestCase): + def test_basic(self): + instances = share_snapshot_instance.ShareSnapshotInstance() + self.assertEqual('snapshot_instance', instances.resource_key) + self.assertEqual('snapshot_instances', instances.resources_key) + self.assertEqual('/snapshot-instances', instances.base_path) + self.assertTrue(instances.allow_list) + + def test_make_share_snapshot_instance(self): + instance = share_snapshot_instance.ShareSnapshotInstance(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], instance.id) + self.assertEqual(EXAMPLE['share_id'], instance.share_id) + self.assertEqual( + EXAMPLE['share_instance_id'], instance.share_instance_id + ) + self.assertEqual(EXAMPLE['snapshot_id'], instance.snapshot_id) + self.assertEqual(EXAMPLE['status'], instance.status) + self.assertEqual(EXAMPLE['progress'], instance.progress) + self.assertEqual(EXAMPLE['created_at'], instance.created_at) + self.assertEqual(EXAMPLE['updated_at'], instance.updated_at) + self.assertEqual( + EXAMPLE['provider_location'], instance.provider_location + ) diff --git a/openstack/tests/unit/shared_file_system/v2/test_storage_pool.py b/openstack/tests/unit/shared_file_system/v2/test_storage_pool.py new file mode 100644 index 0000000000..b0e510a4c4 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_storage_pool.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except 
in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.shared_file_system.v2 import storage_pool +from openstack.tests.unit import base + + +EXAMPLE = { + "name": "opencloud@alpha#ALPHA_pool", + "host": "opencloud", + "backend": "alpha", + "pool": "ALPHA_pool", + "capabilities": { + "pool_name": "ALPHA_pool", + "total_capacity_gb": 1230.0, + "free_capacity_gb": 1210.0, + "reserved_percentage": 0, + "share_backend_name": "ALPHA", + "storage_protocol": "NFS_CIFS", + "vendor_name": "Open Source", + "driver_version": "1.0", + "timestamp": "2021-07-31T00:28:02.935569", + "driver_handles_share_servers": True, + "snapshot_support": True, + "create_share_from_snapshot_support": True, + "revert_to_snapshot_support": True, + "mount_snapshot_support": True, + "dedupe": False, + "compression": False, + "replication_type": None, + "replication_domain": None, + "sg_consistent_snapshot_support": "pool", + "ipv4_support": True, + "ipv6_support": False, + }, +} + + +class TestStoragePool(base.TestCase): + def test_basic(self): + pool_resource = storage_pool.StoragePool() + self.assertEqual('pools', pool_resource.resources_key) + self.assertEqual('/scheduler-stats/pools', pool_resource.base_path) + self.assertTrue(pool_resource.allow_list) + + self.assertDictEqual( + { + 'pool': 'pool', + 'backend': 'backend', + 'host': 'host', + 'limit': 'limit', + 'marker': 'marker', + 'capabilities': 'capabilities', + 'share_type': 'share_type', + }, + pool_resource._query_mapping._mapping, + ) + + def test_make_storage_pool(self): + pool_resource = storage_pool.StoragePool(**EXAMPLE) + 
self.assertEqual(EXAMPLE['pool'], pool_resource.pool) + self.assertEqual(EXAMPLE['host'], pool_resource.host) + self.assertEqual(EXAMPLE['name'], pool_resource.name) + self.assertEqual(EXAMPLE['backend'], pool_resource.backend) + self.assertEqual(EXAMPLE['capabilities'], pool_resource.capabilities) diff --git a/openstack/tests/unit/shared_file_system/v2/test_user_message.py b/openstack/tests/unit/shared_file_system/v2/test_user_message.py new file mode 100644 index 0000000000..ed31237574 --- /dev/null +++ b/openstack/tests/unit/shared_file_system/v2/test_user_message.py @@ -0,0 +1,66 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack.shared_file_system.v2 import user_message +from openstack.tests.unit import base + + +IDENTIFIER = "2784bc88-b729-4220-a6bb-a8b7a8f53aad" +EXAMPLE = { + "id": IDENTIFIER, + "project_id": "dcc9de3c5fc8471ba3662dbb2b6166d5", + "action_id": "001", + "detail_id": "008", + "message_level": "ERROR", + "created_at": "2021-03-26T05:16:39.000000", + "expires_at": "2021-04-25T05:16:39.000000", + "request_id": "req-e4b3e6de-ce4d-4ef2-b1e7-0087200e4db3", + "resource_type": "SHARE", + "resource_id": "c2e4ca07-8c37-4014-92c9-2171c7813fa0", + "user_message": ( + "allocate host: No storage could be allocated" + "for this share request, Capabilities filter" + "didn't succeed." 
+ ), +} + + +class TestUserMessage(base.TestCase): + def test_basic(self): + message = user_message.UserMessage() + self.assertEqual('messages', message.resources_key) + self.assertEqual('/messages', message.base_path) + self.assertTrue(message.allow_list) + self.assertFalse(message.allow_create) + self.assertFalse(message.allow_commit) + self.assertTrue(message.allow_delete) + self.assertTrue(message.allow_fetch) + self.assertFalse(message.allow_head) + + self.assertDictEqual( + {"limit": "limit", "marker": "marker", "message_id": "message_id"}, + message._query_mapping._mapping, + ) + + def test_user_message(self): + messages = user_message.UserMessage(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], messages.id) + self.assertEqual(EXAMPLE['resource_id'], messages.resource_id) + self.assertEqual(EXAMPLE['message_level'], messages.message_level) + self.assertEqual(EXAMPLE['user_message'], messages.user_message) + self.assertEqual(EXAMPLE['expires_at'], messages.expires_at) + self.assertEqual(EXAMPLE['detail_id'], messages.detail_id) + self.assertEqual(EXAMPLE['created_at'], messages.created_at) + self.assertEqual(EXAMPLE['request_id'], messages.request_id) + self.assertEqual(EXAMPLE['project_id'], messages.project_id) + self.assertEqual(EXAMPLE['resource_type'], messages.resource_type) + self.assertEqual(EXAMPLE['action_id'], messages.action_id) diff --git a/openstack/tests/unit/telemetry/alarm/test_alarm_service.py b/openstack/tests/unit/telemetry/alarm/test_alarm_service.py deleted file mode 100644 index 8106e961f9..0000000000 --- a/openstack/tests/unit/telemetry/alarm/test_alarm_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.telemetry.alarm import alarm_service - - -class TestAlarmService(testtools.TestCase): - - def test_service(self): - sot = alarm_service.AlarmService() - self.assertEqual('alarming', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v2', sot.valid_versions[0].module) - self.assertEqual('v2', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/telemetry/alarm/v2/test_alarm.py b/openstack/tests/unit/telemetry/alarm/v2/test_alarm.py deleted file mode 100644 index cd0cb1af53..0000000000 --- a/openstack/tests/unit/telemetry/alarm/v2/test_alarm.py +++ /dev/null @@ -1,107 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from openstack.telemetry.alarm.v2 import alarm - -IDENTIFIER = 'IDENTIFIER' -EXAMPLE = { - 'alarm_actions': ['1'], - 'alarm_id': IDENTIFIER, - 'combination_rule': {'alarm_ids': ['2', 'b'], 'operator': 'or', }, - 'description': '3', - 'enabled': True, - 'insufficient_data_actions': ['4'], - 'name': '5', - 'ok_actions': ['6'], - 'project_id': '7', - 'repeat_actions': False, - 'severity': 'low', - 'state': 'insufficient data', - 'state_timestamp': '2015-03-09T12:15:57.233772', - 'timestamp': '2015-03-09T12:15:57.233772', - 'threshold_rule': { - 'meter_name': 'a', - 'evaluation_periods:': '1', - 'period': '60', - 'statistic': 'avg', - 'threshold': '92.6', - 'comparison_operator': 'gt', - 'exclude_outliers': True, - }, - 'time_constraints': [{'name': 'a', 'duration': 'b', 'start': 'c', }], - 'type': '10', - 'user_id': '11', -} - - -class TestAlarm(testtools.TestCase): - - def setUp(self): - super(TestAlarm, self).setUp() - self.resp = mock.Mock() - self.resp.body = '' - self.resp.json = mock.Mock(return_value=self.resp.body) - self.sess = mock.Mock() - self.sess.put = mock.Mock(return_value=self.resp) - - def test_basic(self): - sot = alarm.Alarm() - self.assertIsNone(sot.resource_key) - self.assertIsNone(sot.resources_key) - self.assertEqual('/alarms', sot.base_path) - self.assertEqual('alarming', sot.service.service_type) - self.assertTrue(sot.allow_create) - self.assertTrue(sot.allow_retrieve) - self.assertTrue(sot.allow_update) - self.assertTrue(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_it(self): - sot = alarm.Alarm(EXAMPLE) - self.assertEqual(IDENTIFIER, sot.id) - self.assertEqual(EXAMPLE['alarm_actions'], sot.alarm_actions) - self.assertEqual(IDENTIFIER, sot.alarm_id) - self.assertEqual(EXAMPLE['combination_rule'], sot.combination_rule) - self.assertEqual(EXAMPLE['description'], sot.description) - self.assertTrue(sot.is_enabled) - self.assertEqual(EXAMPLE['insufficient_data_actions'], - 
sot.insufficient_data_actions) - self.assertEqual(EXAMPLE['name'], sot.name) - self.assertEqual(EXAMPLE['ok_actions'], sot.ok_actions) - self.assertEqual(EXAMPLE['project_id'], sot.project_id) - self.assertFalse(sot.is_repeat_actions) - self.assertEqual(EXAMPLE['severity'], sot.severity) - self.assertEqual(EXAMPLE['state'], sot.state) - self.assertEqual(EXAMPLE['state_timestamp'], sot.state_changed_at) - self.assertEqual(EXAMPLE['timestamp'], sot.updated_at) - self.assertEqual(EXAMPLE['threshold_rule'], sot.threshold_rule) - self.assertEqual(EXAMPLE['time_constraints'], sot.time_constraints) - self.assertEqual(EXAMPLE['type'], sot.type) - self.assertEqual(EXAMPLE['user_id'], sot.user_id) - - def test_check_status(self): - sot = alarm.Alarm(EXAMPLE) - sot.check_state(self.sess) - - url = 'alarms/IDENTIFIER/state' - self.sess.get.assert_called_with(url, endpoint_filter=sot.service) - - def test_change_status(self): - sot = alarm.Alarm(EXAMPLE) - self.assertEqual(self.resp.body, sot.change_state(self.sess, 'alarm')) - - url = 'alarms/IDENTIFIER/state' - self.sess.put.assert_called_with(url, endpoint_filter=sot.service, - json='alarm') diff --git a/openstack/tests/unit/telemetry/alarm/v2/test_alarm_change.py b/openstack/tests/unit/telemetry/alarm/v2/test_alarm_change.py deleted file mode 100644 index bd272440e8..0000000000 --- a/openstack/tests/unit/telemetry/alarm/v2/test_alarm_change.py +++ /dev/null @@ -1,74 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from openstack.telemetry.alarm.v2 import alarm_change - -IDENTIFIER = 'IDENTIFIER' -EXAMPLE = { - 'alarm_id': 0, - 'detail': '1', - 'event_id': IDENTIFIER, - 'on_behalf_of': '3', - 'project_id': '4', - 'timestamp': '2015-03-09T12:15:57.233772', - 'type': '6', - 'user_id': '7', -} - - -class TestAlarmChange(testtools.TestCase): - - def test_basic(self): - sot = alarm_change.AlarmChange() - self.assertEqual('alarm_change', sot.resource_key) - self.assertIsNone(sot.resources_key) - self.assertEqual('/alarms/%(alarm_id)s/history', sot.base_path) - self.assertEqual('alarming', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_retrieve) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_it(self): - sot = alarm_change.AlarmChange(EXAMPLE) - self.assertEqual(IDENTIFIER, sot.id) - self.assertEqual(EXAMPLE['alarm_id'], sot.alarm_id) - self.assertEqual(EXAMPLE['detail'], sot.detail) - self.assertEqual(IDENTIFIER, sot.event_id) - self.assertEqual(EXAMPLE['on_behalf_of'], sot.on_behalf_of_id) - self.assertEqual(EXAMPLE['project_id'], sot.project_id) - self.assertEqual(EXAMPLE['timestamp'], sot.triggered_at) - self.assertEqual(EXAMPLE['type'], sot.type) - self.assertEqual(EXAMPLE['user_id'], sot.user_id) - - def test_list(self): - sess = mock.Mock() - resp = mock.Mock() - resp.json = mock.Mock(return_value=[EXAMPLE, EXAMPLE]) - sess.get = mock.Mock(return_value=resp) - path_args = {'alarm_id': IDENTIFIER} - - found = alarm_change.AlarmChange.list(sess, path_args=path_args) - first = next(found) - self.assertEqual(IDENTIFIER, first.id) - self.assertEqual(EXAMPLE['alarm_id'], first.alarm_id) - self.assertEqual(EXAMPLE['detail'], first.detail) - self.assertEqual(IDENTIFIER, first.event_id) - self.assertEqual(EXAMPLE['on_behalf_of'], first.on_behalf_of_id) - self.assertEqual(EXAMPLE['project_id'], first.project_id) - 
self.assertEqual(EXAMPLE['timestamp'], first.triggered_at) - self.assertEqual(EXAMPLE['type'], first.type) - self.assertEqual(EXAMPLE['user_id'], first.user_id) diff --git a/openstack/tests/unit/telemetry/alarm/v2/test_proxy.py b/openstack/tests/unit/telemetry/alarm/v2/test_proxy.py deleted file mode 100644 index 343db89169..0000000000 --- a/openstack/tests/unit/telemetry/alarm/v2/test_proxy.py +++ /dev/null @@ -1,54 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.telemetry.alarm.v2 import _proxy -from openstack.telemetry.alarm.v2 import alarm -from openstack.telemetry.alarm.v2 import alarm_change -from openstack.tests.unit import test_proxy_base - - -class TestAlarmProxy(test_proxy_base.TestProxyBase): - def setUp(self): - super(TestAlarmProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - def test_alarm_change_find(self): - self.verify_find(self.proxy.find_alarm_change, - alarm_change.AlarmChange) - - def test_alarm_changes(self): - larm = alarm.Alarm.existing(alarm_id='larm') - expected_kwargs = {'path_args': {'alarm_id': 'larm'}} - self.verify_list(self.proxy.alarm_changes, alarm_change.AlarmChange, - method_args=[larm], paginated=False, - expected_kwargs=expected_kwargs) - - def test_alarm_create_attrs(self): - self.verify_create(self.proxy.create_alarm, alarm.Alarm) - - def test_alarm_delete(self): - self.verify_delete(self.proxy.delete_alarm, alarm.Alarm, False) - - def test_alarm_delete_ignore(self): - 
self.verify_delete(self.proxy.delete_alarm, alarm.Alarm, True) - - def test_alarm_find(self): - self.verify_find(self.proxy.find_alarm, alarm.Alarm) - - def test_alarm_get(self): - self.verify_get(self.proxy.get_alarm, alarm.Alarm) - - def test_alarms(self): - self.verify_list(self.proxy.alarms, alarm.Alarm, paginated=False) - - def test_alarm_update(self): - self.verify_update(self.proxy.update_alarm, alarm.Alarm) diff --git a/openstack/tests/unit/telemetry/test_telemetry_service.py b/openstack/tests/unit/telemetry/test_telemetry_service.py deleted file mode 100644 index acc7da94ce..0000000000 --- a/openstack/tests/unit/telemetry/test_telemetry_service.py +++ /dev/null @@ -1,28 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.telemetry import telemetry_service - - -class TestTelemetryService(testtools.TestCase): - - def test_service(self): - sot = telemetry_service.TelemetryService() - self.assertEqual('metering', sot.service_type) - self.assertEqual('public', sot.interface) - self.assertIsNone(sot.region) - self.assertIsNone(sot.service_name) - self.assertEqual(1, len(sot.valid_versions)) - self.assertEqual('v2', sot.valid_versions[0].module) - self.assertEqual('v2', sot.valid_versions[0].path) diff --git a/openstack/tests/unit/telemetry/v2/test_capability.py b/openstack/tests/unit/telemetry/v2/test_capability.py deleted file mode 100644 index 8d37c8ae68..0000000000 --- a/openstack/tests/unit/telemetry/v2/test_capability.py +++ /dev/null @@ -1,70 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from openstack.telemetry.v2 import capability - -EXAMPLE = { - "id": "123", - "enabled": False, -} -BODY = { - "api": { - "statistics:query:complex": False, - "alarms:history:query:simple": True, - "events:query:simple": True, - "alarms:query:simple": True, - "resources:query:simple": True, - } -} - - -class TestCapability(testtools.TestCase): - def test_basic(self): - sot = capability.Capability() - self.assertEqual('capability', sot.resource_key) - self.assertEqual('capabilities', sot.resources_key) - self.assertEqual('/capabilities', sot.base_path) - self.assertEqual('metering', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_it(self): - sot = capability.Capability(**EXAMPLE) - self.assertEqual(EXAMPLE['id'], sot.id) - self.assertEqual(EXAMPLE['enabled'], sot.is_enabled) - - def test_list(self): - sess = mock.Mock() - resp = mock.Mock() - resp.json = mock.Mock(return_value=BODY) - sess.get = mock.Mock(return_value=resp) - - caps = capability.Capability.list(sess) - - caps = sorted(caps, key=lambda cap: cap.id) - self.assertEqual(5, len(caps)) - self.assertEqual('alarms:history:query:simple', caps[0].id) - self.assertTrue(caps[0].is_enabled) - self.assertEqual('alarms:query:simple', caps[1].id) - self.assertTrue(caps[1].is_enabled) - self.assertEqual('events:query:simple', caps[2].id) - self.assertTrue(caps[2].is_enabled) - self.assertEqual('resources:query:simple', caps[3].id) - self.assertTrue(caps[3].is_enabled) - self.assertEqual('statistics:query:complex', caps[4].id) - self.assertFalse(caps[4].is_enabled) diff --git a/openstack/tests/unit/telemetry/v2/test_meter.py b/openstack/tests/unit/telemetry/v2/test_meter.py deleted file mode 100644 index 83cdb89438..0000000000 --- a/openstack/tests/unit/telemetry/v2/test_meter.py +++ /dev/null @@ -1,54 +0,0 @@ 
-# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import testtools - -from openstack.telemetry.v2 import meter - -IDENTIFIER = 'IDENTIFIER' -EXAMPLE = { - 'meter_id': IDENTIFIER, - 'name': 'instance', - 'project_id': '123', - 'resource_id': '456', - 'source': 'abc', - 'type': 'def', - 'unit': 'ghi', - 'user_id': '789' -} - - -class TestMeter(testtools.TestCase): - - def test_basic(self): - sot = meter.Meter() - self.assertEqual('meter', sot.resource_key) - self.assertIsNone(sot.resources_key) - self.assertEqual('/meters', sot.base_path) - self.assertEqual('metering', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_it(self): - sot = meter.Meter(**EXAMPLE) - self.assertEqual(EXAMPLE['meter_id'], sot.id) - self.assertEqual(EXAMPLE['meter_id'], sot.meter_id) - self.assertEqual(EXAMPLE['name'], sot.name) - self.assertEqual(EXAMPLE['project_id'], sot.project_id) - self.assertEqual(EXAMPLE['resource_id'], sot.resource_id) - self.assertEqual(EXAMPLE['source'], sot.source) - self.assertEqual(EXAMPLE['type'], sot.type) - self.assertEqual(EXAMPLE['unit'], sot.unit) - self.assertEqual(EXAMPLE['user_id'], sot.user_id) diff --git a/openstack/tests/unit/telemetry/v2/test_proxy.py b/openstack/tests/unit/telemetry/v2/test_proxy.py deleted file mode 100644 index 428a0e0e72..0000000000 --- 
a/openstack/tests/unit/telemetry/v2/test_proxy.py +++ /dev/null @@ -1,69 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -from openstack.telemetry.v2 import _proxy -from openstack.telemetry.v2 import capability -from openstack.telemetry.v2 import meter -from openstack.telemetry.v2 import resource -from openstack.telemetry.v2 import sample -from openstack.telemetry.v2 import statistics -from openstack.tests.unit import test_proxy_base2 - - -class TestTelemetryProxy(test_proxy_base2.TestProxyBase): - def setUp(self): - super(TestTelemetryProxy, self).setUp() - self.proxy = _proxy.Proxy(self.session) - - def test_capability_find(self): - self.verify_find(self.proxy.find_capability, capability.Capability) - - def test_capabilities(self): - self.verify_list(self.proxy.capabilities, capability.Capability, - paginated=False) - - def test_meter_find(self): - self.verify_find(self.proxy.find_meter, meter.Meter) - - def test_meters(self): - self.verify_list(self.proxy.meters, meter.Meter, paginated=False) - - def test_resource_find(self): - self.verify_find(self.proxy.find_resource, resource.Resource) - - def test_resource_get(self): - self.verify_get(self.proxy.get_resource, resource.Resource) - - def test_resources(self): - self.verify_list(self.proxy.resources, resource.Resource, - paginated=False) - - def test_sample_create_attrs(self): - self.verify_create(self.proxy.create_sample, sample.Sample) - - def test_sample_find(self): - 
self.verify_find(self.proxy.find_sample, sample.Sample) - - def test_samples(self): - expected_kwargs = {'counter_name': 'meterone'} - self.verify_list(self.proxy.samples, sample.Sample, - method_args=['meterone'], - paginated=False, expected_kwargs=expected_kwargs) - - def test_statistics_find(self): - self.verify_find(self.proxy.find_statistics, statistics.Statistics) - - def test_statistics(self): - expected_kwargs = {'meter_name': 'meterone'} - self.verify_list(self.proxy.statistics, statistics.Statistics, - method_args=['meterone'], - paginated=False, expected_kwargs=expected_kwargs) diff --git a/openstack/tests/unit/telemetry/v2/test_resource.py b/openstack/tests/unit/telemetry/v2/test_resource.py deleted file mode 100644 index 62051830c2..0000000000 --- a/openstack/tests/unit/telemetry/v2/test_resource.py +++ /dev/null @@ -1,59 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.telemetry.v2 import resource - -IDENTIFIER = 'IDENTIFIER' -LINKS = [{'href': 'first_uri', 'rel': 'label 1', }, - {'href': 'other_uri', 'rel': 'label', }, ] -EXAMPLE = { - 'resource_id': IDENTIFIER, - 'first_sample_timestamp': '2015-03-09T12:15:57.233772', - 'last_sample_timestamp': '2015-03-09T12:15:57.233772', - 'links': LINKS, - 'metadata': {'name_one': '1', 'name_two': '2', }, - 'project_id': '123', - 'source': 'abc', - 'user_id': '789' -} - - -class TestResource(testtools.TestCase): - - def test_basic(self): - sot = resource.Resource() - self.assertIsNone(sot.resource_key) - self.assertIsNone(sot.resources_key) - self.assertEqual('/resources', sot.base_path) - self.assertEqual('metering', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_it(self): - sot = resource.Resource(**EXAMPLE) - self.assertEqual(EXAMPLE['resource_id'], sot.id) - self.assertEqual(EXAMPLE['resource_id'], sot.resource_id) - self.assertEqual(EXAMPLE['first_sample_timestamp'], - sot.first_sample_at) - self.assertEqual(EXAMPLE['last_sample_timestamp'], - sot.last_sample_at) - self.assertEqual(EXAMPLE['links'], sot.links) - self.assertEqual(EXAMPLE['metadata'], sot.metadata) - self.assertEqual(EXAMPLE['project_id'], sot.project_id) - self.assertEqual(EXAMPLE['resource_id'], sot.resource_id) - self.assertEqual(EXAMPLE['source'], sot.source) - self.assertEqual(EXAMPLE['user_id'], sot.user_id) diff --git a/openstack/tests/unit/telemetry/v2/test_sample.py b/openstack/tests/unit/telemetry/v2/test_sample.py deleted file mode 100644 index 9c31427d9c..0000000000 --- a/openstack/tests/unit/telemetry/v2/test_sample.py +++ /dev/null @@ -1,83 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import testtools - -from openstack.telemetry.v2 import sample - -SAMPLE = { - 'sample_id': '0', - 'metadata': {'1': 'one'}, - 'counter_name': '2', - 'message_id': '4', - 'project_id': '3', - 'recorded_at': '2015-03-09T12:15:57.233772', - 'resource_id': '5', - 'source': '6', - 'timestamp': '2015-03-09T12:15:57.233772', - 'type': '8', - 'unit': '9', - 'user_id': '10', - 'volume': '11.1', -} - - -class TestSample(testtools.TestCase): - - def test_basic(self): - sot = sample.Sample() - self.assertIsNone(sot.resource_key) - self.assertIsNone(sot.resources_key) - self.assertEqual('/meters/%(counter_name)s', sot.base_path) - self.assertEqual('metering', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertTrue(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_new(self): - sot = sample.Sample(**SAMPLE) - self.assertEqual(SAMPLE['message_id'], sot.id) - self.assertEqual(SAMPLE['metadata'], sot.metadata) - self.assertEqual(SAMPLE['counter_name'], sot.counter_name) - self.assertEqual(SAMPLE['project_id'], sot.project_id) - self.assertEqual(SAMPLE['recorded_at'], sot.recorded_at) - self.assertEqual(SAMPLE['resource_id'], sot.resource_id) - self.assertEqual(SAMPLE['source'], sot.source) - self.assertEqual(SAMPLE['timestamp'], sot.generated_at) - self.assertEqual(SAMPLE['type'], sot.type) - self.assertEqual(SAMPLE['unit'], sot.unit) - self.assertEqual(SAMPLE['user_id'], sot.user_id) - self.assertEqual(SAMPLE['volume'], sot.volume) - - def 
test_list(self): - sess = mock.Mock() - resp = mock.Mock() - resp.json = mock.Mock(return_value=[SAMPLE]) - sess.get = mock.Mock(return_value=resp) - - found = sample.Sample.list(sess, counter_name='name_of_meter') - first = next(found) - self.assertEqual(SAMPLE['message_id'], first.id) - self.assertEqual(SAMPLE['metadata'], first.metadata) - self.assertEqual(SAMPLE['counter_name'], first.counter_name) - self.assertEqual(SAMPLE['project_id'], first.project_id) - self.assertEqual(SAMPLE['recorded_at'], first.recorded_at) - self.assertEqual(SAMPLE['resource_id'], first.resource_id) - self.assertEqual(SAMPLE['source'], first.source) - self.assertEqual(SAMPLE['timestamp'], first.generated_at) - self.assertEqual(SAMPLE['type'], first.type) - self.assertEqual(SAMPLE['unit'], first.unit) - self.assertEqual(SAMPLE['user_id'], first.user_id) - self.assertEqual(SAMPLE['volume'], first.volume) diff --git a/openstack/tests/unit/telemetry/v2/test_statistics.py b/openstack/tests/unit/telemetry/v2/test_statistics.py deleted file mode 100644 index 71ea61b3e4..0000000000 --- a/openstack/tests/unit/telemetry/v2/test_statistics.py +++ /dev/null @@ -1,93 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from openstack.telemetry.v2 import statistics - -EXAMPLE = { - 'aggregate': '1', - 'avg': '2', - 'count': '3', - 'duration': '4', - 'duration_end': '2015-03-09T12:45:00.000000', - 'duration_start': '2015-03-09T12:15:00.000000', - 'groupby': '7', - 'max': '8', - 'min': '9', - 'period': '10', - 'period_end': '2015-03-09T12:45:00.000000', - 'period_start': '2015-03-09T12:15:00.000000', - 'sum': '13', - 'unit': '14', -} - - -class TestStatistics(testtools.TestCase): - - def test_basic(self): - sot = statistics.Statistics() - self.assertEqual('statistics', sot.resource_key) - self.assertIsNone(sot.resources_key) - self.assertEqual('/meters/%(meter_name)s/statistics', sot.base_path) - self.assertEqual('metering', sot.service.service_type) - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertTrue(sot.allow_list) - - def test_make_it(self): - sot = statistics.Statistics(**EXAMPLE) - self.assertEqual(EXAMPLE['unit'], sot.id) - self.assertEqual(EXAMPLE['aggregate'], sot.aggregate) - self.assertEqual(EXAMPLE['avg'], sot.avg) - self.assertEqual(EXAMPLE['count'], sot.count) - self.assertEqual(EXAMPLE['duration'], sot.duration) - self.assertEqual(EXAMPLE['duration_end'], sot.duration_end_at) - self.assertEqual(EXAMPLE['duration_start'], sot.duration_start_at) - self.assertEqual(EXAMPLE['groupby'], sot.group_by) - self.assertEqual(EXAMPLE['max'], sot.max) - self.assertEqual(EXAMPLE['min'], sot.min) - self.assertEqual(EXAMPLE['period'], sot.period) - self.assertEqual(EXAMPLE['period_end'], sot.period_end_at) - self.assertEqual(EXAMPLE['period_start'], sot.period_start_at) - self.assertEqual(EXAMPLE['sum'], sot.sum) - self.assertEqual(EXAMPLE['unit'], sot.unit) - - def test_list(self): - sess = mock.Mock() - resp = mock.Mock() - resp.json = mock.Mock(return_value=[EXAMPLE]) - sess.get = mock.Mock(return_value=resp) - reply = 
statistics.Statistics.list(sess, meter_name='example') - - url = '/meters/example/statistics' - stat = next(reply) - sess.get.assert_called_with(url, endpoint_filter=stat.service, - params={}) - self.assertEqual(EXAMPLE['aggregate'], stat.aggregate) - self.assertEqual(EXAMPLE['avg'], stat.avg) - self.assertEqual(EXAMPLE['count'], stat.count) - self.assertEqual(EXAMPLE['duration'], stat.duration) - self.assertEqual(EXAMPLE['duration_end'], stat.duration_end_at) - self.assertEqual(EXAMPLE['duration_start'], stat.duration_start_at) - self.assertEqual(EXAMPLE['groupby'], stat.group_by) - self.assertEqual(EXAMPLE['max'], stat.max) - self.assertEqual(EXAMPLE['min'], stat.min) - self.assertEqual(EXAMPLE['period'], stat.period) - self.assertEqual(EXAMPLE['period_end'], stat.period_end_at) - self.assertEqual(EXAMPLE['period_start'], stat.period_start_at) - self.assertEqual(EXAMPLE['sum'], stat.sum) - self.assertEqual(EXAMPLE['unit'], stat.unit) - self.assertRaises(StopIteration, next, reply) diff --git a/openstack/tests/unit/test_connection.py b/openstack/tests/unit/test_connection.py index 909cca5ea7..9c21caa892 100644 --- a/openstack/tests/unit/test_connection.py +++ b/openstack/tests/unit/test_connection.py @@ -11,148 +11,91 @@ # under the License. 
import os +from unittest import mock import fixtures -from keystoneauth1 import session as ksa_session -import mock -import os_client_config +from keystoneauth1 import identity +from keystoneauth1 import session +import openstack.config from openstack import connection -from openstack import exceptions -from openstack import profile -from openstack import session +from openstack import proxy +from openstack import service_description +from openstack.tests import fakes from openstack.tests.unit import base +from openstack.tests.unit.fake import fake_service -CONFIG_AUTH_URL = "http://127.0.0.1:5000/v2.0" +CONFIG_AUTH_URL = "https://identity.example.com/" CONFIG_USERNAME = "BozoTheClown" CONFIG_PASSWORD = "TopSecret" CONFIG_PROJECT = "TheGrandPrizeGame" CONFIG_CACERT = "TrustMe" -CLOUD_CONFIG = """ +CLOUD_CONFIG = f""" clouds: - sample: + sample-cloud: region_name: RegionOne auth: - auth_url: {auth_url} - username: {username} - password: {password} - project_name: {project} - insecure: + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} + insecure-cloud: auth: - auth_url: {auth_url} - username: {username} - password: {password} - project_name: {project} - cacert: {cacert} + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} + cacert: {CONFIG_CACERT} + verify: False + insecure-cloud-alternative-format: + auth: + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} insecure: True - cacert: + cacert-cloud: auth: - auth_url: {auth_url} - username: {username} - password: {password} - project_name: {project} - cacert: {cacert} - insecure: False -""".format(auth_url=CONFIG_AUTH_URL, username=CONFIG_USERNAME, - password=CONFIG_PASSWORD, project=CONFIG_PROJECT, - cacert=CONFIG_CACERT) - - -class TestConnection(base.TestCase): - @mock.patch("openstack.session.Session") - 
def test_other_parameters(self, mock_session_init): - mock_session_init.return_value = mock_session_init - mock_profile = mock.Mock() - mock_profile.get_services = mock.Mock(return_value=[]) - conn = connection.Connection(profile=mock_profile, authenticator='2', - verify=True, cert='cert', user_agent='1') - args = {'auth': '2', 'user_agent': '1', 'verify': True, 'cert': 'cert'} - mock_session_init.assert_called_with(mock_profile, **args) - self.assertEqual(mock_session_init, conn.session) - - def test_session_provided(self): - mock_session = mock.Mock(spec=session.Session) - mock_profile = mock.Mock() - mock_profile.get_services = mock.Mock(return_value=[]) - conn = connection.Connection(session=mock_session, - profile=mock_profile, - user_agent='1') - self.assertEqual(mock_session, conn.session) + auth_url: {CONFIG_AUTH_URL} + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} + cacert: {CONFIG_CACERT} + profiled-cloud: + profile: dummy + auth: + username: {CONFIG_USERNAME} + password: {CONFIG_PASSWORD} + project_name: {CONFIG_PROJECT} + cacert: {CONFIG_CACERT} +""" + +VENDOR_CONFIG = f""" +{{ + "name": "dummy", + "profile": {{ + "auth": {{ + "auth_url": "{CONFIG_AUTH_URL}" + }}, + "vendor_hook": "openstack.tests.unit.test_connection:vendor_hook" + }} +}} +""" + +PUBLIC_CLOUDS_YAML = f""" +public-clouds: + dummy: + auth: + auth_url: {CONFIG_AUTH_URL} + vendor_hook: openstack.tests.unit.test_connection:vendor_hook +""" - def test_ksa_session_provided(self): - mock_session = mock.Mock(spec=ksa_session.Session) - mock_profile = mock.Mock() - mock_profile.get_services = mock.Mock(return_value=[]) - self.assertRaises(exceptions.SDKException, connection.Connection, - session=mock_session, profile=mock_profile, - user_agent='1') - - @mock.patch("keystoneauth1.loading.base.get_plugin_loader") - def test_create_authenticator(self, mock_get_plugin): - mock_plugin = mock.Mock() - mock_loader = mock.Mock() - mock_options = [ - 
mock.Mock(dest="auth_url"), - mock.Mock(dest="password"), - mock.Mock(dest="username"), - ] - mock_loader.get_options = mock.Mock(return_value=mock_options) - mock_loader.load_from_options = mock.Mock(return_value=mock_plugin) - mock_get_plugin.return_value = mock_loader - auth_args = { - 'auth_url': '0', - 'username': '1', - 'password': '2', - } - conn = connection.Connection(auth_plugin='v2password', **auth_args) - mock_get_plugin.assert_called_with('v2password') - mock_loader.load_from_options.assert_called_with(**auth_args) - self.assertEqual(mock_plugin, conn.authenticator) - - @mock.patch("keystoneauth1.loading.base.get_plugin_loader") - def test_default_plugin(self, mock_get_plugin): - connection.Connection() - self.assertTrue(mock_get_plugin.called) - self.assertEqual(mock_get_plugin.call_args, mock.call("password")) - - @mock.patch("keystoneauth1.loading.base.get_plugin_loader") - def test_pass_authenticator(self, mock_get_plugin): - mock_plugin = mock.Mock() - mock_get_plugin.return_value = None - conn = connection.Connection(authenticator=mock_plugin) - self.assertFalse(mock_get_plugin.called) - self.assertEqual(mock_plugin, conn.authenticator) - def test_create_session(self): - auth = mock.Mock() - prof = profile.Profile() - conn = connection.Connection(authenticator=auth, profile=prof) - self.assertEqual(auth, conn.authenticator) - self.assertEqual(prof, conn.profile) - self.assertEqual('openstack.telemetry.alarm.v2._proxy', - conn.alarm.__class__.__module__) - self.assertEqual('openstack.cluster.v1._proxy', - conn.cluster.__class__.__module__) - self.assertEqual('openstack.compute.v2._proxy', - conn.compute.__class__.__module__) - self.assertEqual('openstack.database.v1._proxy', - conn.database.__class__.__module__) - self.assertEqual('openstack.identity.v3._proxy', - conn.identity.__class__.__module__) - self.assertEqual('openstack.image.v2._proxy', - conn.image.__class__.__module__) - self.assertEqual('openstack.network.v2._proxy', - 
conn.network.__class__.__module__) - self.assertEqual('openstack.object_store.v1._proxy', - conn.object_store.__class__.__module__) - self.assertEqual('openstack.orchestration.v1._proxy', - conn.orchestration.__class__.__module__) - self.assertEqual('openstack.telemetry.v2._proxy', - conn.telemetry.__class__.__module__) - - def _prepare_test_config(self): +class _TestConnectionBase(base.TestCase): + def setUp(self): + super().setUp() # Create a temporary directory where our test config will live # and insert it into the search path via OS_CLIENT_CONFIG_FILE. config_dir = self.useFixture(fixtures.TempDir()).path @@ -161,80 +104,449 @@ def _prepare_test_config(self): with open(config_path, "w") as conf: conf.write(CLOUD_CONFIG) - self.useFixture(fixtures.EnvironmentVariable( - "OS_CLIENT_CONFIG_FILE", config_path)) + self.useFixture( + fixtures.EnvironmentVariable("OS_CLIENT_CONFIG_FILE", config_path) + ) + self.use_keystone_v2() - def test_from_config_given_data(self): - self._prepare_test_config() - data = os_client_config.OpenStackConfig().get_one_cloud("sample") +class TestConnection(_TestConnectionBase): + def test_other_parameters(self): + conn = connection.Connection(cloud='sample-cloud', cert='cert') + self.assertEqual(conn.session.cert, 'cert') - sot = connection.from_config(cloud_config=data) + def test_session_provided(self): + mock_session = mock.Mock(spec=session.Session) + mock_session.auth = identity.V3Password( + auth_url='https://auth.example.com', + password='passw0rd', + user_id='fake', + ) + conn = connection.Connection(session=mock_session, cert='cert') + self.assertEqual(mock_session, conn.session) + self.assertEqual('auth.example.com', conn.config.name) - self.assertEqual(CONFIG_USERNAME, - sot.authenticator._username) - self.assertEqual(CONFIG_PASSWORD, - sot.authenticator._password) - self.assertEqual(CONFIG_AUTH_URL, - sot.authenticator.auth_url) - self.assertEqual(CONFIG_PROJECT, - sot.authenticator._project_name) + def 
test_create_session(self): + conn = connection.Connection(cloud='sample-cloud') + self.assertIsNotNone(conn) + # TODO(mordred) Rework this - we need to provide requests-mock + # entries for each of the proxies below + # self.assertEqual('openstack.proxy', + # conn.alarm.__class__.__module__) + # self.assertEqual('openstack.clustering.v1._proxy', + # conn.clustering.__class__.__module__) + # self.assertEqual('openstack.compute.v2._proxy', + # conn.compute.__class__.__module__) + # self.assertEqual('openstack.database.v1._proxy', + # conn.database.__class__.__module__) + # self.assertEqual('openstack.identity.v2._proxy', + # conn.identity.__class__.__module__) + # self.assertEqual('openstack.image.v2._proxy', + # conn.image.__class__.__module__) + # self.assertEqual('openstack.object_store.v1._proxy', + # conn.object_store.__class__.__module__) + # self.assertEqual('openstack.load_balancer.v2._proxy', + # conn.load_balancer.__class__.__module__) + # self.assertEqual('openstack.orchestration.v1._proxy', + # conn.orchestration.__class__.__module__) + # self.assertEqual('openstack.workflow.v2._proxy', + # conn.workflow.__class__.__module__) + + def test_create_unknown_proxy(self): + self.register_uris( + [ + self.get_placement_discovery_mock_dict(), + ] + ) + + def closure(): + return self.cloud.placement + + self.assertIsInstance(self.cloud.placement, proxy.Proxy) + self.assert_calls() + + def test_create_connection_version_param_default(self): + c1 = connection.Connection(cloud='sample-cloud') + conn = connection.Connection(session=c1.session) + self.assertEqual( + 'openstack.identity.v3._proxy', conn.identity.__class__.__module__ + ) + + def test_create_connection_version_param_string(self): + c1 = connection.Connection(cloud='sample-cloud') + conn = connection.Connection( + session=c1.session, identity_api_version='2' + ) + self.assertEqual( + 'openstack.identity.v2._proxy', conn.identity.__class__.__module__ + ) + + def 
test_create_connection_version_param_int(self): + c1 = connection.Connection(cloud='sample-cloud') + conn = connection.Connection( + session=c1.session, identity_api_version=3 + ) + self.assertEqual( + 'openstack.identity.v3._proxy', conn.identity.__class__.__module__ + ) + + def test_create_connection_version_param_bogus(self): + c1 = connection.Connection(cloud='sample-cloud') + conn = connection.Connection( + session=c1.session, identity_api_version='red' + ) + # TODO(mordred) This is obviously silly behavior + self.assertEqual( + 'openstack.identity.v3._proxy', conn.identity.__class__.__module__ + ) + + def test_from_config_given_config(self): + cloud_region = openstack.config.OpenStackConfig().get_one( + "sample-cloud" + ) + + sot = connection.from_config(config=cloud_region) + + self.assertEqual( + CONFIG_USERNAME, sot.config.config['auth']['username'] + ) + self.assertEqual( + CONFIG_PASSWORD, sot.config.config['auth']['password'] + ) + self.assertEqual( + CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] + ) + self.assertEqual( + CONFIG_PROJECT, sot.config.config['auth']['project_name'] + ) + + def test_from_config_given_cloud(self): + sot = connection.from_config(cloud="sample-cloud") + + self.assertEqual( + CONFIG_USERNAME, sot.config.config['auth']['username'] + ) + self.assertEqual( + CONFIG_PASSWORD, sot.config.config['auth']['password'] + ) + self.assertEqual( + CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] + ) + self.assertEqual( + CONFIG_PROJECT, sot.config.config['auth']['project_name'] + ) + + def test_from_config_given_cloud_config(self): + cloud_region = openstack.config.OpenStackConfig().get_one( + "sample-cloud" + ) + + sot = connection.from_config(cloud_config=cloud_region) + + self.assertEqual( + CONFIG_USERNAME, sot.config.config['auth']['username'] + ) + self.assertEqual( + CONFIG_PASSWORD, sot.config.config['auth']['password'] + ) + self.assertEqual( + CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] + ) + 
self.assertEqual( + CONFIG_PROJECT, sot.config.config['auth']['project_name'] + ) + + def test_from_config_given_cloud_name(self): + sot = connection.from_config(cloud_name="sample-cloud") + + self.assertEqual( + CONFIG_USERNAME, sot.config.config['auth']['username'] + ) + self.assertEqual( + CONFIG_PASSWORD, sot.config.config['auth']['password'] + ) + self.assertEqual( + CONFIG_AUTH_URL, sot.config.config['auth']['auth_url'] + ) + self.assertEqual( + CONFIG_PROJECT, sot.config.config['auth']['project_name'] + ) - def test_from_config_given_name(self): - self._prepare_test_config() + def test_from_config_verify(self): + sot = connection.from_config(cloud="insecure-cloud") + self.assertFalse(sot.session.verify) - sot = connection.from_config(cloud_name="sample") + sot = connection.from_config(cloud="cacert-cloud") + self.assertEqual(CONFIG_CACERT, sot.session.verify) - self.assertEqual(CONFIG_USERNAME, - sot.authenticator._username) - self.assertEqual(CONFIG_PASSWORD, - sot.authenticator._password) - self.assertEqual(CONFIG_AUTH_URL, - sot.authenticator.auth_url) - self.assertEqual(CONFIG_PROJECT, - sot.authenticator._project_name) + def test_from_config_insecure(self): + # Ensure that the "insecure=True" flag implies "verify=False" + sot = connection.from_config("insecure-cloud-alternative-format") + self.assertFalse(sot.session.verify) - def test_from_config_given_options(self): - self._prepare_test_config() - version = "100" +class TestOsloConfig(_TestConnectionBase): + def test_from_conf(self): + c1 = connection.Connection(cloud='sample-cloud') + conn = connection.Connection( + session=c1.session, oslo_conf=self._load_ks_cfg_opts() + ) + # There was no config for keystone + self.assertIsInstance( + conn.identity, service_description._ServiceDisabledProxyShim + ) + # But nova was in there + self.assertEqual( + 'openstack.compute.v2._proxy', conn.compute.__class__.__module__ + ) + + def test_from_conf_filter_service_types(self): + c1 = 
connection.Connection(cloud='sample-cloud') + conn = connection.Connection( + session=c1.session, + oslo_conf=self._load_ks_cfg_opts(), + service_types={'orchestration', 'i-am-ignored'}, + ) + # There was no config for keystone + self.assertIsInstance( + conn.identity, service_description._ServiceDisabledProxyShim + ) + # Nova was in there, but disabled because not requested + self.assertIsInstance( + conn.compute, service_description._ServiceDisabledProxyShim + ) + + +class TestNetworkConnection(base.TestCase): + # Verify that if the catalog has the suffix we don't mess things up. + def test_network_proxy(self): + self.os_fixture.v3_token.remove_service('network') + svc = self.os_fixture.v3_token.add_service('network') + svc.add_endpoint( + interface='public', + url='https://network.example.com/v2.0', + region='RegionOne', + ) + self.use_keystone_v3() + self.assertEqual( + 'openstack.network.v2._proxy', + self.cloud.network.__class__.__module__, + ) + self.assert_calls() + self.assertEqual( + "https://network.example.com/v2.0", + self.cloud.network.get_endpoint(), + ) + + +class TestNetworkConnectionSuffix(base.TestCase): + # We need to do the neutron adapter test differently because it needs + # to actually get a catalog. 
+ + def test_network_proxy(self): + self.assertEqual( + 'openstack.network.v2._proxy', + self.cloud.network.__class__.__module__, + ) + self.assert_calls() + self.assertEqual( + "https://network.example.com/v2.0", + self.cloud.network.get_endpoint(), + ) + + +class TestAuthorize(base.TestCase): + def test_authorize_works(self): + res = self.cloud.authorize() + self.assertEqual('KeystoneToken-1', res) + + def test_authorize_failure(self): + self.use_broken_keystone() + + self.assertRaises( + openstack.exceptions.SDKException, self.cloud.authorize + ) + + +class TestNewService(base.TestCase): + def test_add_service_v1(self): + svc = self.os_fixture.v3_token.add_service('fake') + svc.add_endpoint( + interface='public', + region='RegionOne', + url=f'https://fake.example.com/v1/{fakes.PROJECT_ID}', + ) + self.use_keystone_v3() + conn = self.cloud + + service = fake_service.FakeService('fake') + + conn.add_service(service) + + # Ensure no discovery calls made + self.assertEqual(0, len(self.adapter.request_history)) + + self.register_uris( + [ + dict( + method='GET', + uri='https://fake.example.com', + status_code=404, + ), + dict( + method='GET', + uri='https://fake.example.com/v1/', + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url('fake'), + status_code=404, + ), + ] + ) + + self.assertEqual( + 'openstack.tests.unit.fake.v1._proxy', + conn.fake.__class__.__module__, + ) + self.assertTrue(conn.fake.dummy()) + + def test_add_service_v2(self): + svc = self.os_fixture.v3_token.add_service('fake') + svc.add_endpoint( + interface='public', + region='RegionOne', + url=f'https://fake.example.com/v2/{fakes.PROJECT_ID}', + ) + self.use_keystone_v3() + conn = self.cloud + + self.register_uris( + [ + dict( + method='GET', + uri='https://fake.example.com', + status_code=404, + ), + dict( + method='GET', + uri='https://fake.example.com/v2/', + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url('fake'), + status_code=404, + ), + ] + ) + + 
service = fake_service.FakeService('fake') + + conn.add_service(service) + + self.assertEqual( + 'openstack.tests.unit.fake.v2._proxy', + conn.fake.__class__.__module__, + ) + self.assertFalse(conn.fake.dummy()) + + def test_replace_system_service(self): + svc = self.os_fixture.v3_token.add_service('fake') + svc.add_endpoint( + interface='public', + region='RegionOne', + url=f'https://fake.example.com/v2/{fakes.PROJECT_ID}', + ) + self.use_keystone_v3() + conn = self.cloud + + # delete native dns service + delattr(conn, 'dns') + + self.register_uris( + [ + dict( + method='GET', + uri='https://fake.example.com', + status_code=404, + ), + dict( + method='GET', + uri='https://fake.example.com/v2/', + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url('fake'), + status_code=404, + ), + ] + ) + + # add fake service with alias 'DNS' + service = fake_service.FakeService('fake', aliases=['dns']) + conn.add_service(service) + + # ensure dns service responds as we expect from replacement + self.assertFalse(conn.dns.dummy()) + + +def vendor_hook(conn): + setattr(conn, 'test', 'test_val') + + +class TestVendorProfile(base.TestCase): + def setUp(self): + super().setUp() + # Create a temporary directory where our test config will live + # and insert it into the search path via OS_CLIENT_CONFIG_FILE. 
+ config_dir = self.useFixture(fixtures.TempDir()).path + config_path = os.path.join(config_dir, "clouds.yaml") + public_clouds = os.path.join(config_dir, "clouds-public.yaml") - class Opts(object): - compute_api_version = version + with open(config_path, "w") as conf: + conf.write(CLOUD_CONFIG) - sot = connection.from_config(cloud_name="sample", options=Opts) + with open(public_clouds, "w") as conf: + conf.write(PUBLIC_CLOUDS_YAML) - pref = sot.session.profile.get_filter("compute") + self.useFixture( + fixtures.EnvironmentVariable("OS_CLIENT_CONFIG_FILE", config_path) + ) + self.use_keystone_v2() - # NOTE: Along the way, the `v` prefix gets added so we can build - # up URLs with it. - self.assertEqual("v" + version, pref.version) + self.config = openstack.config.loader.OpenStackConfig( + vendor_files=[public_clouds] + ) - def test_from_config_verify(self): - self._prepare_test_config() + def test_conn_from_profile(self): + self.cloud = self.config.get_one(cloud='profiled-cloud') - sot = connection.from_config(cloud_name="insecure") - self.assertFalse(sot.session.verify) + conn = connection.Connection(config=self.cloud) - sot = connection.from_config(cloud_name="cacert") - self.assertEqual(CONFIG_CACERT, sot.session.verify) + self.assertIsNotNone(conn) - def test_authorize_works(self): - fake_session = mock.Mock(spec=session.Session) - fake_headers = {'X-Auth-Token': 'FAKE_TOKEN'} - fake_session.get_auth_headers.return_value = fake_headers - - sot = connection.Connection(session=fake_session, - authenticator=mock.Mock()) - res = sot.authorize() - self.assertEqual('FAKE_TOKEN', res) - - def test_authorize_silent_failure(self): - fake_session = mock.Mock(spec=session.Session) - fake_session.get_auth_headers.return_value = None - fake_session.__module__ = 'openstack.session' - - sot = connection.Connection(session=fake_session, - authenticator=mock.Mock()) - res = sot.authorize() - self.assertIsNone(res) + def test_hook_from_profile(self): + self.cloud = 
self.config.get_one(cloud='profiled-cloud') + + conn = connection.Connection(config=self.cloud) + + self.assertEqual('test_val', conn.test) + + def test_hook_from_connection_param(self): + conn = connection.Connection( + cloud='sample-cloud', + vendor_hook='openstack.tests.unit.test_connection:vendor_hook', + ) + + self.assertEqual('test_val', conn.test) + + def test_hook_from_connection_ignore_missing(self): + conn = connection.Connection( + cloud='sample-cloud', + vendor_hook='openstack.tests.unit.test_connection:missing', + ) + + self.assertIsNotNone(conn) diff --git a/openstack/tests/unit/test_exceptions.py b/openstack/tests/unit/test_exceptions.py index 54a95b1fb4..1f11f218a4 100644 --- a/openstack/tests/unit/test_exceptions.py +++ b/openstack/tests/unit/test_exceptions.py @@ -10,48 +10,270 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools +import json +from unittest import mock +import uuid +import warnings from openstack import exceptions +from openstack.tests.unit import base +from openstack.tests.unit import fakes -class Test_Exception(testtools.TestCase): +class Test_Exception(base.TestCase): def test_method_not_supported(self): exc = exceptions.MethodNotSupported(self.__class__, 'list') - expected = ('The list method is not supported for ' + - 'openstack.tests.unit.test_exceptions.Test_Exception') + expected = ( + 'The list method is not supported for ' + 'openstack.tests.unit.test_exceptions.Test_Exception' + ) self.assertEqual(expected, str(exc)) -class Test_HttpException(testtools.TestCase): - +class Test_HttpException(base.TestCase): def setUp(self): - super(Test_HttpException, self).setUp() - self.message = "mayday" + super().setUp() + self.message = 'mayday' + self.response = fakes.FakeResponse( + status_code=401, + data={ + 'error': { + 'code': 401, + 'message': ( + 'The request you have made requires authentication.' 
+ ), + 'title': 'Unauthorized', + }, + }, + ) def _do_raise(self, *args, **kwargs): raise exceptions.HttpException(*args, **kwargs) def test_message(self): - exc = self.assertRaises(exceptions.HttpException, - self._do_raise, self.message) + exc = self.assertRaises( + exceptions.HttpException, + self._do_raise, + self.message, + response=self.response, + ) self.assertEqual(self.message, exc.message) def test_details(self): details = "some details" - exc = self.assertRaises(exceptions.HttpException, - self._do_raise, self.message, - details=details) + exc = self.assertRaises( + exceptions.HttpException, + self._do_raise, + self.message, + response=self.response, + details=details, + ) self.assertEqual(self.message, exc.message) self.assertEqual(details, exc.details) def test_http_status(self): http_status = 123 - exc = self.assertRaises(exceptions.HttpException, - self._do_raise, self.message, - http_status=http_status) + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter('always') + + exc = self.assertRaises( + exceptions.HttpException, + self._do_raise, + self.message, + http_status=http_status, + ) + + self.assertEqual(self.message, exc.message) + self.assertEqual(http_status, exc.status_code) + + self.assertIn( + "The 'http_status' parameter is unnecessary", + str(w[-1]), + ) + + +class TestRaiseFromResponse(base.TestCase): + def setUp(self): + super().setUp() + self.message = "Where is my kitty?" 
+ + def _do_raise(self, *args, **kwargs): + return exceptions.raise_from_response(*args, **kwargs) + def test_raise_no_exception(self): + response = mock.Mock() + response.status_code = 200 + self.assertIsNone(self._do_raise(response)) + + def test_raise_not_found_exception(self): + response = mock.Mock() + response.status_code = 404 + response.headers = { + 'content-type': 'application/json', + 'x-openstack-request-id': uuid.uuid4().hex, + } + exc = self.assertRaises( + exceptions.NotFoundException, + self._do_raise, + response, + error_message=self.message, + ) + self.assertEqual(self.message, exc.message) + self.assertEqual(response.status_code, exc.status_code) + self.assertEqual( + response.headers.get('x-openstack-request-id'), exc.request_id + ) + + def test_raise_bad_request_exception(self): + response = mock.Mock() + response.status_code = 400 + response.headers = { + 'content-type': 'application/json', + 'x-openstack-request-id': uuid.uuid4().hex, + } + exc = self.assertRaises( + exceptions.BadRequestException, + self._do_raise, + response, + error_message=self.message, + ) + self.assertEqual(self.message, exc.message) + self.assertEqual(response.status_code, exc.status_code) + self.assertEqual( + response.headers.get('x-openstack-request-id'), exc.request_id + ) + + def test_raise_http_exception(self): + response = mock.Mock() + response.status_code = 403 + response.headers = { + 'content-type': 'application/json', + 'x-openstack-request-id': uuid.uuid4().hex, + } + exc = self.assertRaises( + exceptions.HttpException, + self._do_raise, + response, + error_message=self.message, + ) self.assertEqual(self.message, exc.message) - self.assertEqual(http_status, exc.http_status) + self.assertEqual(response.status_code, exc.status_code) + self.assertEqual( + response.headers.get('x-openstack-request-id'), exc.request_id + ) + + def test_raise_compute_format(self): + response = mock.Mock() + response.status_code = 404 + response.headers = { + 'content-type': 
'application/json', + } + response.json.return_value = { + 'itemNotFound': { + 'message': self.message, + 'code': 404, + } + } + exc = self.assertRaises( + exceptions.NotFoundException, + self._do_raise, + response, + error_message=self.message, + ) + self.assertEqual(response.status_code, exc.status_code) + self.assertEqual(self.message, exc.details) + self.assertIn(self.message, str(exc)) + + def test_raise_network_format(self): + response = mock.Mock() + response.status_code = 404 + response.headers = { + 'content-type': 'application/json', + } + response.json.return_value = { + 'NeutronError': { + 'message': self.message, + 'type': 'FooNotFound', + 'detail': '', + } + } + exc = self.assertRaises( + exceptions.NotFoundException, + self._do_raise, + response, + error_message=self.message, + ) + self.assertEqual(response.status_code, exc.status_code) + self.assertEqual(self.message, exc.details) + self.assertIn(self.message, str(exc)) + + def test_raise_baremetal_old_format(self): + response = mock.Mock() + response.status_code = 404 + response.headers = { + 'content-type': 'application/json', + } + response.json.return_value = { + 'error_message': json.dumps( + { + 'faultstring': self.message, + 'faultcode': 'Client', + 'debuginfo': None, + } + ) + } + exc = self.assertRaises( + exceptions.NotFoundException, + self._do_raise, + response, + error_message=self.message, + ) + self.assertEqual(response.status_code, exc.status_code) + self.assertEqual(self.message, exc.details) + self.assertIn(self.message, str(exc)) + + def test_raise_baremetal_corrected_format(self): + response = mock.Mock() + response.status_code = 404 + response.headers = { + 'content-type': 'application/json', + } + response.json.return_value = { + 'error_message': { + 'faultstring': self.message, + 'faultcode': 'Client', + 'debuginfo': None, + } + } + exc = self.assertRaises( + exceptions.NotFoundException, + self._do_raise, + response, + error_message=self.message, + ) + 
self.assertEqual(response.status_code, exc.status_code) + self.assertEqual(self.message, exc.details) + self.assertIn(self.message, str(exc)) + + def test_raise_wsme_format(self): + response = mock.Mock() + response.status_code = 404 + response.headers = { + 'content-type': 'application/json', + } + response.json.return_value = { + 'faultstring': self.message, + 'faultcode': 'Client', + 'debuginfo': None, + } + exc = self.assertRaises( + exceptions.NotFoundException, + self._do_raise, + response, + error_message=self.message, + ) + self.assertEqual(response.status_code, exc.status_code) + self.assertEqual(self.message, exc.details) + self.assertIn(self.message, str(exc)) diff --git a/openstack/tests/unit/test_fakes.py b/openstack/tests/unit/test_fakes.py new file mode 100644 index 0000000000..8013d80682 --- /dev/null +++ b/openstack/tests/unit/test_fakes.py @@ -0,0 +1,95 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import format as _format +from openstack import resource +from openstack.test import fakes +from openstack.tests.unit import base + + +class TestGetFake(base.TestCase): + def test_generate_fake_resource_one(self): + res = fakes.generate_fake_resource(resource.Resource) + self.assertIsInstance(res, resource.Resource) + + def test_generate_fake_resource_list(self): + res = list(fakes.generate_fake_resources(resource.Resource, 2)) + self.assertEqual(2, len(res)) + self.assertIsInstance(res[0], resource.Resource) + + def test_generate_fake_resource_types(self): + class Foo(resource.Resource): + a = resource.Body("a", type=str) + b = resource.Body("b", type=int) + c = resource.Body("c", type=bool) + d = resource.Body("d", type=_format.BoolStr) + e = resource.Body("e", type=dict) + f = resource.URI("path") + + class Bar(resource.Resource): + a = resource.Body("a", type=list, list_type=str) + b = resource.Body("b", type=list, list_type=dict) + c = resource.Body("c", type=list, list_type=Foo) + + foo = fakes.generate_fake_resource(Foo) + self.assertIsInstance(foo.a, str) + self.assertIsInstance(foo.b, int) + self.assertIsInstance(foo.c, bool) + self.assertIsInstance(foo.d, bool) + self.assertIsInstance(foo.e, dict) + self.assertIsInstance(foo.f, str) + + bar = fakes.generate_fake_resource(Bar) + self.assertIsInstance(bar.a, list) + self.assertEqual(1, len(bar.a)) + self.assertIsInstance(bar.a[0], str) + self.assertIsInstance(bar.b, list) + self.assertEqual(1, len(bar.b)) + self.assertIsInstance(bar.b[0], dict) + self.assertIsInstance(bar.c, list) + self.assertEqual(1, len(bar.c)) + self.assertIsInstance(bar.c[0], Foo) + self.assertIsInstance(bar.c[0].a, str) + self.assertIsInstance(bar.c[0].b, int) + self.assertIsInstance(bar.c[0].c, bool) + self.assertIsInstance(bar.c[0].d, bool) + self.assertIsInstance(bar.c[0].e, dict) + self.assertIsInstance(bar.c[0].f, str) + + def test_generate_fake_resource_attrs(self): + class Fake(resource.Resource): + a = 
resource.Body("a", type=str) + b = resource.Body("b", type=str) + + res = fakes.generate_fake_resource(Fake, b="bar") + self.assertIsInstance(res.a, str) + self.assertIsInstance(res.b, str) + self.assertEqual("bar", res.b) + + def test_generate_fake_resource_types_inherit(self): + class Fake(resource.Resource): + a = resource.Body("a", type=str) + + class FakeInherit(resource.Resource): + a = resource.Body("a", type=Fake) + + res = fakes.generate_fake_resource(FakeInherit) + self.assertIsInstance(res.a, Fake) + self.assertIsInstance(res.a.a, str) + + def test_unknown_attrs_as_props(self): + class Fake(resource.Resource): + properties = resource.Body("properties") + _store_unknown_attrs_as_properties = True + + res = fakes.generate_fake_resource(Fake) + self.assertIsInstance(res.properties, dict) diff --git a/openstack/tests/unit/test_fields.py b/openstack/tests/unit/test_fields.py new file mode 100644 index 0000000000..e60be0f66f --- /dev/null +++ b/openstack/tests/unit/test_fields.py @@ -0,0 +1,325 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack import fields +from openstack import format +from openstack import resource +from openstack.tests.unit import base + + +class TestConvertValue(base.TestCase): + def test_convert_value(self): + class FakeResource(resource.Resource): + abc = fields.Body('abc', type=int) + + test_data = [ + { + 'name': 'no data_type', + 'value': '123', + 'data_type': None, + 'expected': '123', + }, + { + 'name': 'convert list to list with no list_type', + 'value': ['123'], + 'data_type': list, + 'expected': ['123'], + }, + { + 'name': 'convert tuple to list with no list_type', + 'value': ('123',), + 'data_type': list, + 'expected': ['123'], + }, + { + 'name': 'convert set to list with no list_type', + 'value': {'123'}, + 'data_type': list, + 'expected': ['123'], + }, + { + 'name': 'convert list to list with list_type', + 'value': ['123'], + 'data_type': list, + 'list_type': int, + 'expected': [123], + }, + { + 'name': 'convert tuple to list with list_type', + 'value': ('123',), + 'data_type': list, + 'list_type': int, + 'expected': [123], + }, + { + 'name': 'convert set to list with list_type', + 'value': {'123'}, + 'data_type': list, + 'list_type': int, + 'expected': [123], + }, + { + 'name': 'convert with formatter', + 'value': 'true', + 'data_type': format.BoolStr, + 'expected': True, + }, + { + 'name': 'convert to resource', + 'value': {'abc': '123'}, + 'data_type': FakeResource, + # NOTE(stephenfin): The Resource.__eq__ compares the underlying + # value types, not the converted value types, so we need a + # string here + 'expected': FakeResource(abc='123'), + }, + { + 'name': 'convert string to int', + 'value': '123', + 'data_type': int, + 'expected': 123, + }, + { + 'name': 'convert invalid string to int', + 'value': 'abc', + 'data_type': int, + 'expected': 0, + }, + { + 'name': 'convert valid int to string', + 'value': 123, + 'data_type': str, + 'expected': '123', + }, + { + 'name': 'convert string to bool', + 'value': 'anything', + 'data_type': bool, + 
'expected': True, + }, + ] + + for data in test_data: + with self.subTest(msg=data['name']): + ret = fields._convert_type( + data['value'], data['data_type'], data.get('list_type') + ) + self.assertEqual(ret, data['expected']) + + +class TestComponent(base.TestCase): + class ExampleComponent(fields._BaseComponent): + key = "_example" + + # Since we're testing ExampleComponent, which is as isolated as we + # can test _BaseComponent due to it's needing to be a data member + # of a class that has an attribute on the parent class named `key`, + # each test has to implement a class with a name that is the same + # as ExampleComponent.key, which should be a dict containing the + # keys and values to test against. + + def test_implementations(self): + self.assertEqual("_body", fields.Body.key) + self.assertEqual("_header", fields.Header.key) + self.assertEqual("_uri", fields.URI.key) + + def test_creation(self): + sot = fields._BaseComponent( + "name", type=int, default=1, alternate_id=True, aka="alias" + ) + + self.assertEqual("name", sot.name) + self.assertEqual(int, sot.type) + self.assertEqual(1, sot.default) + self.assertEqual("alias", sot.aka) + self.assertTrue(sot.alternate_id) + + def test_get_no_instance(self): + sot = fields._BaseComponent("test") + + # Test that we short-circuit everything when given no instance. + result = sot.__get__(None, None) + self.assertIs(sot, result) + + # NOTE: Some tests will use a default=1 setting when testing result + # values that should be None because the default-for-default is also None. + def test_get_name_None(self): + name = "name" + + class Parent: + _example = {name: None} + + instance = Parent() + sot = TestComponent.ExampleComponent(name, default=1) + + # Test that we short-circuit any typing of a None value. 
+ result = sot.__get__(instance, None) + self.assertIsNone(result) + + def test_get_default(self): + expected_result = 123 + + class Parent: + _example = {} + + instance = Parent() + # NOTE: type=dict but the default value is an int. If we didn't + # short-circuit the typing part of __get__ it would fail. + sot = TestComponent.ExampleComponent( + "name", type=dict, default=expected_result + ) + + # Test that we directly return any default value. + result = sot.__get__(instance, None) + self.assertEqual(expected_result, result) + + def test_get_name_untyped(self): + name = "name" + expected_result = 123 + + class Parent: + _example = {name: expected_result} + + instance = Parent() + sot = TestComponent.ExampleComponent("name") + + # Test that we return any the value as it is set. + result = sot.__get__(instance, None) + self.assertEqual(expected_result, result) + + # The code path for typing after a raw value has been found is the same. + def test_get_name_typed(self): + name = "name" + value = "123" + + class Parent: + _example = {name: value} + + instance = Parent() + sot = TestComponent.ExampleComponent("name", type=int) + + # Test that we run the underlying value through type conversion. + result = sot.__get__(instance, None) + self.assertEqual(int(value), result) + + def test_get_name_formatter(self): + name = "name" + value = "123" + expected_result = "one hundred twenty three" + + class Parent: + _example = {name: value} + + class FakeFormatter(format.Formatter): + @classmethod + def deserialize(cls, value): + return expected_result + + instance = Parent() + sot = TestComponent.ExampleComponent("name", type=FakeFormatter) + + # Mock out issubclass rather than having an actual format.Formatter + # This can't be mocked via decorator, isolate it to wrapping the call. 
+ result = sot.__get__(instance, None) + self.assertEqual(expected_result, result) + + def test_set_name_untyped(self): + name = "name" + expected_value = "123" + + class Parent: + _example = {} + + instance = Parent() + sot = TestComponent.ExampleComponent("name") + + # Test that we don't run the value through type conversion. + sot.__set__(instance, expected_value) + self.assertEqual(expected_value, instance._example[name]) + + def test_set_name_typed(self): + expected_value = "123" + + class Parent: + _example = {} + + instance = Parent() + + # The type we give to ExampleComponent has to be an actual type, + # not an instance, so we can't get the niceties of a mock.Mock + # instance that would allow us to call `assert_called_once_with` to + # ensure that we're sending the value through the type. + # Instead, we use this tiny version of a similar thing. + class FakeType: + calls = [] + + def __init__(self, arg): + FakeType.calls.append(arg) + + sot = TestComponent.ExampleComponent("name", type=FakeType) + + # Test that we run the value through type conversion. + sot.__set__(instance, expected_value) + self.assertEqual([expected_value], FakeType.calls) + + def test_set_name_formatter(self): + expected_value = "123" + + class Parent: + _example = {} + + instance = Parent() + + # As with test_set_name_typed, create a pseudo-Mock to track what + # gets called on the type. + class FakeFormatter(format.Formatter): + calls = [] + + @classmethod + def deserialize(cls, arg): + FakeFormatter.calls.append(arg) + + sot = TestComponent.ExampleComponent("name", type=FakeFormatter) + + # Test that we run the value through type conversion. 
+ sot.__set__(instance, expected_value) + self.assertEqual([expected_value], FakeFormatter.calls) + + def test_delete_name(self): + name = "name" + expected_value = "123" + + class Parent: + _example = {name: expected_value} + + instance = Parent() + + sot = TestComponent.ExampleComponent("name") + + sot.__delete__(instance) + + self.assertNotIn(name, instance._example) + + def test_delete_name_doesnt_exist(self): + name = "name" + expected_value = "123" + + class Parent: + _example = {"what": expected_value} + + instance = Parent() + + sot = TestComponent.ExampleComponent(name) + + sot.__delete__(instance) + + self.assertNotIn(name, instance._example) diff --git a/openstack/tests/unit/test_format.py b/openstack/tests/unit/test_format.py index f78454dd58..532133502f 100644 --- a/openstack/tests/unit/test_format.py +++ b/openstack/tests/unit/test_format.py @@ -10,13 +10,11 @@ # License for the specific language governing permissions and limitations # under the License. -import testtools - from openstack import format +from openstack.tests.unit import base -class TestBoolStrFormatter(testtools.TestCase): - +class TestBoolStrFormatter(base.TestCase): def test_deserialize(self): self.assertTrue(format.BoolStr.deserialize(True)) self.assertTrue(format.BoolStr.deserialize('True')) @@ -29,10 +27,3 @@ def test_deserialize(self): self.assertRaises(ValueError, format.BoolStr.deserialize, None) self.assertRaises(ValueError, format.BoolStr.deserialize, '') self.assertRaises(ValueError, format.BoolStr.deserialize, 'INVALID') - - def test_serialize(self): - self.assertEqual('true', format.BoolStr.serialize(True)) - self.assertEqual('false', format.BoolStr.serialize(False)) - self.assertRaises(ValueError, format.BoolStr.serialize, None) - self.assertRaises(ValueError, format.BoolStr.serialize, '') - self.assertRaises(ValueError, format.BoolStr.serialize, 'True') diff --git a/openstack/tests/unit/test_hacking.py b/openstack/tests/unit/test_hacking.py new file mode 100644 index 
0000000000..df4ce222f6 --- /dev/null +++ b/openstack/tests/unit/test_hacking.py @@ -0,0 +1,96 @@ +# Copyright 2019 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack._hacking import checks +from openstack.tests.unit import base + + +class HackingTestCase(base.TestCase): + """This class tests the hacking checks in openstack._hacking.checks. + + It works by passing strings to the check methods like the pep8/flake8 + parser would. The parser loops over each line in the file and then passes + the parameters to the check method. The parameter names in the check method + dictate what type of object is passed to the check method. + + The parameter types are:: + + logical_line: A processed line with the following modifications: + - Multi-line statements converted to a single line. + - Stripped left and right. + - Contents of strings replaced with "xxx" of same length. + - Comments removed. + physical_line: Raw line of text from the input file. 
+ lines: a list of the raw lines from the input file + tokens: the tokens that contribute to this logical line + line_number: line number in the input file + total_lines: number of lines in the input file + blank_lines: blank lines before this one + indent_char: indentation character in this file (" " or "\t") + indent_level: indentation (with tabs expanded to multiples of 8) + previous_indent_level: indentation on previous line + previous_logical: previous logical line + filename: Path of the file being run through pep8 + + When running a test on a check method the return will be False/None if + there is no violation in the sample input. If there is an error a tuple is + returned with a position in the line, and a message. So to check the result + just assertTrue if the check is expected to fail and assertFalse if it + should pass. + """ + + def test_assert_no_setupclass(self): + self.assertEqual( + len(list(checks.assert_no_setupclass("def setUpClass(cls)"))), 1 + ) + + self.assertEqual( + len(list(checks.assert_no_setupclass("# setUpClass is evil"))), 0 + ) + + self.assertEqual( + len( + list( + checks.assert_no_setupclass( + "def setUpClassyDrinkingLocation(cls)" + ) + ) + ), + 0, + ) + + def test_assert_no_deprecated_exceptions(self): + self.assertEqual( + len( + list( + checks.assert_no_deprecated_exceptions( + "raise exc.OpenStackCloudTimeout", + "openstack/cloud/compute.py", + ) + ) + ), + 1, + ) + + self.assertEqual( + len( + list( + checks.assert_no_deprecated_exceptions( + "raise exc.OpenStackCloudTimeout", + "openstack/cloud/exc.py", + ) + ) + ), + 0, + ) diff --git a/openstack/tests/unit/test_microversions.py b/openstack/tests/unit/test_microversions.py new file mode 100644 index 0000000000..a72189f0d4 --- /dev/null +++ b/openstack/tests/unit/test_microversions.py @@ -0,0 +1,144 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import exceptions +from openstack.tests import fakes +from openstack.tests.unit import base + + +class TestMicroversions(base.TestCase): + def setUp(self): + super().setUp() + self.use_compute_discovery() + + def test_get_bad_inferred_max_microversion(self): + self.cloud.config.config['compute_api_version'] = '2.61' + + self.assertRaises( + exceptions.ConfigException, + self.cloud.get_server, + 'doesNotExist', + ) + + self.assert_calls() + + def test_get_bad_default_max_microversion(self): + self.cloud.config.config['compute_default_microversion'] = '2.61' + + self.assertRaises( + exceptions.ConfigException, + self.cloud.get_server, + 'doesNotExist', + ) + + self.assert_calls() + + def test_get_bad_inferred_min_microversion(self): + self.cloud.config.config['compute_api_version'] = '2.7' + + self.assertRaises( + exceptions.ConfigException, + self.cloud.get_server, + 'doesNotExist', + ) + + self.assert_calls() + + def test_get_bad_default_min_microversion(self): + self.cloud.config.config['compute_default_microversion'] = '2.7' + + self.assertRaises( + exceptions.ConfigException, + self.cloud.get_server, + 'doesNotExist', + ) + + self.assert_calls() + + def test_inferred_default_microversion(self): + self.cloud.config.config['compute_api_version'] = '2.42' + + server = fakes.make_fake_server('123', 'mickey') + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'mickey'] + ), + request_headers={'OpenStack-API-Version': 'compute 2.42'}, + status_code=404, + ), + dict( + 
method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=mickey'], + ), + request_headers={'OpenStack-API-Version': 'compute 2.42'}, + json={'servers': [server]}, + ), + ] + ) + + r = self.cloud.get_server('mickey', bare=True) + self.assertIsNotNone(r) + self.assertEqual(server['name'], r['name']) + + self.assert_calls() + + def test_default_microversion(self): + self.cloud.config.config['compute_default_microversion'] = '2.42' + + server = fakes.make_fake_server('123', 'mickey') + + self.register_uris( + [ + dict( + method='GET', + uri=self.get_mock_url( + 'compute', 'public', append=['servers', 'mickey'] + ), + request_headers={'OpenStack-API-Version': 'compute 2.42'}, + status_code=404, + ), + dict( + method='GET', + uri=self.get_mock_url( + 'compute', + 'public', + append=['servers', 'detail'], + qs_elements=['name=mickey'], + ), + request_headers={'OpenStack-API-Version': 'compute 2.42'}, + json={'servers': [server]}, + ), + ] + ) + + r = self.cloud.get_server('mickey', bare=True) + self.assertIsNotNone(r) + self.assertEqual(server['name'], r['name']) + + self.assert_calls() + + def test_conflicting_implied_and_direct(self): + self.cloud.config.config['compute_default_microversion'] = '2.7' + self.cloud.config.config['compute_api_version'] = '2.13' + + self.assertRaises(exceptions.ConfigException, self.cloud.get_server) + + # We should fail before we even authenticate + self.assertEqual(0, len(self.adapter.request_history)) diff --git a/openstack/tests/unit/test_missing_version.py b/openstack/tests/unit/test_missing_version.py new file mode 100644 index 0000000000..1efc0ac920 --- /dev/null +++ b/openstack/tests/unit/test_missing_version.py @@ -0,0 +1,55 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import warnings + +import testtools + +from openstack import exceptions +from openstack import proxy +from openstack.tests.unit import base + + +class TestMissingVersion(base.TestCase): + def setUp(self): + super().setUp() + self.os_fixture.clear_tokens() + svc = self.os_fixture.v3_token.add_service('image') + svc.add_endpoint( + url='https://example.com/image/', + region='RegionOne', + interface='public', + ) + self.use_keystone_v3() + self.use_glance( + image_version_json='bad-glance-version.json', + image_discovery_url='https://example.com/image/', + ) + + def test_unsupported_version(self): + with testtools.ExpectedException(exceptions.NotSupported): + self.cloud.image.get('/') + + self.assert_calls() + + def test_unsupported_version_override(self): + self.cloud.config.config['image_api_version'] = '7' + with warnings.catch_warnings(record=True) as w: + warnings.simplefilter("always") + warnings.simplefilter("ignore", DeprecationWarning) + self.assertIsInstance(self.cloud.image, proxy.Proxy) + self.assertEqual(1, len(w)) + self.assertIn( + "Service image has no discoverable version.", + str(w[-1].message), + ) + self.assert_calls() diff --git a/openstack/tests/unit/test_placement_rest.py b/openstack/tests/unit/test_placement_rest.py new file mode 100644 index 0000000000..32529ae15e --- /dev/null +++ b/openstack/tests/unit/test_placement_rest.py @@ -0,0 +1,109 @@ +# Copyright (c) 2018 Red Hat, Inc. + +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. 
You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +import ddt +from keystoneauth1 import exceptions + +from openstack.tests.unit import base + + +@ddt.ddt +class TestPlacementRest(base.TestCase): + def setUp(self): + super().setUp() + self.use_placement() + + def _register_uris(self, status_code=None): + uri = dict( + method='GET', + uri=self.get_mock_url( + 'placement', 'public', append=['allocation_candidates'] + ), + json={}, + ) + if status_code is not None: + uri['status_code'] = status_code + self.register_uris([uri]) + + def _validate_resp(self, resp, status_code): + self.assertEqual(status_code, resp.status_code) + self.assertEqual( + 'https://placement.example.com/allocation_candidates', resp.url + ) + self.assert_calls() + + @ddt.data({}, {'raise_exc': False}, {'raise_exc': True}) + def test_discovery(self, get_kwargs): + self._register_uris() + # Regardless of raise_exc, a <400 response doesn't raise + rs = self.cloud.placement.get('/allocation_candidates', **get_kwargs) + self._validate_resp(rs, 200) + + @ddt.data({}, {'raise_exc': False}) + def test_discovery_err(self, get_kwargs): + self._register_uris(status_code=500) + # >=400 doesn't raise by default or with explicit raise_exc=False + rs = self.cloud.placement.get('/allocation_candidates', **get_kwargs) + self._validate_resp(rs, 500) + + def test_discovery_exc(self): + self._register_uris(status_code=500) + # raise_exc=True raises a ksa exception appropriate to the status code + ex = self.assertRaises( + exceptions.InternalServerError, + self.cloud.placement.get, + '/allocation_candidates', + raise_exc=True, + ) + 
self._validate_resp(ex.response, 500) + + def test_microversion_discovery(self): + self.assertEqual( + (1, 17), self.cloud.placement.get_endpoint_data().max_microversion + ) + self.assert_calls() + + +class TestBadPlacementRest(base.TestCase): + def setUp(self): + self.skipTest('Need to re-add support for broken placement versions') + super().setUp() + # The bad-placement.json is for older placement that was + # missing the status field from its discovery doc. This + # lets us show that we can talk to such a placement. + self.use_placement(discovery_fixture='bad-placement.json') + + def _register_uris(self, status_code=None): + uri = dict( + method='GET', + uri=self.get_mock_url( + 'placement', 'public', append=['allocation_candidates'] + ), + json={}, + ) + if status_code is not None: + uri['status_code'] = status_code + self.register_uris([uri]) + + def _validate_resp(self, resp, status_code): + self.assertEqual(status_code, resp.status_code) + self.assertEqual( + 'https://placement.example.com/allocation_candidates', resp.url + ) + self.assert_calls() + + def test_discovery(self): + self._register_uris() + rs = self.cloud.placement.get('/allocation_candidates') + self._validate_resp(rs, 200) diff --git a/openstack/tests/unit/test_profile.py b/openstack/tests/unit/test_profile.py deleted file mode 100644 index b18b7eaa44..0000000000 --- a/openstack/tests/unit/test_profile.py +++ /dev/null @@ -1,103 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -from openstack import exceptions -from openstack import profile -from openstack.tests.unit import base - - -class TestProfile(base.TestCase): - def test_init(self): - prof = profile.Profile() - expected = [ - 'alarming', - 'baremetal', - 'clustering', - 'compute', - 'database', - 'identity', - 'image', - 'key-manager', - 'messaging', - 'metering', - 'network', - 'object-store', - 'orchestration', - 'volume', - ] - self.assertEqual(expected, prof.service_keys) - - def test_default_versions(self): - prof = profile.Profile() - self.assertEqual('v1', prof.get_filter('baremetal').version) - self.assertEqual('v1', prof.get_filter('clustering').version) - self.assertEqual('v2', prof.get_filter('compute').version) - self.assertEqual('v1', prof.get_filter('database').version) - self.assertEqual('v3', prof.get_filter('identity').version) - self.assertEqual('v2', prof.get_filter('image').version) - self.assertEqual('v2', prof.get_filter('network').version) - self.assertEqual('v1', prof.get_filter('object-store').version) - self.assertEqual('v1', prof.get_filter('orchestration').version) - self.assertEqual('v1', prof.get_filter('key-manager').version) - self.assertEqual('v2', prof.get_filter('metering').version) - self.assertEqual('v2', prof.get_filter('volume').version) - self.assertEqual('v1', prof.get_filter('messaging').version) - - def test_set(self): - prof = profile.Profile() - prof.set_version('alarming', 'v2') - self.assertEqual('v2', prof.get_filter('alarming').version) - prof.set_version('baremetal', 'v1') - self.assertEqual('v1', prof.get_filter('baremetal').version) - prof.set_version('clustering', 'v1') - self.assertEqual('v1', prof.get_filter('clustering').version) - prof.set_version('compute', 'v2') - self.assertEqual('v2', prof.get_filter('compute').version) - prof.set_version('database', 'v3') - self.assertEqual('v3', prof.get_filter('database').version) - prof.set_version('identity', 'v4') - self.assertEqual('v4', prof.get_filter('identity').version) - 
prof.set_version('image', 'v5') - self.assertEqual('v5', prof.get_filter('image').version) - prof.set_version('metering', 'v6') - self.assertEqual('v6', prof.get_filter('metering').version) - prof.set_version('network', 'v7') - self.assertEqual('v7', prof.get_filter('network').version) - prof.set_version('object-store', 'v8') - self.assertEqual('v8', prof.get_filter('object-store').version) - prof.set_version('orchestration', 'v9') - self.assertEqual('v9', prof.get_filter('orchestration').version) - - def test_set_version_bad_service(self): - prof = profile.Profile() - self.assertRaises(exceptions.SDKException, prof.set_version, 'bogus', - 'v2') - - def test_set_api_version(self): - # This tests that api_version is effective after explicit setting, or - # else it defaults to None. - prof = profile.Profile() - prof.set_api_version('clustering', '1.2') - svc = prof.get_filter('clustering') - self.assertEqual('1.2', svc.api_version) - svc = prof.get_filter('compute') - self.assertIsNone(svc.api_version) - - def test_set_all(self): - prof = profile.Profile() - prof.set_name(prof.ALL, 'fee') - prof.set_region(prof.ALL, 'fie') - prof.set_interface(prof.ALL, 'public') - for service in prof.service_keys: - self.assertEqual('fee', prof.get_filter(service).service_name) - self.assertEqual('fie', prof.get_filter(service).region) - self.assertEqual('public', prof.get_filter(service).interface) diff --git a/openstack/tests/unit/test_proxy.py b/openstack/tests/unit/test_proxy.py index 311bd64390..cfb09036f2 100644 --- a/openstack/tests/unit/test_proxy.py +++ b/openstack/tests/unit/test_proxy.py @@ -10,12 +10,19 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock -import testtools +import copy +import queue +from unittest import mock + +from keystoneauth1 import session +from testscenarios import load_tests_apply_scenarios as load_tests # noqa from openstack import exceptions from openstack import proxy from openstack import resource +from openstack.tests.unit import base +from openstack.tests.unit import fakes +from openstack import utils class DeleteableResource(resource.Resource): @@ -23,7 +30,7 @@ class DeleteableResource(resource.Resource): class UpdateableResource(resource.Resource): - allow_update = True + allow_commit = True class CreateableResource(resource.Resource): @@ -31,21 +38,30 @@ class CreateableResource(resource.Resource): class RetrieveableResource(resource.Resource): - allow_retrieve = True + allow_fetch = True class ListableResource(resource.Resource): allow_list = True +class FilterableResource(resource.Resource): + allow_list = True + base_path = '/fakes' + + _query_mapping = resource.QueryParameters('a') + a = resource.Body('a') + b = resource.Body('b') + c = resource.Body('c') + + class HeadableResource(resource.Resource): allow_head = True -class Test_check_resource(testtools.TestCase): - +class TestProxyPrivate(base.TestCase): def setUp(self): - super(Test_check_resource, self).setUp() + super().setUp() def method(self, expected_type, value): return value @@ -53,61 +69,123 @@ def method(self, expected_type, value): self.sot = mock.Mock() self.sot.method = method - def _test_correct(self, value): - decorated = proxy._check_resource(strict=False)(self.sot.method) - rv = decorated(self.sot, resource.Resource, value) + self.session = mock.Mock() + self.session._sdk_connection = self.cloud + self.fake_proxy = proxy.Proxy(self.session) + self.fake_proxy._connection = self.cloud - self.assertEqual(value, rv) + def test__get_uri_attribute_no_parent(self): + class Child(resource.Resource): + something = resource.Body("something") - def test_correct_resource(self): - res = resource.Resource() - 
self._test_correct(res) + attr = "something" + value = "nothing" + child = Child(something=value) - def test_notstrict_id(self): - self._test_correct("abc123-id") + result = self.fake_proxy._get_uri_attribute(child, None, attr) - def test_strict_id(self): - decorated = proxy._check_resource(strict=True)(self.sot.method) - self.assertRaisesRegexp(ValueError, "A Resource must be passed", - decorated, self.sot, resource.Resource, - "this-is-not-a-resource") + self.assertEqual(value, result) - def test_incorrect_resource(self): - class OneType(resource.Resource): + def test__get_uri_attribute_with_parent(self): + class Parent(resource.Resource): pass - class AnotherType(resource.Resource): - pass + value = "nothing" + parent = Parent(id=value) + + result = self.fake_proxy._get_uri_attribute("child", parent, "attr") + + self.assertEqual(value, result) + + def test__get_resource_new(self): + value = "hello" + fake_type = mock.Mock(spec=resource.Resource) + fake_type.new = mock.Mock(return_value=value) + attrs = {"first": "Brian", "last": "Curtin"} + + result = self.fake_proxy._get_resource(fake_type, None, **attrs) + + fake_type.new.assert_called_with(connection=self.cloud, **attrs) + self.assertEqual(value, result) + + def test__get_resource_from_id(self): + id = "eye dee" + value = "hello" + attrs = {"first": "Brian", "last": "Curtin"} + + # The isinstance check needs to take a type, not an instance, + # so the mock.assert_called_with method isn't helpful here since + # we can't pass in a mocked object. This class is a crude version + # of that same behavior to let us check that `new` gets + # called with the expected arguments. 
+ + class Fake: + call = {} + + @classmethod + def new(cls, **kwargs): + cls.call = kwargs + return value + + result = self.fake_proxy._get_resource(Fake, id, **attrs) - value = AnotherType() - decorated = proxy._check_resource(strict=False)(self.sot.method) - self.assertRaisesRegexp(ValueError, - "Expected OneType but received AnotherType", - decorated, self.sot, OneType, value) + self.assertDictEqual( + dict(id=id, connection=mock.ANY, **attrs), Fake.call + ) + self.assertEqual(value, result) + def test__get_resource_from_resource(self): + res = mock.Mock(spec=resource.Resource) + res._update = mock.Mock() -class TestProxyDelete(testtools.TestCase): + attrs = {"first": "Brian", "last": "Curtin"} + result = self.fake_proxy._get_resource(resource.Resource, res, **attrs) + + res._update.assert_called_once_with(**attrs) + self.assertEqual(result, res) + + def test__get_resource_from_munch(self): + cls = mock.Mock() + res = mock.Mock(spec=resource.Resource) + res._update = mock.Mock() + cls._from_munch.return_value = res + + m = utils.Munch(answer=42) + attrs = {"first": "Brian", "last": "Curtin"} + + result = self.fake_proxy._get_resource(cls, m, **attrs) + + cls._from_munch.assert_called_once_with(m, connection=self.cloud) + res._update.assert_called_once_with(**attrs) + self.assertEqual(result, res) + + +class TestProxyDelete(base.TestCase): def setUp(self): - super(TestProxyDelete, self).setUp() + super().setUp() self.session = mock.Mock() + self.session._sdk_connection = self.cloud self.fake_id = 1 self.res = mock.Mock(spec=DeleteableResource) self.res.id = self.fake_id self.res.delete = mock.Mock() - self.sot = proxy.BaseProxy(self.session) - DeleteableResource.existing = mock.Mock(return_value=self.res) + self.sot = proxy.Proxy(self.session) + self.sot._connection = self.cloud + DeleteableResource.new = mock.Mock(return_value=self.res) def test_delete(self): self.sot._delete(DeleteableResource, self.res) - self.res.delete.assert_called_with(self.session) + 
self.res.delete.assert_called_with(self.sot) self.sot._delete(DeleteableResource, self.fake_id) - DeleteableResource.existing.assert_called_with(id=self.fake_id) - self.res.delete.assert_called_with(self.session) + DeleteableResource.new.assert_called_with( + connection=self.cloud, id=self.fake_id + ) + self.res.delete.assert_called_with(self.sot) # Delete generally doesn't return anything, so we will normally # swallow any return from within a service's proxy, but make sure @@ -118,33 +196,47 @@ def test_delete(self): def test_delete_ignore_missing(self): self.res.delete.side_effect = exceptions.NotFoundException( - message="test", http_status=404) + message="test", + response=fakes.FakeResponse(status_code=404, data={'error': None}), + ) rv = self.sot._delete(DeleteableResource, self.fake_id) self.assertIsNone(rv) - def test_delete_ResourceNotFound(self): + def test_delete_NotFound(self): self.res.delete.side_effect = exceptions.NotFoundException( - message="test", http_status=404) - - self.assertRaisesRegexp( - exceptions.ResourceNotFound, - "No %s found for %s" % (DeleteableResource.__name__, self.res), - self.sot._delete, DeleteableResource, self.res, - ignore_missing=False) + message="test", + response=fakes.FakeResponse(status_code=404, data={'error': None}), + ) + + self.assertRaisesRegex( + exceptions.NotFoundException, + # TODO(shade) The mocks here are hiding the thing we want to test. 
+ "test", + self.sot._delete, + DeleteableResource, + self.res, + ignore_missing=False, + ) def test_delete_HttpException(self): - self.res.delete.side_effect = exceptions.HttpException( - message="test", http_status=500) - - self.assertRaises(exceptions.HttpException, self.sot._delete, - DeleteableResource, self.res, ignore_missing=False) + self.res.delete.side_effect = exceptions.ResourceNotFound( + message="test", + response=fakes.FakeResponse(status_code=500, data={'error': None}), + ) + self.assertRaises( + exceptions.HttpException, + self.sot._delete, + DeleteableResource, + self.res, + ignore_missing=False, + ) -class TestProxyUpdate(testtools.TestCase): +class TestProxyUpdate(base.TestCase): def setUp(self): - super(TestProxyUpdate, self).setUp() + super().setUp() self.session = mock.Mock() @@ -152,41 +244,52 @@ def setUp(self): self.fake_result = "fake_result" self.res = mock.Mock(spec=UpdateableResource) - self.res.update = mock.Mock(return_value=self.fake_result) - self.res.update_attrs = mock.Mock() + self.res.commit = mock.Mock(return_value=self.fake_result) - self.sot = proxy.BaseProxy(self.session) + self.sot = proxy.Proxy(self.session) + self.sot._connection = self.cloud self.attrs = {"x": 1, "y": 2, "z": 3} - UpdateableResource.existing = mock.Mock(return_value=self.res) + UpdateableResource.new = mock.Mock(return_value=self.res) - def _test_update(self, value): - rv = self.sot._update(UpdateableResource, value, **self.attrs) + def test_update_resource(self): + rv = self.sot._update(UpdateableResource, self.res, **self.attrs) self.assertEqual(rv, self.fake_result) - self.res.update_attrs.assert_called_once_with(self.attrs) - self.res.update.assert_called_once_with(self.session) + self.res._update.assert_called_once_with(**self.attrs) + self.res.commit.assert_called_once_with(self.sot, base_path=None) - def test_update_resource(self): - self._test_update(self.res) + def test_update_resource_override_base_path(self): + base_path = 'dummy' + rv = 
self.sot._update( + UpdateableResource, self.res, base_path=base_path, **self.attrs + ) + + self.assertEqual(rv, self.fake_result) + self.res._update.assert_called_once_with(**self.attrs) + self.res.commit.assert_called_once_with(self.sot, base_path=base_path) def test_update_id(self): - self._test_update(self.fake_id) + rv = self.sot._update(UpdateableResource, self.fake_id, **self.attrs) + self.assertEqual(rv, self.fake_result) + self.res.commit.assert_called_once_with(self.sot, base_path=None) -class TestProxyCreate(testtools.TestCase): +class TestProxyCreate(base.TestCase): def setUp(self): - super(TestProxyCreate, self).setUp() + super().setUp() self.session = mock.Mock() + self.session._sdk_connection = self.cloud self.fake_result = "fake_result" self.res = mock.Mock(spec=CreateableResource) self.res.create = mock.Mock(return_value=self.fake_result) - self.sot = proxy.BaseProxy(self.session) + self.sot = proxy.Proxy(self.session) + self.sot._connection = self.cloud def test_create_attributes(self): CreateableResource.new = mock.Mock(return_value=self.res) @@ -195,97 +298,220 @@ def test_create_attributes(self): rv = self.sot._create(CreateableResource, **attrs) self.assertEqual(rv, self.fake_result) - CreateableResource.new.assert_called_once_with(**attrs) - self.res.create.assert_called_once_with(self.session) + CreateableResource.new.assert_called_once_with( + connection=self.cloud, **attrs + ) + self.res.create.assert_called_once_with(self.sot, base_path=None) + def test_create_attributes_override_base_path(self): + CreateableResource.new = mock.Mock(return_value=self.res) + + base_path = 'dummy' + attrs = {"x": 1, "y": 2, "z": 3} + rv = self.sot._create(CreateableResource, base_path=base_path, **attrs) -class TestProxyGet(testtools.TestCase): + self.assertEqual(rv, self.fake_result) + CreateableResource.new.assert_called_once_with( + connection=self.cloud, **attrs + ) + self.res.create.assert_called_once_with(self.sot, base_path=base_path) + +class 
TestProxyBulkCreate(base.TestCase): + def setUp(self): + super().setUp() + + class Res(resource.Resource): + pass + + self.session = mock.Mock() + self.result = mock.sentinel + self.data = mock.Mock() + + self.sot = proxy.Proxy(self.session) + self.cls = Res + self.cls.bulk_create = mock.Mock(return_value=self.result) + + def test_bulk_create_attributes(self): + rv = self.sot._bulk_create(self.cls, self.data) + + self.assertEqual(rv, self.result) + self.cls.bulk_create.assert_called_once_with( + self.sot, self.data, base_path=None + ) + + def test_bulk_create_attributes_override_base_path(self): + base_path = 'dummy' + + rv = self.sot._bulk_create(self.cls, self.data, base_path=base_path) + + self.assertEqual(rv, self.result) + self.cls.bulk_create.assert_called_once_with( + self.sot, self.data, base_path=base_path + ) + + +class TestProxyGet(base.TestCase): def setUp(self): - super(TestProxyGet, self).setUp() + super().setUp() self.session = mock.Mock() + self.session._sdk_connection = self.cloud self.fake_id = 1 self.fake_name = "fake_name" self.fake_result = "fake_result" self.res = mock.Mock(spec=RetrieveableResource) self.res.id = self.fake_id - self.res.get = mock.Mock(return_value=self.fake_result) + self.res.fetch = mock.Mock(return_value=self.fake_result) - self.sot = proxy.BaseProxy(self.session) - RetrieveableResource.existing = mock.Mock(return_value=self.res) + self.sot = proxy.Proxy(self.session) + self.sot._connection = self.cloud + RetrieveableResource.new = mock.Mock(return_value=self.res) def test_get_resource(self): rv = self.sot._get(RetrieveableResource, self.res) - self.res.get.assert_called_with(self.session, args=None) + self.res.fetch.assert_called_with( + self.sot, + requires_id=True, + base_path=None, + skip_cache=mock.ANY, + error_message=mock.ANY, + ) self.assertEqual(rv, self.fake_result) def test_get_resource_with_args(self): - rv = self.sot._get(RetrieveableResource, self.res, args={'K': 'V'}) - - 
self.res.get.assert_called_with(self.session, args={'K': 'V'}) + args = {"key": "value"} + rv = self.sot._get(RetrieveableResource, self.res, **args) + + self.res._update.assert_called_once_with(**args) + self.res.fetch.assert_called_with( + self.sot, + requires_id=True, + base_path=None, + skip_cache=mock.ANY, + error_message=mock.ANY, + ) self.assertEqual(rv, self.fake_result) def test_get_id(self): rv = self.sot._get(RetrieveableResource, self.fake_id) - RetrieveableResource.existing.assert_called_with(id=self.fake_id) - self.res.get.assert_called_with(self.session, args=None) + RetrieveableResource.new.assert_called_with( + connection=self.cloud, id=self.fake_id + ) + self.res.fetch.assert_called_with( + self.sot, + requires_id=True, + base_path=None, + skip_cache=mock.ANY, + error_message=mock.ANY, + ) self.assertEqual(rv, self.fake_result) - def test_get_not_found(self): - self.res.get.side_effect = exceptions.NotFoundException( - message="test", http_status=404) + def test_get_base_path(self): + base_path = 'dummy' + rv = self.sot._get( + RetrieveableResource, self.fake_id, base_path=base_path + ) + + RetrieveableResource.new.assert_called_with( + connection=self.cloud, id=self.fake_id + ) + self.res.fetch.assert_called_with( + self.sot, + requires_id=True, + base_path=base_path, + skip_cache=mock.ANY, + error_message=mock.ANY, + ) + self.assertEqual(rv, self.fake_result) - self.assertRaisesRegexp( - exceptions.ResourceNotFound, - "No %s found for %s" % (RetrieveableResource.__name__, self.res), - self.sot._get, RetrieveableResource, self.res) + def test_get_not_found(self): + self.res.fetch.side_effect = exceptions.NotFoundException( + message="test", + response=fakes.FakeResponse(status_code=404, data={'error': None}), + ) + self.assertRaisesRegex( + exceptions.NotFoundException, + "test", + self.sot._get, + RetrieveableResource, + self.res, + ) -class TestProxyList(testtools.TestCase): +class TestProxyList(base.TestCase): def setUp(self): - 
super(TestProxyList, self).setUp() + super().setUp() self.session = mock.Mock() - self.fake_a = 1 - self.fake_b = 2 - self.fake_c = 3 - self.fake_resource = resource.Resource.new(id=self.fake_a) + self.args = {"a": "A", "b": "B", "c": "C"} self.fake_response = [resource.Resource()] - self.fake_query = {"a": self.fake_resource, "b": self.fake_b} - self.fake_path_args = {"c": self.fake_c} - self.sot = proxy.BaseProxy(self.session) + self.sot = proxy.Proxy(self.session) + self.sot._connection = self.cloud ListableResource.list = mock.Mock() ListableResource.list.return_value = self.fake_response - def _test_list(self, path_args, paginated, **query): - rv = self.sot._list(ListableResource, path_args=path_args, - paginated=paginated, **query) + def _test_list(self, paginated, base_path=None): + rv = self.sot._list( + ListableResource, + paginated=paginated, + base_path=base_path, + **self.args, + ) self.assertEqual(self.fake_response, rv) ListableResource.list.assert_called_once_with( - self.session, path_args=path_args, paginated=paginated, - params={'a': self.fake_a, 'b': self.fake_b}) + self.sot, paginated=paginated, base_path=base_path, **self.args + ) def test_list_paginated(self): - self._test_list(self.fake_path_args, True, **self.fake_query) + self._test_list(True) def test_list_non_paginated(self): - self._test_list(self.fake_path_args, False, **self.fake_query) - - -class TestProxyHead(testtools.TestCase): - + self._test_list(False) + + def test_list_override_base_path(self): + self._test_list(False, base_path='dummy') + + def test_list_filters_jmespath(self): + fake_response = [ + FilterableResource(a='a1', b='b1', c='c'), + FilterableResource(a='a2', b='b2', c='c'), + FilterableResource(a='a3', b='b3', c='c'), + ] + FilterableResource.list = mock.Mock() + FilterableResource.list.return_value = fake_response + + rv = self.sot._list( + FilterableResource, + paginated=False, + base_path=None, + jmespath_filters="[?c=='c']", + ) + self.assertEqual(3, len(rv)) + 
+ # Test filtering based on unknown attribute + rv = self.sot._list( + FilterableResource, + paginated=False, + base_path=None, + jmespath_filters="[?d=='c']", + ) + self.assertEqual(0, len(rv)) + + +class TestProxyHead(base.TestCase): def setUp(self): - super(TestProxyHead, self).setUp() + super().setUp() self.session = mock.Mock() + self.session._sdk_connection = self.cloud self.fake_id = 1 self.fake_name = "fake_name" @@ -294,60 +520,323 @@ def setUp(self): self.res.id = self.fake_id self.res.head = mock.Mock(return_value=self.fake_result) - self.sot = proxy.BaseProxy(self.session) - HeadableResource.existing = mock.Mock(return_value=self.res) + self.sot = proxy.Proxy(self.session) + self.sot._connection = self.cloud + HeadableResource.new = mock.Mock(return_value=self.res) def test_head_resource(self): rv = self.sot._head(HeadableResource, self.res) - self.res.head.assert_called_with(self.session) + self.res.head.assert_called_with(self.sot, base_path=None) + self.assertEqual(rv, self.fake_result) + + def test_head_resource_base_path(self): + base_path = 'dummy' + rv = self.sot._head(HeadableResource, self.res, base_path=base_path) + + self.res.head.assert_called_with(self.sot, base_path=base_path) self.assertEqual(rv, self.fake_result) def test_head_id(self): rv = self.sot._head(HeadableResource, self.fake_id) - HeadableResource.existing.assert_called_with(id=self.fake_id) - self.res.head.assert_called_with(self.session) + HeadableResource.new.assert_called_with( + connection=self.cloud, id=self.fake_id + ) + self.res.head.assert_called_with(self.sot, base_path=None) self.assertEqual(rv, self.fake_result) - def test_head_no_value(self): - MockHeadResource = mock.Mock(spec=HeadableResource) - instance = mock.Mock() - MockHeadResource.return_value = instance - - self.sot._head(MockHeadResource) - - MockHeadResource.assert_called_with() - instance.head.assert_called_with(self.session) - - @mock.patch("openstack.resource.wait_for_status") - def test_wait_for(self, 
mock_wait): - mock_resource = mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_status(mock_resource, 'ACTIVE') - mock_wait.assert_called_once_with( - self.session, mock_resource, 'ACTIVE', [], 2, 120) - - @mock.patch("openstack.resource.wait_for_status") - def test_wait_for_params(self, mock_wait): - mock_resource = mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_status(mock_resource, 'ACTIVE', ['ERROR'], 1, 2) - mock_wait.assert_called_once_with( - self.session, mock_resource, 'ACTIVE', ['ERROR'], 1, 2) - - @mock.patch("openstack.resource.wait_for_delete") - def test_wait_for_delete(self, mock_wait): - mock_resource = mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_delete(mock_resource) - mock_wait.assert_called_once_with( - self.session, mock_resource, 2, 120) - - @mock.patch("openstack.resource.wait_for_delete") - def test_wait_for_delete_params(self, mock_wait): - mock_resource = mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_delete(mock_resource, 1, 2) - mock_wait.assert_called_once_with( - self.session, mock_resource, 1, 2) + +class TestExtractName(base.TestCase): + scenarios = [ + ('slash_servers_bare', dict(url='/servers', parts=['servers'])), + ('slash_servers_arg', dict(url='/servers/1', parts=['server'])), + ('servers_bare', dict(url='servers', parts=['servers'])), + ('servers_arg', dict(url='servers/1', parts=['server'])), + ('networks_bare', dict(url='/v2.0/networks', parts=['networks'])), + ('networks_arg', dict(url='/v2.0/networks/1', parts=['network'])), + ('tokens', dict(url='/v3/tokens', parts=['tokens'])), + ('discovery', dict(url='/', parts=['discovery'])), + ( + 'secgroups', + dict( + url='/servers/1/os-security-groups', + parts=['server', 'os-security-groups'], + ), + ), + ('bm_chassis', dict(url='/v1/chassis/id', parts=['chassis'])), + ] + + def test_extract_name(self): + results = proxy.Proxy(mock.Mock())._extract_name(self.url) + 
self.assertEqual(self.parts, results) + + +class TestProxyCache(base.TestCase): + class Res(resource.Resource): + base_path = 'fake' + + allow_commit = True + allow_fetch = True + + foo = resource.Body('foo') + + def setUp(self): + super().setUp(cloud_config_fixture='clouds_cache.yaml') + + self.session = mock.Mock(spec=session.Session) + self.session._sdk_connection = self.cloud + self.session.get_project_id = mock.Mock(return_value='fake_prj') + + self.response = mock.Mock() + self.response.status_code = 200 + self.response.history = [] + self.response.headers = {} + self.response.body = {} + self.response.json = mock.Mock(return_value=self.response.body) + self.session.request = mock.Mock(return_value=self.response) + + self.sot = proxy.Proxy(self.session) + self.sot._connection = self.cloud + self.sot.service_type = 'srv' + + def _get_key(self, id): + return f"srv.fake.fake/{id}.{{'microversion': None, 'params': {{}}}}" + + def test_get_not_in_cache(self): + self.cloud._cache_expirations['srv.fake'] = 5 + self.sot._get(self.Res, '1') + + self.session.request.assert_called_with( + 'fake/1', + 'GET', + connect_retries=mock.ANY, + raise_exc=mock.ANY, + global_request_id=mock.ANY, + microversion=mock.ANY, + params=mock.ANY, + endpoint_filter=mock.ANY, + headers=mock.ANY, + rate_semaphore=mock.ANY, + ) + self.assertIn(self._get_key(1), self.cloud._api_cache_keys) + + def test_get_from_cache(self): + key = self._get_key(2) + + self.cloud._cache.set(key, self.response) + # set expiration for the resource to respect cache + self.cloud._cache_expirations['srv.fake'] = 5 + + self.sot._get(self.Res, '2') + self.session.request.assert_not_called() + + def test_modify(self): + key = self._get_key(3) + + self.cloud._cache.set(key, self.response) + self.cloud._api_cache_keys.add(key) + self.cloud._cache_expirations['srv.fake'] = 5 + + # Ensure first call gets value from cache + self.sot._get(self.Res, '3') + self.session.request.assert_not_called() + + # update call 
invalidates the cache and triggers API + rs = self.Res.existing(id='3') + self.sot._update(self.Res, rs, foo='bar') + + self.session.request.assert_called() + self.assertIsNotNone(self.cloud._cache.get(key)) + self.assertEqual('NoValue', type(self.cloud._cache.get(key)).__name__) + self.assertNotIn(key, self.cloud._api_cache_keys) + + # next get call again triggers API + self.sot._get(self.Res, '3') + self.session.request.assert_called() + + def test_get_bypass_cache(self): + key = self._get_key(4) + + resp = copy.deepcopy(self.response) + resp.body = {'foo': 'bar'} + self.cloud._api_cache_keys.add(key) + self.cloud._cache.set(key, resp) + # set expiration for the resource to respect cache + self.cloud._cache_expirations['srv.fake'] = 5 + + self.sot._get(self.Res, '4', skip_cache=True) + self.session.request.assert_called() + # validate we got empty body as expected, and not what is in cache + self.assertEqual(dict(), self.response.body) + self.assertNotIn(key, self.cloud._api_cache_keys) + self.assertEqual('NoValue', type(self.cloud._cache.get(key)).__name__) + + +class TestProxyCleanup(base.TestCase): + def setUp(self): + super().setUp() + + self.session = mock.Mock() + self.session._sdk_connection = self.cloud + + self.fake_id = 1 + self.fake_name = "fake_name" + self.fake_result = "fake_result" + self.res = mock.Mock(spec=resource.Resource) + self.res.id = self.fake_id + self.res.created_at = '2020-01-02T03:04:05' + self.res.updated_at = '2020-01-03T03:04:05' + self.res_no_updated = mock.Mock(spec=resource.Resource) + self.res_no_updated.created_at = '2020-01-02T03:04:05' + + self.sot = proxy.Proxy(self.session) + self.sot.service_type = "block-storage" + + self.delete_mock = mock.Mock() + + def test_filters_evaluation_created_at(self): + self.assertTrue( + self.sot._service_cleanup_resource_filters_evaluation( + self.res, filters={'created_at': '2020-02-03T00:00:00'} + ) + ) + + def test_filters_evaluation_created_at_not(self): + self.assertFalse( + 
self.sot._service_cleanup_resource_filters_evaluation( + self.res, filters={'created_at': '2020-01-01T00:00:00'} + ) + ) + + def test_filters_evaluation_updated_at(self): + self.assertTrue( + self.sot._service_cleanup_resource_filters_evaluation( + self.res, filters={'updated_at': '2020-02-03T00:00:00'} + ) + ) + + def test_filters_evaluation_updated_at_not(self): + self.assertFalse( + self.sot._service_cleanup_resource_filters_evaluation( + self.res, filters={'updated_at': '2020-01-01T00:00:00'} + ) + ) + + def test_filters_evaluation_updated_at_missing(self): + self.assertFalse( + self.sot._service_cleanup_resource_filters_evaluation( + self.res_no_updated, + filters={'updated_at': '2020-01-01T00:00:00'}, + ) + ) + + def test_filters_empty(self): + self.assertTrue( + self.sot._service_cleanup_resource_filters_evaluation( + self.res_no_updated + ) + ) + + def test_service_cleanup_dry_run(self): + self.assertTrue( + self.sot._service_cleanup_del_res( + self.delete_mock, self.res, dry_run=True + ) + ) + self.delete_mock.assert_not_called() + + def test_service_cleanup_dry_run_default(self): + self.assertTrue( + self.sot._service_cleanup_del_res(self.delete_mock, self.res) + ) + self.delete_mock.assert_not_called() + + def test_service_cleanup_real_run(self): + self.assertTrue( + self.sot._service_cleanup_del_res( + self.delete_mock, + self.res, + dry_run=False, + ) + ) + self.delete_mock.assert_called_with(self.res) + + def test_service_cleanup_real_run_identified_resources(self): + rd = dict() + self.assertTrue( + self.sot._service_cleanup_del_res( + self.delete_mock, + self.res, + dry_run=False, + identified_resources=rd, + ) + ) + self.delete_mock.assert_called_with(self.res) + self.assertEqual(self.res, rd[self.res.id]) + + def test_service_cleanup_resource_evaluation_false(self): + self.assertFalse( + self.sot._service_cleanup_del_res( + self.delete_mock, + self.res, + dry_run=False, + resource_evaluation_fn=lambda x, y, z: False, + ) + ) + 
self.delete_mock.assert_not_called() + + def test_service_cleanup_resource_evaluation_true(self): + self.assertTrue( + self.sot._service_cleanup_del_res( + self.delete_mock, + self.res, + dry_run=False, + resource_evaluation_fn=lambda x, y, z: True, + ) + ) + self.delete_mock.assert_called() + + def test_service_cleanup_resource_evaluation_override_filters(self): + self.assertFalse( + self.sot._service_cleanup_del_res( + self.delete_mock, + self.res, + dry_run=False, + resource_evaluation_fn=lambda x, y, z: False, + filters={'created_at': '2200-01-01'}, + ) + ) + + def test_service_cleanup_filters(self): + self.assertTrue( + self.sot._service_cleanup_del_res( + self.delete_mock, + self.res, + dry_run=False, + filters={'created_at': '2200-01-01'}, + ) + ) + self.delete_mock.assert_called() + + def test_service_cleanup_queue(self): + q = queue.Queue() + self.assertTrue( + self.sot._service_cleanup_del_res( + self.delete_mock, + self.res, + dry_run=False, + client_status_queue=q, + filters={'created_at': '2200-01-01'}, + ) + ) + self.assertEqual(self.res, q.get_nowait()) + + def test_should_skip_resource_cleanup(self): + excluded = ["block_storage.backup"] + self.assertTrue( + self.sot.should_skip_resource_cleanup("backup", excluded) + ) + self.assertFalse( + self.sot.should_skip_resource_cleanup("volume", excluded) + ) diff --git a/openstack/tests/unit/test_proxy2.py b/openstack/tests/unit/test_proxy2.py deleted file mode 100644 index 0e6dc56801..0000000000 --- a/openstack/tests/unit/test_proxy2.py +++ /dev/null @@ -1,418 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the -# License for the specific language governing permissions and limitations -# under the License. - -import mock -import testtools - -from openstack import exceptions -from openstack import proxy2 -from openstack import resource2 - - -class DeleteableResource(resource2.Resource): - allow_delete = True - - -class UpdateableResource(resource2.Resource): - allow_update = True - - -class CreateableResource(resource2.Resource): - allow_create = True - - -class RetrieveableResource(resource2.Resource): - allow_retrieve = True - - -class ListableResource(resource2.Resource): - allow_list = True - - -class HeadableResource(resource2.Resource): - allow_head = True - - -class TestProxyPrivate(testtools.TestCase): - - def setUp(self): - super(TestProxyPrivate, self).setUp() - - def method(self, expected_type, value): - return value - - self.sot = mock.Mock() - self.sot.method = method - - self.fake_proxy = proxy2.BaseProxy("session") - - def _test_correct(self, value): - decorated = proxy2._check_resource(strict=False)(self.sot.method) - rv = decorated(self.sot, resource2.Resource, value) - - self.assertEqual(value, rv) - - def test__check_resource_correct_resource(self): - res = resource2.Resource() - self._test_correct(res) - - def test__check_resource_notstrict_id(self): - self._test_correct("abc123-id") - - def test__check_resource_strict_id(self): - decorated = proxy2._check_resource(strict=True)(self.sot.method) - self.assertRaisesRegexp(ValueError, "A Resource must be passed", - decorated, self.sot, resource2.Resource, - "this-is-not-a-resource") - - def test__check_resource_incorrect_resource(self): - class OneType(resource2.Resource): - pass - - class AnotherType(resource2.Resource): - pass - - value = AnotherType() - decorated = proxy2._check_resource(strict=False)(self.sot.method) - self.assertRaisesRegexp(ValueError, - "Expected OneType but received AnotherType", - decorated, self.sot, OneType, value) - - def test__get_uri_attribute_no_parent(self): - class 
Child(resource2.Resource): - something = resource2.Body("something") - - attr = "something" - value = "nothing" - child = Child(something=value) - - result = self.fake_proxy._get_uri_attribute(child, None, attr) - - self.assertEqual(value, result) - - def test__get_uri_attribute_with_parent(self): - class Parent(resource2.Resource): - pass - - value = "nothing" - parent = Parent(id=value) - - result = self.fake_proxy._get_uri_attribute("child", parent, "attr") - - self.assertEqual(value, result) - - def test__get_resource_new(self): - value = "hello" - fake_type = mock.Mock(spec=resource2.Resource) - fake_type.new = mock.Mock(return_value=value) - attrs = {"first": "Brian", "last": "Curtin"} - - result = self.fake_proxy._get_resource(fake_type, None, **attrs) - - fake_type.new.assert_called_with(**attrs) - self.assertEqual(value, result) - - def test__get_resource_from_id(self): - id = "eye dee" - value = "hello" - attrs = {"first": "Brian", "last": "Curtin"} - - # The isinstance check needs to take a type, not an instance, - # so the mock.assert_called_with method isn't helpful here since - # we can't pass in a mocked object. This class is a crude version - # of that same behavior to let us check that `new` gets - # called with the expected arguments. 
- - class Fake(object): - call = {} - - @classmethod - def new(cls, **kwargs): - cls.call = kwargs - return value - - result = self.fake_proxy._get_resource(Fake, id, **attrs) - - self.assertDictEqual(dict(id=id, **attrs), Fake.call) - self.assertEqual(value, result) - - def test__get_resource_from_resource(self): - res = mock.Mock(spec=resource2.Resource) - res._update = mock.Mock() - - attrs = {"first": "Brian", "last": "Curtin"} - - result = self.fake_proxy._get_resource(resource2.Resource, - res, **attrs) - - res._update.assert_called_once_with(**attrs) - self.assertEqual(result, res) - - -class TestProxyDelete(testtools.TestCase): - - def setUp(self): - super(TestProxyDelete, self).setUp() - - self.session = mock.Mock() - - self.fake_id = 1 - self.res = mock.Mock(spec=DeleteableResource) - self.res.id = self.fake_id - self.res.delete = mock.Mock() - - self.sot = proxy2.BaseProxy(self.session) - DeleteableResource.new = mock.Mock(return_value=self.res) - - def test_delete(self): - self.sot._delete(DeleteableResource, self.res) - self.res.delete.assert_called_with(self.session) - - self.sot._delete(DeleteableResource, self.fake_id) - DeleteableResource.new.assert_called_with(id=self.fake_id) - self.res.delete.assert_called_with(self.session) - - # Delete generally doesn't return anything, so we will normally - # swallow any return from within a service's proxy, but make sure - # we can still return for any cases where values are returned. 
- self.res.delete.return_value = self.fake_id - rv = self.sot._delete(DeleteableResource, self.fake_id) - self.assertEqual(rv, self.fake_id) - - def test_delete_ignore_missing(self): - self.res.delete.side_effect = exceptions.NotFoundException( - message="test", http_status=404) - - rv = self.sot._delete(DeleteableResource, self.fake_id) - self.assertIsNone(rv) - - def test_delete_ResourceNotFound(self): - self.res.delete.side_effect = exceptions.NotFoundException( - message="test", http_status=404) - - self.assertRaisesRegexp( - exceptions.ResourceNotFound, - "No %s found for %s" % (DeleteableResource.__name__, self.res), - self.sot._delete, DeleteableResource, self.res, - ignore_missing=False) - - def test_delete_HttpException(self): - self.res.delete.side_effect = exceptions.HttpException( - message="test", http_status=500) - - self.assertRaises(exceptions.HttpException, self.sot._delete, - DeleteableResource, self.res, ignore_missing=False) - - -class TestProxyUpdate(testtools.TestCase): - - def setUp(self): - super(TestProxyUpdate, self).setUp() - - self.session = mock.Mock() - - self.fake_id = 1 - self.fake_result = "fake_result" - - self.res = mock.Mock(spec=UpdateableResource) - self.res.update = mock.Mock(return_value=self.fake_result) - - self.sot = proxy2.BaseProxy(self.session) - - self.attrs = {"x": 1, "y": 2, "z": 3} - - UpdateableResource.new = mock.Mock(return_value=self.res) - - def test_update_resource(self): - rv = self.sot._update(UpdateableResource, self.res, **self.attrs) - - self.assertEqual(rv, self.fake_result) - self.res._update.assert_called_once_with(**self.attrs) - self.res.update.assert_called_once_with(self.session) - - def test_update_id(self): - rv = self.sot._update(UpdateableResource, self.fake_id, **self.attrs) - - self.assertEqual(rv, self.fake_result) - self.res.update.assert_called_once_with(self.session) - - -class TestProxyCreate(testtools.TestCase): - - def setUp(self): - super(TestProxyCreate, self).setUp() - - 
self.session = mock.Mock() - - self.fake_result = "fake_result" - self.res = mock.Mock(spec=CreateableResource) - self.res.create = mock.Mock(return_value=self.fake_result) - - self.sot = proxy2.BaseProxy(self.session) - - def test_create_attributes(self): - CreateableResource.new = mock.Mock(return_value=self.res) - - attrs = {"x": 1, "y": 2, "z": 3} - rv = self.sot._create(CreateableResource, **attrs) - - self.assertEqual(rv, self.fake_result) - CreateableResource.new.assert_called_once_with(**attrs) - self.res.create.assert_called_once_with(self.session) - - -class TestProxyGet(testtools.TestCase): - - def setUp(self): - super(TestProxyGet, self).setUp() - - self.session = mock.Mock() - - self.fake_id = 1 - self.fake_name = "fake_name" - self.fake_result = "fake_result" - self.res = mock.Mock(spec=RetrieveableResource) - self.res.id = self.fake_id - self.res.get = mock.Mock(return_value=self.fake_result) - - self.sot = proxy2.BaseProxy(self.session) - RetrieveableResource.new = mock.Mock(return_value=self.res) - - def test_get_resource(self): - rv = self.sot._get(RetrieveableResource, self.res) - - self.res.get.assert_called_with(self.session, requires_id=True) - self.assertEqual(rv, self.fake_result) - - def test_get_resource_with_args(self): - args = {"key": "value"} - rv = self.sot._get(RetrieveableResource, self.res, **args) - - self.res._update.assert_called_once_with(**args) - self.res.get.assert_called_with(self.session, requires_id=True) - self.assertEqual(rv, self.fake_result) - - def test_get_id(self): - rv = self.sot._get(RetrieveableResource, self.fake_id) - - RetrieveableResource.new.assert_called_with(id=self.fake_id) - self.res.get.assert_called_with(self.session, requires_id=True) - self.assertEqual(rv, self.fake_result) - - def test_get_not_found(self): - self.res.get.side_effect = exceptions.NotFoundException( - message="test", http_status=404) - - self.assertRaisesRegexp( - exceptions.ResourceNotFound, - "No %s found for %s" % 
(RetrieveableResource.__name__, self.res), - self.sot._get, RetrieveableResource, self.res) - - -class TestProxyList(testtools.TestCase): - - def setUp(self): - super(TestProxyList, self).setUp() - - self.session = mock.Mock() - - self.args = {"a": "A", "b": "B", "c": "C"} - self.fake_response = [resource2.Resource()] - - self.sot = proxy2.BaseProxy(self.session) - ListableResource.list = mock.Mock() - ListableResource.list.return_value = self.fake_response - - def _test_list(self, paginated): - rv = self.sot._list(ListableResource, paginated=paginated, **self.args) - - self.assertEqual(self.fake_response, rv) - ListableResource.list.assert_called_once_with( - self.session, paginated=paginated, **self.args) - - def test_list_paginated(self): - self._test_list(True) - - def test_list_non_paginated(self): - self._test_list(False) - - -class TestProxyHead(testtools.TestCase): - - def setUp(self): - super(TestProxyHead, self).setUp() - - self.session = mock.Mock() - - self.fake_id = 1 - self.fake_name = "fake_name" - self.fake_result = "fake_result" - self.res = mock.Mock(spec=HeadableResource) - self.res.id = self.fake_id - self.res.head = mock.Mock(return_value=self.fake_result) - - self.sot = proxy2.BaseProxy(self.session) - HeadableResource.new = mock.Mock(return_value=self.res) - - def test_head_resource(self): - rv = self.sot._head(HeadableResource, self.res) - - self.res.head.assert_called_with(self.session) - self.assertEqual(rv, self.fake_result) - - def test_head_id(self): - rv = self.sot._head(HeadableResource, self.fake_id) - - HeadableResource.new.assert_called_with(id=self.fake_id) - self.res.head.assert_called_with(self.session) - self.assertEqual(rv, self.fake_result) - - -class TestProxyWaits(testtools.TestCase): - - def setUp(self): - super(TestProxyWaits, self).setUp() - - self.session = mock.Mock() - self.sot = proxy2.BaseProxy(self.session) - - @mock.patch("openstack.resource2.wait_for_status") - def test_wait_for(self, mock_wait): - mock_resource 
= mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_status(mock_resource, 'ACTIVE') - mock_wait.assert_called_once_with( - self.session, mock_resource, 'ACTIVE', [], 2, 120) - - @mock.patch("openstack.resource2.wait_for_status") - def test_wait_for_params(self, mock_wait): - mock_resource = mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_status(mock_resource, 'ACTIVE', ['ERROR'], 1, 2) - mock_wait.assert_called_once_with( - self.session, mock_resource, 'ACTIVE', ['ERROR'], 1, 2) - - @mock.patch("openstack.resource2.wait_for_delete") - def test_wait_for_delete(self, mock_wait): - mock_resource = mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_delete(mock_resource) - mock_wait.assert_called_once_with( - self.session, mock_resource, 2, 120) - - @mock.patch("openstack.resource2.wait_for_delete") - def test_wait_for_delete_params(self, mock_wait): - mock_resource = mock.Mock() - mock_wait.return_value = mock_resource - self.sot.wait_for_delete(mock_resource, 1, 2) - mock_wait.assert_called_once_with( - self.session, mock_resource, 1, 2) diff --git a/openstack/tests/unit/test_proxy_base.py b/openstack/tests/unit/test_proxy_base.py index 348422d82c..0b512ef0f8 100644 --- a/openstack/tests/unit/test_proxy_base.py +++ b/openstack/tests/unit/test_proxy_base.py @@ -10,214 +10,315 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock +from unittest import mock from openstack.tests.unit import base class TestProxyBase(base.TestCase): def setUp(self): - super(TestProxyBase, self).setUp() + super().setUp() self.session = mock.Mock() - def _add_path_args_for_verify(self, path_args, method_args, - expected_kwargs, value=None): - if path_args is not None: - if value is None: - for key in path_args: - method_args.append(path_args[key]) - expected_kwargs['path_args'] = path_args - - def _verify(self, mock_method, test_method, - method_args=None, method_kwargs=None, - expected_args=None, expected_kwargs=None, - expected_result=None): + def _verify( + self, + mock_method, + test_method, + *, + method_args=None, + method_kwargs=None, + method_result=None, + expected_args=None, + expected_kwargs=None, + expected_result=None, + ): with mock.patch(mock_method) as mocked: mocked.return_value = expected_result - if any([method_args, method_kwargs, - expected_args, expected_kwargs]): - method_args = method_args or () - method_kwargs = method_kwargs or {} - expected_args = expected_args or () - expected_kwargs = expected_kwargs or {} - - self.assertEqual(expected_result, test_method(*method_args, - **method_kwargs)) - mocked.assert_called_with(self.session, - *expected_args, **expected_kwargs) - else: - self.assertEqual(expected_result, test_method()) - mocked.assert_called_with(self.session) - - # NOTE(briancurtin): This is a duplicate version of _verify that is - # temporarily here while we shift APIs. The difference is that - # calls from the Proxy classes aren't going to be going directly into - # the Resource layer anymore, so they don't pass in the session which - # was tested in assert_called_with. - # This is being done in lieu of adding logic and complicating - # the _verify method. It will be removed once there is one API to - # be verifying. 
- def _verify2(self, mock_method, test_method, - method_args=None, method_kwargs=None, method_result=None, - expected_args=None, expected_kwargs=None, - expected_result=None): - with mock.patch(mock_method) as mocked: - mocked.return_value = expected_result - if any([method_args, method_kwargs, - expected_args, expected_kwargs]): + if any( + [ + method_args, + method_kwargs, + expected_args, + expected_kwargs, + ] + ): method_args = method_args or () method_kwargs = method_kwargs or {} expected_args = expected_args or () expected_kwargs = expected_kwargs or {} if method_result: - self.assertEqual(method_result, test_method(*method_args, - **method_kwargs)) + self.assertEqual( + method_result, + test_method(*method_args, **method_kwargs), + ) else: - self.assertEqual(expected_result, test_method(*method_args, - **method_kwargs)) - mocked.assert_called_with(*expected_args, **expected_kwargs) + self.assertEqual( + expected_result, + test_method(*method_args, **method_kwargs), + ) + + # Check how the mock was called in detail + called_args, called_kwargs = mocked.call_args + self.assertEqual(expected_args, list(called_args)) + + # NOTE(gtema): if base_path is not in expected_kwargs or empty + # exclude it from the comparison, since some methods might + # still invoke method with None value + base_path = expected_kwargs.get('base_path', None) + if base_path is None: + expected_kwargs.pop('base_path', None) + called_kwargs.pop('base_path', None) + # ditto for paginated + paginated = expected_kwargs.get('paginated', None) + if paginated is None: + expected_kwargs.pop('paginated', None) + called_kwargs.pop('paginated', None) + # and ignore_missing + ignore_missing = expected_kwargs.get('ignore_missing', None) + if ignore_missing is None: + expected_kwargs.pop('ignore_missing', None) + called_kwargs.pop('ignore_missing', None) + + self.assertDictEqual(expected_kwargs, called_kwargs) else: self.assertEqual(expected_result, test_method()) - 
mocked.assert_called_with(self.session) - - def verify_create(self, test_method, resource_type, - mock_method="openstack.proxy.BaseProxy._create", - expected_result="result", **kwargs): - the_kwargs = {"x": 1, "y": 2, "z": 3} - method_kwargs = kwargs.pop("method_kwargs", the_kwargs) - expected_args = [resource_type] - expected_kwargs = kwargs.pop("expected_kwargs", the_kwargs) - - self._verify2(mock_method, test_method, - expected_result=expected_result, - method_kwargs=method_kwargs, - expected_args=expected_args, - expected_kwargs=expected_kwargs, - **kwargs) - - def verify_delete(self, test_method, resource_type, ignore, - input_path_args=None, expected_path_args=None, - mock_method="openstack.proxy.BaseProxy._delete"): - method_args = ["resource_or_id"] - method_kwargs = {"ignore_missing": ignore} - if isinstance(input_path_args, dict): - for key in input_path_args: - method_kwargs[key] = input_path_args[key] - elif isinstance(input_path_args, list): - method_args = input_path_args - expected_kwargs = {"ignore_missing": ignore} - if expected_path_args: - expected_kwargs["path_args"] = expected_path_args - self._verify2(mock_method, test_method, - method_args=method_args, - method_kwargs=method_kwargs, - expected_args=[resource_type, "resource_or_id"], - expected_kwargs=expected_kwargs) - - def verify_get(self, test_method, resource_type, value=None, args=None, - mock_method="openstack.proxy.BaseProxy._get", - ignore_value=False, **kwargs): - the_value = value - if value is None: - the_value = [] if ignore_value else ["value"] - expected_args = kwargs.pop("expected_args", []) - expected_kwargs = kwargs.pop("expected_kwargs", {}) - method_kwargs = kwargs.pop("method_kwargs", kwargs) - if args: - expected_kwargs["args"] = args - if kwargs: - expected_kwargs["path_args"] = kwargs - if not expected_args: - expected_args = [resource_type] + the_value - self._verify2(mock_method, test_method, - method_args=the_value, - method_kwargs=method_kwargs or {}, - 
expected_args=expected_args, - expected_kwargs=expected_kwargs) - - def verify_head(self, test_method, resource_type, - mock_method="openstack.proxy.BaseProxy._head", - value=None, **kwargs): - the_value = [value] if value is not None else [] - expected_kwargs = {"path_args": kwargs} if kwargs else {} - self._verify2(mock_method, test_method, - method_args=the_value, - method_kwargs=kwargs, - expected_args=[resource_type] + the_value, - expected_kwargs=expected_kwargs) - - def verify_find(self, test_method, resource_type, value=None, - mock_method="openstack.proxy.BaseProxy._find", - path_args=None, **kwargs): - method_args = value or ["name_or_id"] - expected_kwargs = {} - - self._add_path_args_for_verify(path_args, method_args, expected_kwargs, - value=value) - - # TODO(briancurtin): if sub-tests worked in this mess of - # test dependencies, the following would be a lot easier to work with. - expected_kwargs["ignore_missing"] = False - self._verify2(mock_method, test_method, - method_args=method_args + [False], - expected_args=[resource_type, "name_or_id"], - expected_kwargs=expected_kwargs, - expected_result="result", - **kwargs) - - expected_kwargs["ignore_missing"] = True - self._verify2(mock_method, test_method, - method_args=method_args + [True], - expected_args=[resource_type, "name_or_id"], - expected_kwargs=expected_kwargs, - expected_result="result", - **kwargs) - - def verify_list(self, test_method, resource_type, paginated=False, - mock_method="openstack.proxy.BaseProxy._list", - **kwargs): - expected_kwargs = kwargs.pop("expected_kwargs", {}) - expected_kwargs.update({"paginated": paginated}) - expected_kwargs['limit'] = 2 - method_kwargs = kwargs.pop("method_kwargs", {}) - method_kwargs['limit'] = 2 - self._verify2(mock_method, test_method, - method_kwargs=method_kwargs, - expected_args=[resource_type], - expected_kwargs=expected_kwargs, - expected_result=["result"], - **kwargs) - - def verify_list_no_kwargs(self, test_method, resource_type, - 
paginated=False, - mock_method="openstack.proxy.BaseProxy._list"): - self._verify2(mock_method, test_method, - method_kwargs={}, - expected_args=[resource_type], - expected_kwargs={"paginated": paginated}, - expected_result=["result"]) - - def verify_update(self, test_method, resource_type, value=None, - mock_method="openstack.proxy.BaseProxy._update", - expected_result="result", path_args=None, **kwargs): - method_args = value or ["resource_or_id"] - method_kwargs = {"x": 1, "y": 2, "z": 3} - expected_args = kwargs.pop("expected_args", ["resource_or_id"]) - expected_kwargs = method_kwargs.copy() - - self._add_path_args_for_verify(path_args, method_args, expected_kwargs, - value=value) - - self._verify2(mock_method, test_method, - expected_result=expected_result, - method_args=method_args, - method_kwargs=method_kwargs, - expected_args=[resource_type] + expected_args, - expected_kwargs=expected_kwargs, - **kwargs) + mocked.assert_called_with(test_method.__self__) + + def verify_create( + self, + test_method, + resource_type, + base_path=None, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + expected_result="result", + mock_method="openstack.proxy.Proxy._create", + ): + if method_args is None: + method_args = [] + if method_kwargs is None: + method_kwargs = {"x": 1, "y": 2, "z": 3} + if expected_args is None: + expected_args = method_args.copy() + if expected_kwargs is None: + expected_kwargs = method_kwargs.copy() + expected_kwargs["base_path"] = base_path + + self._verify( + mock_method, + test_method, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=[resource_type, *expected_args], + expected_kwargs=expected_kwargs, + expected_result=expected_result, + ) + + def verify_delete( + self, + test_method, + resource_type, + ignore_missing=True, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + mock_method="openstack.proxy.Proxy._delete", + ): + if 
method_args is None: + method_args = ['resource_id'] + if method_kwargs is None: + method_kwargs = {} + method_kwargs["ignore_missing"] = ignore_missing + if expected_args is None: + expected_args = method_args.copy() + if expected_kwargs is None: + expected_kwargs = method_kwargs.copy() + + self._verify( + mock_method, + test_method, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=[resource_type, *expected_args], + expected_kwargs=expected_kwargs, + ) + + def verify_get( + self, + test_method, + resource_type, + requires_id=False, + base_path=None, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + mock_method="openstack.proxy.Proxy._get", + ): + if method_args is None: + method_args = ['resource_id'] + if method_kwargs is None: + method_kwargs = {} + if expected_args is None: + expected_args = method_args.copy() + if expected_kwargs is None: + expected_kwargs = method_kwargs.copy() + + self._verify( + mock_method, + test_method, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=[resource_type, *expected_args], + expected_kwargs=expected_kwargs, + ) + + def verify_get_overrided(self, proxy, resource_type, patch_target): + with mock.patch(patch_target, autospec=True) as res: + proxy._get_resource = mock.Mock(return_value=res) + proxy._get(resource_type) + res.fetch.assert_called_once_with( + proxy, + requires_id=True, + base_path=None, + error_message=mock.ANY, + skip_cache=False, + ) + + def verify_head( + self, + test_method, + resource_type, + base_path=None, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + mock_method="openstack.proxy.Proxy._head", + ): + if method_args is None: + method_args = ['resource_id'] + if method_kwargs is None: + method_kwargs = {} + expected_args = expected_args or method_args.copy() + expected_kwargs = expected_kwargs or method_kwargs.copy() + + self._verify( + mock_method, + test_method, + 
method_args=method_args, + method_kwargs=method_kwargs, + expected_args=[resource_type, *expected_args], + expected_kwargs=expected_kwargs, + ) + + def verify_find( + self, + test_method, + resource_type, + name_or_id='resource_name', + ignore_missing=True, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + mock_method="openstack.proxy.Proxy._find", + ): + method_args = [name_or_id] + (method_args or []) + method_kwargs = method_kwargs or {} + method_kwargs["ignore_missing"] = ignore_missing + expected_args = expected_args or method_args.copy() + expected_kwargs = expected_kwargs or method_kwargs.copy() + + self._verify( + mock_method, + test_method, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=[resource_type, *expected_args], + expected_kwargs=expected_kwargs, + ) + + def verify_list( + self, + test_method, + resource_type, + paginated=None, + base_path=None, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + mock_method="openstack.proxy.Proxy._list", + ): + if method_args is None: + method_args = [] + if method_kwargs is None: + method_kwargs = {} + if paginated is not None: + method_kwargs["paginated"] = paginated + if expected_args is None: + expected_args = method_args.copy() + if expected_kwargs is None: + expected_kwargs = method_kwargs.copy() + if base_path is not None: + expected_kwargs["base_path"] = base_path + + self._verify( + mock_method, + test_method, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=[resource_type, *expected_args], + expected_kwargs=expected_kwargs, + ) + + def verify_update( + self, + test_method, + resource_type, + base_path=None, + *, + method_args=None, + method_kwargs=None, + expected_args=None, + expected_kwargs=None, + expected_result="result", + mock_method="openstack.proxy.Proxy._update", + ): + if method_args is None: + method_args = ['resource_id'] + if method_kwargs is None: + 
method_kwargs = {"x": 1, "y": 2, "z": 3} + method_kwargs["base_path"] = base_path + if expected_args is None: + expected_args = method_args.copy() + if expected_kwargs is None: + expected_kwargs = method_kwargs.copy() + + self._verify( + mock_method, + test_method, + method_args=method_args, + method_kwargs=method_kwargs, + expected_args=[resource_type, *expected_args], + expected_kwargs=expected_kwargs, + ) def verify_wait_for_status( - self, test_method, - mock_method="openstack.resource.wait_for_status", **kwargs): + self, + test_method, + mock_method="openstack.resource.wait_for_status", + **kwargs, + ): self._verify(mock_method, test_method, **kwargs) diff --git a/openstack/tests/unit/test_proxy_base2.py b/openstack/tests/unit/test_proxy_base2.py deleted file mode 100644 index f984de6593..0000000000 --- a/openstack/tests/unit/test_proxy_base2.py +++ /dev/null @@ -1,226 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock - -from openstack.tests.unit import base - - -class TestProxyBase(base.TestCase): - def setUp(self): - super(TestProxyBase, self).setUp() - self.session = mock.Mock() - - def _add_path_args_for_verify(self, path_args, method_args, - expected_kwargs, value=None): - if path_args is not None: - if value is None: - for key in path_args: - method_args.append(path_args[key]) - expected_kwargs['path_args'] = path_args - - def _verify(self, mock_method, test_method, - method_args=None, method_kwargs=None, - expected_args=None, expected_kwargs=None, - expected_result=None): - with mock.patch(mock_method) as mocked: - mocked.return_value = expected_result - if any([method_args, method_kwargs, - expected_args, expected_kwargs]): - method_args = method_args or () - method_kwargs = method_kwargs or {} - expected_args = expected_args or () - expected_kwargs = expected_kwargs or {} - - self.assertEqual(expected_result, test_method(*method_args, - **method_kwargs)) - mocked.assert_called_with(self.session, - *expected_args, **expected_kwargs) - else: - self.assertEqual(expected_result, test_method()) - mocked.assert_called_with(self.session) - - # NOTE(briancurtin): This is a duplicate version of _verify that is - # temporarily here while we shift APIs. The difference is that - # calls from the Proxy classes aren't going to be going directly into - # the Resource layer anymore, so they don't pass in the session which - # was tested in assert_called_with. - # This is being done in lieu of adding logic and complicating - # the _verify method. It will be removed once there is one API to - # be verifying. 
- def _verify2(self, mock_method, test_method, - method_args=None, method_kwargs=None, method_result=None, - expected_args=None, expected_kwargs=None, - expected_result=None): - with mock.patch(mock_method) as mocked: - mocked.return_value = expected_result - if any([method_args, method_kwargs, - expected_args, expected_kwargs]): - method_args = method_args or () - method_kwargs = method_kwargs or {} - expected_args = expected_args or () - expected_kwargs = expected_kwargs or {} - - if method_result: - self.assertEqual(method_result, test_method(*method_args, - **method_kwargs)) - else: - self.assertEqual(expected_result, test_method(*method_args, - **method_kwargs)) - mocked.assert_called_with(*expected_args, **expected_kwargs) - else: - self.assertEqual(expected_result, test_method()) - mocked.assert_called_with(self.session) - - def verify_create(self, test_method, resource_type, - mock_method="openstack.proxy2.BaseProxy._create", - expected_result="result", **kwargs): - the_kwargs = {"x": 1, "y": 2, "z": 3} - method_kwargs = kwargs.pop("method_kwargs", the_kwargs) - expected_args = [resource_type] - expected_kwargs = kwargs.pop("expected_kwargs", the_kwargs) - - self._verify2(mock_method, test_method, - expected_result=expected_result, - method_kwargs=method_kwargs, - expected_args=expected_args, - expected_kwargs=expected_kwargs, - **kwargs) - - def verify_delete(self, test_method, resource_type, ignore, - input_path_args=None, expected_path_args=None, - method_kwargs=None, expected_args=None, - expected_kwargs=None, - mock_method="openstack.proxy2.BaseProxy._delete"): - method_args = ["resource_or_id"] - method_kwargs = method_kwargs or {} - method_kwargs["ignore_missing"] = ignore - if isinstance(input_path_args, dict): - for key in input_path_args: - method_kwargs[key] = input_path_args[key] - elif isinstance(input_path_args, list): - method_args = input_path_args - expected_kwargs = expected_kwargs or {} - expected_kwargs["ignore_missing"] = ignore - if 
expected_path_args: - expected_kwargs.update(expected_path_args) - expected_args = expected_args or [resource_type, "resource_or_id"] - self._verify2(mock_method, test_method, - method_args=method_args, - method_kwargs=method_kwargs, - expected_args=expected_args, - expected_kwargs=expected_kwargs) - - def verify_get(self, test_method, resource_type, value=None, args=None, - mock_method="openstack.proxy2.BaseProxy._get", - ignore_value=False, **kwargs): - the_value = value - if value is None: - the_value = [] if ignore_value else ["value"] - expected_args = kwargs.pop("expected_args", []) - expected_kwargs = kwargs.pop("expected_kwargs", {}) - method_kwargs = kwargs.pop("method_kwargs", kwargs) - if args: - expected_kwargs["args"] = args - if kwargs: - expected_kwargs["path_args"] = kwargs - if not expected_args: - expected_args = [resource_type] + the_value - self._verify2(mock_method, test_method, - method_args=the_value, - method_kwargs=method_kwargs or {}, - expected_args=expected_args, - expected_kwargs=expected_kwargs) - - def verify_head(self, test_method, resource_type, - mock_method="openstack.proxy2.BaseProxy._head", - value=None, **kwargs): - the_value = [value] if value is not None else [] - expected_kwargs = {"path_args": kwargs} if kwargs else {} - self._verify2(mock_method, test_method, - method_args=the_value, - method_kwargs=kwargs, - expected_args=[resource_type] + the_value, - expected_kwargs=expected_kwargs) - - def verify_find(self, test_method, resource_type, value=None, - mock_method="openstack.proxy2.BaseProxy._find", - path_args=None, **kwargs): - method_args = value or ["name_or_id"] - expected_kwargs = {} - - self._add_path_args_for_verify(path_args, method_args, expected_kwargs, - value=value) - - # TODO(briancurtin): if sub-tests worked in this mess of - # test dependencies, the following would be a lot easier to work with. 
- expected_kwargs["ignore_missing"] = False - self._verify2(mock_method, test_method, - method_args=method_args + [False], - expected_args=[resource_type, "name_or_id"], - expected_kwargs=expected_kwargs, - expected_result="result", - **kwargs) - - expected_kwargs["ignore_missing"] = True - self._verify2(mock_method, test_method, - method_args=method_args + [True], - expected_args=[resource_type, "name_or_id"], - expected_kwargs=expected_kwargs, - expected_result="result", - **kwargs) - - def verify_list(self, test_method, resource_type, paginated=False, - mock_method="openstack.proxy2.BaseProxy._list", - **kwargs): - expected_kwargs = kwargs.pop("expected_kwargs", {}) - expected_kwargs.update({"paginated": paginated}) - method_kwargs = kwargs.pop("method_kwargs", {}) - self._verify2(mock_method, test_method, - method_kwargs=method_kwargs, - expected_args=[resource_type], - expected_kwargs=expected_kwargs, - expected_result=["result"], - **kwargs) - - def verify_list_no_kwargs(self, test_method, resource_type, - paginated=False, - mock_method="openstack.proxy2.BaseProxy._list"): - self._verify2(mock_method, test_method, - method_kwargs={}, - expected_args=[resource_type], - expected_kwargs={"paginated": paginated}, - expected_result=["result"]) - - def verify_update(self, test_method, resource_type, value=None, - mock_method="openstack.proxy2.BaseProxy._update", - expected_result="result", path_args=None, **kwargs): - method_args = value or ["resource_or_id"] - method_kwargs = {"x": 1, "y": 2, "z": 3} - expected_args = kwargs.pop("expected_args", ["resource_or_id"]) - expected_kwargs = method_kwargs.copy() - - self._add_path_args_for_verify(path_args, method_args, expected_kwargs, - value=value) - - self._verify2(mock_method, test_method, - expected_result=expected_result, - method_args=method_args, - method_kwargs=method_kwargs, - expected_args=[resource_type] + expected_args, - expected_kwargs=expected_kwargs, - **kwargs) - - def verify_wait_for_status( - self, 
test_method, - mock_method="openstack.resource2.wait_for_status", **kwargs): - self._verify(mock_method, test_method, **kwargs) diff --git a/openstack/tests/unit/test_resource.py b/openstack/tests/unit/test_resource.py index 87f4009246..3396a9b785 100644 --- a/openstack/tests/unit/test_resource.py +++ b/openstack/tests/unit/test_resource.py @@ -10,1524 +10,3744 @@ # License for the specific language governing permissions and limitations # under the License. -import copy +import itertools import json -import os +import logging +from unittest import mock -from keystoneauth1 import session -import mock +from keystoneauth1 import adapter import requests -from testtools import matchers +from openstack import dns from openstack import exceptions -from openstack import format +from openstack import fields from openstack import resource from openstack.tests.unit import base from openstack import utils -fake_parent = 'robert' -fake_name = 'rey' -fake_id = 99 -fake_attr1 = 'lana' -fake_attr2 = 'del' +class FakeResponse: + def __init__(self, response, status_code=200, headers=None): + self.body = response + self.status_code = status_code + headers = headers if headers else {'content-type': 'application/json'} + self.headers = requests.structures.CaseInsensitiveDict(headers) + + def json(self): + return self.body + + +class TestComponentManager(base.TestCase): + def test_create_basic(self): + sot = resource._ComponentManager() + self.assertEqual(dict(), sot.attributes) + self.assertEqual(set(), sot._dirty) + + def test_create_unsynced(self): + attrs = {"hey": 1, "hi": 2, "hello": 3} + sync = False + + sot = resource._ComponentManager(attributes=attrs, synchronized=sync) + self.assertEqual(attrs, sot.attributes) + self.assertEqual(set(attrs.keys()), sot._dirty) + + def test_create_synced(self): + attrs = {"hey": 1, "hi": 2, "hello": 3} + sync = True + + sot = resource._ComponentManager(attributes=attrs, synchronized=sync) + self.assertEqual(attrs, sot.attributes) + 
self.assertEqual(set(), sot._dirty) + + def test_getitem(self): + key = "key" + value = "value" + attrs = {key: value} + + sot = resource._ComponentManager(attributes=attrs) + self.assertEqual(value, sot.__getitem__(key)) + + def test_setitem_new(self): + key = "key" + value = "value" + + sot = resource._ComponentManager() + sot.__setitem__(key, value) + + self.assertIn(key, sot.attributes) + self.assertIn(key, sot.dirty) + + def test_setitem_unchanged(self): + key = "key" + value = "value" + attrs = {key: value} + + sot = resource._ComponentManager(attributes=attrs, synchronized=True) + # This shouldn't end up in the dirty list since we're just re-setting. + sot.__setitem__(key, value) + + self.assertEqual(value, sot.attributes[key]) + self.assertNotIn(key, sot.dirty) + + def test_delitem(self): + key = "key" + value = "value" + attrs = {key: value} + + sot = resource._ComponentManager(attributes=attrs, synchronized=True) + sot.__delitem__(key) + + self.assertIsNone(sot.dirty[key]) + + def test_iter(self): + attrs = {"key": "value"} + sot = resource._ComponentManager(attributes=attrs) + self.assertCountEqual(iter(attrs), sot.__iter__()) -fake_resource = 'fake' -fake_resources = 'fakes' -fake_arguments = {'parent_name': fake_parent} -fake_base_path = '/fakes/%(parent_name)s/data' -fake_path = '/fakes/rey/data' + def test_len(self): + attrs = {"key": "value"} + sot = resource._ComponentManager(attributes=attrs) + self.assertEqual(len(attrs), sot.__len__()) -fake_data = {'id': fake_id, - 'enabled': True, - 'name': fake_name, - 'parent': fake_parent, - 'attr1': fake_attr1, - 'attr2': fake_attr2, - 'status': None} -fake_body = {fake_resource: fake_data} + def test_dirty(self): + key = "key" + key2 = "key2" + value = "value" + attrs = {key: value} + sot = resource._ComponentManager(attributes=attrs, synchronized=False) + self.assertEqual({key: value}, sot.dirty) + sot.__setitem__(key2, value) + self.assertEqual({key: value, key2: value}, sot.dirty) -class 
FakeParent(resource.Resource): - id_attribute = "name" - name = resource.prop('name') + def test_clean(self): + key = "key" + value = "value" + attrs = {key: value} + sot = resource._ComponentManager(attributes=attrs, synchronized=False) + self.assertEqual(attrs, sot.dirty) + sot.clean() -class FakeResource(resource.Resource): + self.assertEqual(dict(), sot.dirty) - resource_key = fake_resource - resources_key = fake_resources - base_path = fake_base_path - allow_create = allow_retrieve = allow_update = True - allow_delete = allow_list = allow_head = True +class Test_Request(base.TestCase): + def test_create(self): + uri = 1 + body = 2 + headers = 3 + + sot = resource._Request(uri, body, headers) - enabled = resource.prop('enabled', type=format.BoolStr) - name = resource.prop('name') - parent = resource.prop('parent_name') - first = resource.prop('attr1') - second = resource.prop('attr2') - third = resource.prop('attr3', alias='attr_three') - status = resource.prop('status') + self.assertEqual(uri, sot.url) + self.assertEqual(body, sot.body) + self.assertEqual(headers, sot.headers) -class FakeResourceNoKeys(FakeResource): +class TestQueryParameters(base.TestCase): + def test_create(self): + location = "location" + mapping = { + "first_name": "first-name", + "second_name": {"name": "second-name"}, + "third_name": {"name": "third", "type": int}, + } - resource_key = None - resources_key = None + sot = resource.QueryParameters(location, **mapping) + + self.assertEqual( + { + "location": "location", + "first_name": "first-name", + "second_name": {"name": "second-name"}, + "third_name": {"name": "third", "type": int}, + "limit": "limit", + "marker": "marker", + }, + sot._mapping, + ) + + def test_transpose_unmapped(self): + def _type(value, rtype): + self.assertIs(rtype, mock.sentinel.resource_type) + return value * 10 + + location = "location" + mapping = { + "first_name": "first-name", + "pet_name": {"name": "pet"}, + "answer": {"name": "answer", "type": int}, + 
"complex": {"type": _type}, + } + sot = resource.QueryParameters(location, **mapping) + result = sot._transpose( + { + "location": "Brooklyn", + "first_name": "Brian", + "pet_name": "Meow", + "answer": "42", + "last_name": "Curtin", + "complex": 1, + }, + mock.sentinel.resource_type, + ) + + # last_name isn't mapped and shouldn't be included + self.assertEqual( + { + "location": "Brooklyn", + "first-name": "Brian", + "pet": "Meow", + "answer": 42, + "complex": 10, + }, + result, + ) + + def test_transpose_not_in_query(self): + location = "location" + mapping = { + "first_name": "first-name", + "pet_name": {"name": "pet"}, + "answer": {"name": "answer", "type": int}, + } -class PropTests(base.TestCase): + sot = resource.QueryParameters(location, **mapping) + result = sot._transpose( + {"location": "Brooklyn"}, mock.sentinel.resource_type + ) + + # first_name not being in the query shouldn't affect results + self.assertEqual({"location": "Brooklyn"}, result) + + +class TestResource(base.TestCase): + def test_initialize_basic(self): + body = {"body": 1} + header = {"header": 2, "Location": "somewhere"} + uri = {"uri": 3} + computed = {"computed": 4} + everything = dict( + itertools.chain( + body.items(), + header.items(), + uri.items(), + computed.items(), + ) + ) + + mock_collect = mock.Mock() + mock_collect.return_value = body, header, uri, computed + + with mock.patch.object( + resource.Resource, "_collect_attrs", mock_collect + ): + sot = resource.Resource(_synchronized=False, **everything) + mock_collect.assert_called_once_with(everything) + self.assertIsNone(sot.location) + + self.assertIsInstance(sot._body, resource._ComponentManager) + self.assertEqual(body, sot._body.dirty) + self.assertIsInstance(sot._header, resource._ComponentManager) + self.assertEqual(header, sot._header.dirty) + self.assertIsInstance(sot._uri, resource._ComponentManager) + self.assertEqual(uri, sot._uri.dirty) + + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + 
self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertFalse(sot.allow_list) + self.assertFalse(sot.allow_head) + self.assertEqual('PUT', sot.commit_method) + self.assertEqual('POST', sot.create_method) + + def test_repr(self): + a = {"a": 1} + b = {"b": 2} + c = {"c": 3} + d = {"d": 4} - def test_with_alias_and_type(self): class Test(resource.Resource): - attr = resource.prop("attr1", alias="attr2", type=bool) + def __init__(self): + self._body = mock.Mock() + self._body.attributes.items = mock.Mock(return_value=a.items()) + + self._header = mock.Mock() + self._header.attributes.items = mock.Mock( + return_value=b.items() + ) + + self._uri = mock.Mock() + self._uri.attributes.items = mock.Mock(return_value=c.items()) + + self._computed = mock.Mock() + self._computed.attributes.items = mock.Mock( + return_value=d.items() + ) + + the_repr = repr(Test()) + + # Don't test the arguments all together since the dictionary order + # they're rendered in can't be depended on, nor does it matter. 
+ self.assertIn("openstack.tests.unit.test_resource.Test", the_repr) + self.assertIn("a=1", the_repr) + self.assertIn("b=2", the_repr) + self.assertIn("c=3", the_repr) + self.assertIn("d=4", the_repr) + + def test_equality(self): + class Example(resource.Resource): + x = resource.Body("x") + y = resource.Header("y") + z = resource.URI("z") + + e1 = Example(x=1, y=2, z=3) + e2 = Example(x=1, y=2, z=3) + e3 = Example(x=0, y=0, z=0) + + self.assertEqual(e1, e2) + self.assertNotEqual(e1, e3) + self.assertNotEqual(e1, None) + + def test__update(self): + sot = resource.Resource() + + body = "body" + header = "header" + uri = "uri" + computed = "computed" + + sot._collect_attrs = mock.Mock( + return_value=(body, header, uri, computed) + ) + sot._body.update = mock.Mock() + sot._header.update = mock.Mock() + sot._uri.update = mock.Mock() + sot._computed.update = mock.Mock() + + args = {"arg": 1} + sot._update(**args) + + sot._collect_attrs.assert_called_once_with(args) + sot._body.update.assert_called_once_with(body) + sot._header.update.assert_called_once_with(header) + sot._uri.update.assert_called_once_with(uri) + sot._computed.update.assert_called_with(computed) + + def test__consume_attrs(self): + serverside_key1 = "someKey1" + clientside_key1 = "some_key1" + serverside_key2 = "someKey2" + clientside_key2 = "some_key2" + value1 = "value1" + value2 = "value2" + mapping = { + serverside_key1: clientside_key1, + serverside_key2: clientside_key2, + } + + other_key = "otherKey" + other_value = "other" + attrs = { + clientside_key1: value1, + serverside_key2: value2, + other_key: other_value, + } + + sot = resource.Resource() + + result = sot._consume_attrs(mapping, attrs) + + # Make sure that the expected key was consumed and we're only + # left with the other stuff. + self.assertDictEqual({other_key: other_value}, attrs) + + # Make sure that after we've popped our relevant client-side + # key off that we are returning it keyed off of its server-side + # name. 
+ self.assertDictEqual( + {serverside_key1: value1, serverside_key2: value2}, result + ) - t = Test(attrs={"attr2": 500}) + def test__mapping_defaults(self): + # Check that even on an empty class, we get the expected + # built-in attributes. - # Don't test with assertTrue because 500 evaluates to True. - # Need to test that bool(500) happened and attr2 *is* True. - self.assertIs(t.attr, True) + self.assertIn("location", resource.Resource._computed_mapping()) + self.assertIn("name", resource.Resource._body_mapping()) + self.assertIn("id", resource.Resource._body_mapping()) - def test_defaults(self): - new_default = "new_default" + def test__mapping_overrides(self): + # Iterating through the MRO used to wipe out overrides of mappings + # found in base classes. + new_name = "MyName" + new_id = "MyID" class Test(resource.Resource): - attr1 = resource.prop("attr1") - attr2 = resource.prop("attr2", default=new_default) + name = resource.Body(new_name) + id = resource.Body(new_id) - t = Test() + mapping = Test._body_mapping() - self.assertIsNone(t.attr1) - self.assertEqual(new_default, t.attr2) + self.assertEqual("name", mapping["MyName"]) + self.assertEqual("id", mapping["MyID"]) - # When the default value is passed in, it is left untouched. - # Check that attr2 is literally the same object we set as default. 
- t.attr2 = new_default - self.assertIs(new_default, t.attr2) + def test__body_mapping(self): + class Test(resource.Resource): + x = resource.Body("x") + y = resource.Body("y") + z = resource.Body("z") - not_default = 'not default' - t2 = Test({'attr2': not_default}) - self.assertEqual(not_default, t2.attr2) + self.assertIn("x", Test._body_mapping()) + self.assertIn("y", Test._body_mapping()) + self.assertIn("z", Test._body_mapping()) - # Assert that if the default is passed in, it overrides the previously - # set value (bug #1425996) - t2.attr2 = new_default - self.assertEqual(new_default, t2.attr2) + def test__header_mapping(self): + class Test(resource.Resource): + x = resource.Header("x") + y = resource.Header("y") + z = resource.Header("z") - def test_get_without_instance(self): - self.assertIsNone(FakeResource.name) + self.assertIn("x", Test._header_mapping()) + self.assertIn("y", Test._header_mapping()) + self.assertIn("z", Test._header_mapping()) - def test_set_ValueError(self): + def test__uri_mapping(self): class Test(resource.Resource): - attr = resource.prop("attr", type=int) + x = resource.URI("x") + y = resource.URI("y") + z = resource.URI("z") - t = Test() + self.assertIn("x", Test._uri_mapping()) + self.assertIn("y", Test._uri_mapping()) + self.assertIn("z", Test._uri_mapping()) - def should_raise(): - t.attr = "this is not an int" + def test__getattribute__id_in_body(self): + id = "lol" + sot = resource.Resource(id=id) - self.assertThat(should_raise, matchers.raises(ValueError)) + result = getattr(sot, "id") + self.assertEqual(result, id) - def test_set_TypeError(self): - class Type(object): - def __init__(self): - pass + def test__getattribute__id_with_alternate(self): + id = "lol" class Test(resource.Resource): - attr = resource.prop("attr", type=Type) + blah = resource.Body("blah", alternate_id=True) - t = Test() + sot = Test(blah=id) - def should_raise(): - t.attr = "this type takes no args" + result = getattr(sot, "id") + 
self.assertEqual(result, id) - self.assertThat(should_raise, matchers.raises(TypeError)) + def test__getattribute__id_without_alternate(self): + class Test(resource.Resource): + id = None - def test_resource_type(self): - class FakestResource(resource.Resource): - shortstop = resource.prop("shortstop", type=FakeResource) - third_base = resource.prop("third_base", type=FakeResource) + sot = Test() + self.assertIsNone(sot.id) - sot = FakestResource() - id1 = "Ernie Banks" - id2 = "Ron Santo" - sot.shortstop = id1 - sot.third_base = id2 + def test__alternate_id_None(self): + self.assertEqual("", resource.Resource._alternate_id()) - resource1 = FakeResource.new(id=id1) - self.assertEqual(resource1, sot.shortstop) - self.assertEqual(id1, sot.shortstop.id) - self.assertEqual(FakeResource, type(sot.shortstop)) + def test__alternate_id(self): + class Test(resource.Resource): + alt = resource.Body("the_alt", alternate_id=True) - resource2 = FakeResource.new(id=id2) - self.assertEqual(resource2, sot.third_base) - self.assertEqual(id2, sot.third_base.id) - self.assertEqual(FakeResource, type(sot.third_base)) + self.assertEqual("the_alt", Test._alternate_id()) - sot2 = FakestResource() - sot2.shortstop = resource1 - sot2.third_base = resource2 - self.assertEqual(resource1, sot2.shortstop) - self.assertEqual(id1, sot2.shortstop.id) - self.assertEqual(FakeResource, type(sot2.shortstop)) - self.assertEqual(resource2, sot2.third_base) - self.assertEqual(id2, sot2.third_base.id) - self.assertEqual(FakeResource, type(sot2.third_base)) + value1 = "lol" + sot = Test(alt=value1) + self.assertEqual(sot.alt, value1) + self.assertEqual(sot.id, value1) - body = { - "shortstop": id1, - "third_base": id2 - } - sot3 = FakestResource(body) - self.assertEqual(FakeResource({"id": id1}), sot3.shortstop) - self.assertEqual(FakeResource({"id": id2}), sot3.third_base) - - def test_set_alias_same_name(self): - class Test(resource.Resource): - attr = resource.prop("something", alias="attr") - - val = 
"hey" - args = {"something": val} - sot = Test(args) - - self.assertEqual(val, sot._attrs["something"]) - self.assertEqual(val, sot.attr) - - def test_property_is_none(self): - class Test(resource.Resource): - attr = resource.prop("something", type=dict) - - args = {"something": None} - sot = Test(args) - - self.assertIsNone(sot._attrs["something"]) - self.assertIsNone(sot.attr) - - -class HeaderTests(base.TestCase): - class Test(resource.Resource): - base_path = "/ramones" - service = "punk" - allow_create = True - allow_update = True - hey = resource.header("vocals") - ho = resource.header("guitar") - letsgo = resource.header("bass") - - def test_get(self): - val = "joey" - args = {"vocals": val} - sot = HeaderTests.Test({'headers': args}) - self.assertEqual(val, sot.hey) - self.assertIsNone(sot.ho) - self.assertIsNone(sot.letsgo) - - def test_set_new(self): - args = {"vocals": "joey", "bass": "deedee"} - sot = HeaderTests.Test({'headers': args}) - sot._reset_dirty() - sot.ho = "johnny" - self.assertEqual("johnny", sot.ho) - self.assertTrue(sot.is_dirty) - - def test_set_old(self): - args = {"vocals": "joey", "bass": "deedee"} - sot = HeaderTests.Test({'headers': args}) - sot._reset_dirty() - sot.letsgo = "cj" - self.assertEqual("cj", sot.letsgo) - self.assertTrue(sot.is_dirty) - - def test_set_brand_new(self): - sot = HeaderTests.Test({'headers': {}}) - sot._reset_dirty() - sot.ho = "johnny" - self.assertEqual("johnny", sot.ho) - self.assertTrue(sot.is_dirty) - self.assertEqual({'headers': {"guitar": "johnny"}}, sot) - - def test_1428342(self): - sot = HeaderTests.Test({'headers': - requests.structures.CaseInsensitiveDict()}) - - self.assertIsNone(sot.hey) - - def test_create_update_headers(self): - sot = HeaderTests.Test() - sot._reset_dirty() - sot.ho = "johnny" - sot.letsgo = "deedee" - response = mock.Mock() - response_body = {'id': 1} - response.json = mock.Mock(return_value=response_body) - response.headers = None - sess = mock.Mock() - sess.post = 
mock.Mock(return_value=response) - sess.put = mock.Mock(return_value=response) - - sot.create(sess) - headers = {'guitar': 'johnny', 'bass': 'deedee'} - sess.post.assert_called_with(HeaderTests.Test.base_path, - endpoint_filter=HeaderTests.Test.service, - headers=headers, - json={}) - - sot['id'] = 1 - sot.letsgo = "cj" - headers = {'guitar': 'johnny', 'bass': 'cj'} - sot.update(sess) - sess.put.assert_called_with('ramones/1', - endpoint_filter=HeaderTests.Test.service, - headers=headers, - json={}) - - -class ResourceTests(base.TestCase): + value2 = "rofl" + sot = Test(the_alt=value2) + self.assertEqual(sot.alt, value2) + self.assertEqual(sot.id, value2) - def setUp(self): - super(ResourceTests, self).setUp() - self.session = mock.Mock(spec=session.Session) - self.session.get_filter = mock.Mock(return_value={}) - - def assertCalledURL(self, method, url): - # call_args gives a tuple of *args and tuple of **kwargs. - # Check that the first arg in *args (the URL) has our url. - self.assertEqual(method.call_args[0][0], url) - - def test_empty_id(self): - resp = mock.Mock() - resp.json = mock.Mock(return_value=fake_body) - self.session.get.return_value = resp - - obj = FakeResource.new(**fake_arguments) - self.assertEqual(obj, obj.get(self.session)) - - self.assertEqual(fake_id, obj.id) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_attr1, obj['attr1']) - self.assertEqual(fake_attr2, obj['attr2']) - - self.assertEqual(fake_name, obj.name) - self.assertEqual(fake_attr1, obj.first) - self.assertEqual(fake_attr2, obj.second) - - def test_not_allowed(self): - class Nope(resource.Resource): - allow_create = allow_retrieve = allow_update = False - allow_delete = allow_list = allow_head = False - - nope = Nope() - - def cant_create(): - nope.create_by_id(1, 2) - - def cant_retrieve(): - nope.get_data_by_id(1, 2) - - def cant_update(): - nope.update_by_id(1, 2, 3) - - def cant_delete(): - nope.delete_by_id(1, 2) - - def cant_list(): - for i in nope.list(1): 
- pass - - def cant_head(): - nope.head_data_by_id(1, 2) - - self.assertThat(cant_create, - matchers.raises(exceptions.MethodNotSupported)) - self.assertThat(cant_retrieve, - matchers.raises(exceptions.MethodNotSupported)) - self.assertThat(cant_update, - matchers.raises(exceptions.MethodNotSupported)) - self.assertThat(cant_delete, - matchers.raises(exceptions.MethodNotSupported)) - self.assertThat(cant_list, - matchers.raises(exceptions.MethodNotSupported)) - self.assertThat(cant_head, - matchers.raises(exceptions.MethodNotSupported)) - - def _test_create_by_id(self, key, response_value, response_body, - attrs, json_body, response_headers=None): - - class FakeResource2(FakeResource): - resource_key = key - service = "my_service" + def test__alternate_id_from_other_property(self): + class Test(resource.Resource): + foo = resource.Body("foo") + bar = resource.Body("bar", alternate_id=True) + + # NOTE(redrobot): My expectation looking at the Test class defined + # in this test is that because the alternate_id parameter is + # is being set to True on the "bar" property of the Test class, + # then the _alternate_id() method should return the name of that "bar" + # property. 
+ self.assertEqual("bar", Test._alternate_id()) + sot = Test(bar='bunnies') + self.assertEqual(sot.id, 'bunnies') + self.assertEqual(sot.bar, 'bunnies') + sot = Test(id='chickens', bar='bunnies') + self.assertEqual(sot.id, 'chickens') + self.assertEqual(sot.bar, 'bunnies') + + def test__get_id_instance(self): + class Test(resource.Resource): + id = resource.Body("id") - response = mock.Mock() - response.json = mock.Mock(return_value=response_body) - response.headers = response_headers - expected_resp = response_value.copy() - if response_headers: - expected_resp.update({'headers': response_headers}) - - sess = mock.Mock() - sess.put = mock.Mock(return_value=response) - sess.post = mock.Mock(return_value=response) - - resp = FakeResource2.create_by_id(sess, attrs) - self.assertEqual(expected_resp, resp) - sess.post.assert_called_with(FakeResource2.base_path, - endpoint_filter=FakeResource2.service, - json=json_body) - - r_id = "my_id" - resp = FakeResource2.create_by_id(sess, attrs, resource_id=r_id) - self.assertEqual(response_value, resp) - sess.put.assert_called_with( - utils.urljoin(FakeResource2.base_path, r_id), - endpoint_filter=FakeResource2.service, - json=json_body) - - path_args = {"parent_name": "my_name"} - resp = FakeResource2.create_by_id(sess, attrs, path_args=path_args) - self.assertEqual(response_value, resp) - sess.post.assert_called_with(FakeResource2.base_path % path_args, - endpoint_filter=FakeResource2.service, - json=json_body) - - resp = FakeResource2.create_by_id(sess, attrs, resource_id=r_id, - path_args=path_args) - self.assertEqual(response_value, resp) - sess.put.assert_called_with( - utils.urljoin(FakeResource2.base_path % path_args, r_id), - endpoint_filter=FakeResource2.service, - json=json_body) - - def test_create_without_resource_key(self): - key = None - response_value = {"a": 1, "b": 2, "c": 3} - response_body = response_value - attrs = response_value - json_body = attrs - self._test_create_by_id(key, response_value, 
response_body, - attrs, json_body) - - def test_create_with_response_headers(self): - key = None - response_value = {"a": 1, "b": 2, "c": 3} - response_body = response_value - response_headers = {'location': 'foo'} - attrs = response_value.copy() - json_body = attrs - self._test_create_by_id(key, response_value, response_body, - attrs, json_body, - response_headers=response_headers) - - def test_create_with_resource_key(self): - key = "my_key" - response_value = {"a": 1, "b": 2, "c": 3} - response_body = {key: response_value} - attrs = response_body - json_body = {key: attrs} - self._test_create_by_id(key, response_value, response_body, - attrs, json_body) - - def _test_get_data_by_id(self, key, response_value, response_body): - class FakeResource2(FakeResource): - resource_key = key - service = "my_service" + value = "id" + sot = Test(id=value) - response = mock.Mock() - response.json = mock.Mock(return_value=response_body) - - sess = mock.Mock() - sess.get = mock.Mock(return_value=response) - - r_id = "my_id" - resp = FakeResource2.get_data_by_id(sess, resource_id=r_id) - self.assertEqual(response_value, resp) - sess.get.assert_called_with( - utils.urljoin(FakeResource2.base_path, r_id), - endpoint_filter=FakeResource2.service) - - path_args = {"parent_name": "my_name"} - resp = FakeResource2.get_data_by_id(sess, resource_id=r_id, - path_args=path_args) - self.assertEqual(response_value, resp) - sess.get.assert_called_with( - utils.urljoin(FakeResource2.base_path % path_args, r_id), - endpoint_filter=FakeResource2.service) - - def test_get_data_without_resource_key(self): - key = None - response_value = {"a": 1, "b": 2, "c": 3} - response_body = response_value - self._test_get_data_by_id(key, response_value, response_body) - - def test_get_data_with_resource_key(self): - key = "my_key" - response_value = {"a": 1, "b": 2, "c": 3} - response_body = {key: response_value} - self._test_get_data_by_id(key, response_value, response_body) - - def 
_test_head_data_by_id(self, key, response_value): - class FakeResource2(FakeResource): - resource_key = key - service = "my_service" + self.assertEqual(value, sot._get_id(sot)) - response = mock.Mock() - response.headers = response_value - - sess = mock.Mock() - sess.head = mock.Mock(return_value=response) - - r_id = "my_id" - resp = FakeResource2.head_data_by_id(sess, resource_id=r_id) - self.assertEqual({'headers': response_value}, resp) - headers = {'Accept': ''} - sess.head.assert_called_with( - utils.urljoin(FakeResource2.base_path, r_id), - endpoint_filter=FakeResource2.service, - headers=headers) - - path_args = {"parent_name": "my_name"} - resp = FakeResource2.head_data_by_id(sess, resource_id=r_id, - path_args=path_args) - self.assertEqual({'headers': response_value}, resp) - headers = {'Accept': ''} - sess.head.assert_called_with( - utils.urljoin(FakeResource2.base_path % path_args, r_id), - endpoint_filter=FakeResource2.service, - headers=headers) - - def test_head_data_without_resource_key(self): - key = None - response_value = {"key1": "value1", "key2": "value2"} - self._test_head_data_by_id(key, response_value) - - def test_head_data_with_resource_key(self): - key = "my_key" - response_value = {"key1": "value1", "key2": "value2"} - self._test_head_data_by_id(key, response_value) - - def _test_update_by_id(self, key, response_value, response_body, - attrs, json_body, response_headers=None): - - class FakeResource2(FakeResource): - patch_update = True - resource_key = key - service = "my_service" + def test__get_id_instance_alternate(self): + class Test(resource.Resource): + attr = resource.Body("attr", alternate_id=True) - response = mock.Mock() - response.json = mock.Mock(return_value=response_body) - response.headers = response_headers - expected_resp = response_value.copy() - if response_headers: - expected_resp.update({'headers': response_headers}) - - sess = mock.Mock() - sess.patch = mock.Mock(return_value=response) - - r_id = "my_id" - resp = 
FakeResource2.update_by_id(sess, r_id, attrs) - self.assertEqual(expected_resp, resp) - sess.patch.assert_called_with( - utils.urljoin(FakeResource2.base_path, r_id), - endpoint_filter=FakeResource2.service, - json=json_body) - - path_args = {"parent_name": "my_name"} - resp = FakeResource2.update_by_id(sess, r_id, attrs, - path_args=path_args) - self.assertEqual(expected_resp, resp) - sess.patch.assert_called_with( - utils.urljoin(FakeResource2.base_path % path_args, r_id), - endpoint_filter=FakeResource2.service, - json=json_body) - - def test_update_without_resource_key(self): - key = None - response_value = {"a": 1, "b": 2, "c": 3} - response_body = response_value - attrs = response_value - json_body = attrs - self._test_update_by_id(key, response_value, response_body, - attrs, json_body) - - def test_update_with_resource_key(self): - key = "my_key" - response_value = {"a": 1, "b": 2, "c": 3} - response_body = {key: response_value} - attrs = response_value - json_body = {key: attrs} - self._test_update_by_id(key, response_value, response_body, - attrs, json_body) - - def test_update_with_response_headers(self): - key = "my_key" - response_value = {"a": 1, "b": 2, "c": 3} - response_body = {key: response_value} - response_headers = {'location': 'foo'} - attrs = response_value.copy() - json_body = {key: attrs} - self._test_update_by_id(key, response_value, response_body, - attrs, json_body, - response_headers=response_headers) - - def test_delete_by_id(self): - class FakeResource2(FakeResource): - service = "my_service" - - sess = mock.Mock() - sess.delete = mock.Mock(return_value=None) - - r_id = "my_id" - resp = FakeResource2.delete_by_id(sess, r_id) - self.assertIsNone(resp) - headers = {'Accept': ''} - sess.delete.assert_called_with( - utils.urljoin(FakeResource2.base_path, r_id), - endpoint_filter=FakeResource2.service, - headers=headers) - - path_args = {"parent_name": "my_name"} - resp = FakeResource2.delete_by_id(sess, r_id, path_args=path_args) - 
self.assertIsNone(resp) - headers = {'Accept': ''} - sess.delete.assert_called_with( - utils.urljoin(FakeResource2.base_path % path_args, r_id), - endpoint_filter=FakeResource2.service, - headers=headers) + value = "id" + sot = Test(attr=value) - def test_create(self): - resp = mock.Mock() - resp.json = mock.Mock(return_value=fake_body) - resp.headers = {'location': 'foo'} - self.session.post = mock.Mock(return_value=resp) - - # Create resource with subset of attributes in order to - # verify create refreshes all attributes from response. - obj = FakeResource.new(parent_name=fake_parent, - name=fake_name, - enabled=True, - attr1=fake_attr1) - - self.assertEqual(obj, obj.create(self.session)) - self.assertFalse(obj.is_dirty) - - last_req = self.session.post.call_args[1]["json"][ - FakeResource.resource_key] - - self.assertEqual(4, len(last_req)) - self.assertTrue(last_req['enabled']) - self.assertEqual(fake_parent, last_req['parent_name']) - self.assertEqual(fake_name, last_req['name']) - self.assertEqual(fake_attr1, last_req['attr1']) - - self.assertTrue(obj['enabled']) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_parent, obj['parent_name']) - self.assertEqual(fake_attr1, obj['attr1']) - self.assertEqual(fake_attr2, obj['attr2']) - self.assertIsNone(obj['status']) - - self.assertTrue(obj.enabled) - self.assertEqual(fake_id, obj.id) - self.assertEqual(fake_name, obj.name) - self.assertEqual(fake_parent, obj.parent_name) - self.assertEqual(fake_parent, obj.parent) - self.assertEqual(fake_attr1, obj.first) - self.assertEqual(fake_attr1, obj.attr1) - self.assertEqual(fake_attr2, obj.second) - self.assertEqual(fake_attr2, obj.attr2) - self.assertIsNone(obj.status) - self.assertEqual('foo', obj.location) - - def test_get(self): - resp = mock.Mock() - resp.json = mock.Mock(return_value=fake_body) - resp.headers = {'location': 'foo'} - self.session.get = mock.Mock(return_value=resp) - - # Create resource with subset of attributes in order to - # 
verify get refreshes all attributes from response. - obj = FakeResource.from_id(str(fake_id)) - obj['parent_name'] = fake_parent - - self.assertEqual(obj, obj.get(self.session)) - - # Check that the proper URL is being built. - self.assertCalledURL(self.session.get, - os.path.join(fake_base_path % fake_arguments, - str(fake_id))[1:]) - - self.assertTrue(obj['enabled']) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_parent, obj['parent_name']) - self.assertEqual(fake_attr1, obj['attr1']) - self.assertEqual(fake_attr2, obj['attr2']) - self.assertIsNone(obj['status']) - - self.assertTrue(obj.enabled) - self.assertEqual(fake_id, obj.id) - self.assertEqual(fake_name, obj.name) - self.assertEqual(fake_parent, obj.parent_name) - self.assertEqual(fake_parent, obj.parent) - self.assertEqual(fake_attr1, obj.first) - self.assertEqual(fake_attr1, obj.attr1) - self.assertEqual(fake_attr2, obj.second) - self.assertEqual(fake_attr2, obj.attr2) - self.assertIsNone(obj.status) - self.assertIsNone(obj.location) - - def test_get_by_id(self): - resp = mock.Mock() - resp.json = mock.Mock(return_value=fake_body) - self.session.get = mock.Mock(return_value=resp) - - obj = FakeResource.get_by_id(self.session, fake_id, - path_args=fake_arguments) - - # Check that the proper URL is being built. 
- self.assertCalledURL(self.session.get, - os.path.join(fake_base_path % fake_arguments, - str(fake_id))[1:]) - - self.assertEqual(fake_id, obj.id) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_attr1, obj['attr1']) - self.assertEqual(fake_attr2, obj['attr2']) - - self.assertEqual(fake_name, obj.name) - self.assertEqual(fake_attr1, obj.first) - self.assertEqual(fake_attr2, obj.second) - - def test_get_by_id_with_headers(self): - header1 = "fake-value1" - header2 = "fake-value2" - headers = {"header1": header1, - "header2": header2} - - resp = mock.Mock(headers=headers) - resp.json = mock.Mock(return_value=fake_body) - self.session.get = mock.Mock(return_value=resp) - - class FakeResource2(FakeResource): - header1 = resource.header("header1") - header2 = resource.header("header2") - - obj = FakeResource2.get_by_id(self.session, fake_id, - path_args=fake_arguments, - include_headers=True) - - self.assertCalledURL(self.session.get, - os.path.join(fake_base_path % fake_arguments, - str(fake_id))[1:]) - - self.assertEqual(fake_id, obj.id) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_attr1, obj['attr1']) - self.assertEqual(fake_attr2, obj['attr2']) - self.assertEqual(header1, obj['headers']['header1']) - self.assertEqual(header2, obj['headers']['header2']) - - self.assertEqual(fake_name, obj.name) - self.assertEqual(fake_attr1, obj.first) - self.assertEqual(fake_attr2, obj.second) - self.assertEqual(header1, obj.header1) - self.assertEqual(header2, obj.header2) - - def test_head_by_id(self): - class FakeResource2(FakeResource): - header1 = resource.header("header1") - header2 = resource.header("header2") - - resp = mock.Mock(headers={"header1": "one", "header2": "two"}) - self.session.head = mock.Mock(return_value=resp) - - obj = FakeResource2.head_by_id(self.session, fake_id, - path_args=fake_arguments) - - self.assertCalledURL(self.session.head, - os.path.join(fake_base_path % fake_arguments, - str(fake_id))[1:]) - - 
self.assertEqual('one', obj['headers']['header1']) - self.assertEqual('two', obj['headers']['header2']) - - self.assertEqual('one', obj.header1) - self.assertEqual('two', obj.header2) - - def test_patch_update(self): - class FakeResourcePatch(FakeResource): - patch_update = True - - resp = mock.Mock() - resp.json = mock.Mock(return_value=fake_body) - resp.headers = {'location': 'foo'} - self.session.patch = mock.Mock(return_value=resp) - - # Create resource with subset of attributes in order to - # verify update refreshes all attributes from response. - obj = FakeResourcePatch.new(id=fake_id, parent_name=fake_parent, - name=fake_name, attr1=fake_attr1) - self.assertTrue(obj.is_dirty) - - self.assertEqual(obj, obj.update(self.session)) - self.assertFalse(obj.is_dirty) - - self.assertCalledURL(self.session.patch, - os.path.join(fake_base_path % fake_arguments, - str(fake_id))[1:]) - - last_req = self.session.patch.call_args[1]["json"][ - FakeResource.resource_key] - - self.assertEqual(3, len(last_req)) - self.assertEqual(fake_parent, last_req['parent_name']) - self.assertEqual(fake_name, last_req['name']) - self.assertEqual(fake_attr1, last_req['attr1']) - - self.assertTrue(obj['enabled']) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_parent, obj['parent_name']) - self.assertEqual(fake_attr1, obj['attr1']) - self.assertEqual(fake_attr2, obj['attr2']) - self.assertIsNone(obj['status']) - - self.assertTrue(obj.enabled) - self.assertEqual(fake_id, obj.id) - self.assertEqual(fake_name, obj.name) - self.assertEqual(fake_parent, obj.parent_name) - self.assertEqual(fake_parent, obj.parent) - self.assertEqual(fake_attr1, obj.first) - self.assertEqual(fake_attr1, obj.attr1) - self.assertEqual(fake_attr2, obj.second) - self.assertEqual(fake_attr2, obj.attr2) - self.assertIsNone(obj.status) - self.assertEqual('foo', obj.location) - - def test_put_update(self): - class FakeResourcePut(FakeResource): - # This is False by default, but explicit for this test. 
- patch_update = False - - resp = mock.Mock() - resp.json = mock.Mock(return_value=fake_body) - resp.headers = {'location': 'foo'} - self.session.put = mock.Mock(return_value=resp) - - # Create resource with subset of attributes in order to - # verify update refreshes all attributes from response. - obj = FakeResourcePut.new(id=fake_id, parent_name=fake_parent, - name=fake_name, attr1=fake_attr1) - self.assertTrue(obj.is_dirty) - - self.assertEqual(obj, obj.update(self.session)) - self.assertFalse(obj.is_dirty) - - self.assertCalledURL(self.session.put, - os.path.join(fake_base_path % fake_arguments, - str(fake_id))[1:]) - - last_req = self.session.put.call_args[1]["json"][ - FakeResource.resource_key] - - self.assertEqual(3, len(last_req)) - self.assertEqual(fake_parent, last_req['parent_name']) - self.assertEqual(fake_name, last_req['name']) - self.assertEqual(fake_attr1, last_req['attr1']) - - self.assertTrue(obj['enabled']) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_parent, obj['parent_name']) - self.assertEqual(fake_attr1, obj['attr1']) - self.assertEqual(fake_attr2, obj['attr2']) - self.assertIsNone(obj['status']) - - self.assertTrue(obj.enabled) - self.assertEqual(fake_id, obj.id) - self.assertEqual(fake_name, obj.name) - self.assertEqual(fake_parent, obj.parent_name) - self.assertEqual(fake_parent, obj.parent) - self.assertEqual(fake_attr1, obj.first) - self.assertEqual(fake_attr1, obj.attr1) - self.assertEqual(fake_attr2, obj.second) - self.assertEqual(fake_attr2, obj.attr2) - self.assertIsNone(obj.status) - self.assertEqual('foo', obj.location) - - def test_update_early_exit(self): - obj = FakeResource() - obj._dirty = [] # Bail out early if there's nothing to update. 
- - self.assertIsNone(obj.update("session")) - - def test_update_no_id_attribute(self): - obj = FakeResource.existing(id=1, attr="value1", - parent_name=fake_parent) - obj.first = "value2" # Make it dirty - obj.update_by_id = mock.Mock(return_value=dict()) - # If no id_attribute is returned in the update response, make sure - # we handle the resulting KeyError. - self.assertEqual(obj, obj.update("session")) + self.assertEqual(value, sot._get_id(sot)) - def test_delete(self): - obj = FakeResource({"id": fake_id, "parent_name": fake_parent}) - obj.delete(self.session) - - self.assertCalledURL(self.session.delete, - os.path.join(fake_base_path % fake_arguments, - str(fake_id))[1:]) - - def _test_list(self, resource_class): - results = [fake_data.copy(), fake_data.copy(), fake_data.copy()] - for i in range(len(results)): - results[i]['id'] = fake_id + i - if resource_class.resources_key is not None: - body = {resource_class.resources_key: - self._get_expected_results()} - sentinel = {resource_class.resources_key: []} - else: - body = self._get_expected_results() - sentinel = [] - resp1 = mock.Mock() - resp1.json = mock.Mock(return_value=body) - resp2 = mock.Mock() - resp2.json = mock.Mock(return_value=sentinel) - self.session.get.side_effect = [resp1, resp2] + def test__get_id_value(self): + value = "id" + self.assertEqual(value, resource.Resource._get_id(value)) - objs = list(resource_class.list(self.session, path_args=fake_arguments, - paginated=True)) - - params = {'limit': 3, 'marker': results[-1]['id']} - self.assertEqual(params, self.session.get.call_args[1]['params']) - self.assertEqual(3, len(objs)) - for obj in objs: - self.assertIn(obj.id, range(fake_id, fake_id + 3)) - self.assertEqual(fake_name, obj['name']) - self.assertEqual(fake_name, obj.name) - self.assertIsInstance(obj, FakeResource) - - def _get_expected_results(self): - results = [fake_data.copy(), fake_data.copy(), fake_data.copy()] - for i in range(len(results)): - results[i]['id'] = fake_id + i - 
return results - - def test_list_keyed_resource(self): - self._test_list(FakeResource) - - def test_list_non_keyed_resource(self): - self._test_list(FakeResourceNoKeys) - - def _test_list_call_count(self, paginated): - # Test that we've only made one call to receive all data - results = [fake_data.copy(), fake_data.copy(), fake_data.copy()] - resp = mock.Mock() - resp.json = mock.Mock(return_value={fake_resources: results}) - attrs = {"get.return_value": resp} - session = mock.Mock(**attrs) - - list(FakeResource.list(session, params={'limit': len(results) + 1}, - path_args=fake_arguments, - paginated=paginated)) - - # Ensure we only made one call to complete this. - self.assertEqual(1, session.get.call_count) - - def test_list_bail_out(self): - # When we get less data than limit, make sure we made one call - self._test_list_call_count(True) - - def test_list_nonpaginated(self): - # When we call with paginated=False, make sure we made one call - self._test_list_call_count(False) - - def test_determine_limit(self): - full_page = [fake_data.copy(), fake_data.copy(), fake_data.copy()] - last_page = [fake_data.copy()] - - session = mock.Mock() - session.get = mock.Mock() - full_response = mock.Mock() - response_body = {FakeResource.resources_key: full_page} - full_response.json = mock.Mock(return_value=response_body) - last_response = mock.Mock() - response_body = {FakeResource.resources_key: last_page} - last_response.json = mock.Mock(return_value=response_body) - pages = [full_response, full_response, last_response] - session.get.side_effect = pages - - # Don't specify a limit. Resource.list will determine the limit - # is 3 based on the first `full_page`. 
- results = list(FakeResource.list(session, path_args=fake_arguments, - paginated=True)) - - self.assertEqual(session.get.call_count, len(pages)) - self.assertEqual(len(full_page + full_page + last_page), len(results)) - - def test_empty_list(self): - page = [] - - session = mock.Mock() - session.get = mock.Mock() - full_response = mock.Mock() - response_body = {FakeResource.resources_key: page} - full_response.json = mock.Mock(return_value=response_body) - pages = [full_response] - session.get.side_effect = pages - - results = list(FakeResource.list(session, path_args=fake_arguments, - paginated=True)) - - self.assertEqual(session.get.call_count, len(pages)) - self.assertEqual(len(page), len(results)) - - def test_attrs_name(self): - obj = FakeResource() - - self.assertIsNone(obj.name) - del obj.name + def test__attributes(self): + class Test(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar', aka='_bar') + bar_local = resource.Body('bar_remote') + + sot = Test() + + self.assertEqual( + sorted( + ['foo', 'bar', '_bar', 'bar_local', 'id', 'name', 'location'] + ), + sorted(sot._attributes()), + ) + + self.assertEqual( + sorted(['foo', 'bar', 'bar_local', 'id', 'name', 'location']), + sorted(sot._attributes(include_aliases=False)), + ) + + self.assertEqual( + sorted( + ['foo', 'bar', '_bar', 'bar_remote', 'id', 'name', 'location'] + ), + sorted(sot._attributes(remote_names=True)), + ) + + self.assertEqual( + sorted(['bar', '_bar', 'bar_local', 'id', 'name', 'location']), + sorted(sot._attributes(components=(fields.Body, fields.Computed))), + ) + + self.assertEqual( + ('foo',), + tuple(sot._attributes(components=(fields.Header,))), + ) + + def test__attributes_iterator(self): + class Parent(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar', aka='_bar') + + class Child(Parent): + foo1 = resource.Header('foo1') + bar1 = resource.Body('bar1') + + sot = Child() + expected = ['foo', 'bar', 'foo1', 'bar1'] + + for 
attr, component in sot._attributes_iterator(): + if attr in expected: + expected.remove(attr) + self.assertEqual([], expected) + + expected = ['foo', 'foo1'] + + # Check we iterate only over headers + for attr, component in sot._attributes_iterator( + components=(fields.Header,) + ): + if attr in expected: + expected.remove(attr) + self.assertEqual([], expected) def test_to_dict(self): - kwargs = { - 'enabled': True, - 'name': 'FOO', - 'parent': 'dad', - 'attr1': 'BAR', - 'attr2': ['ZOO', 'BAZ'], - 'status': 'Active', - 'headers': { - 'key': 'value' - } + class Test(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar', aka='_bar') + + res = Test(id='FAKE_ID') + + expected = { + 'id': 'FAKE_ID', + 'name': None, + 'location': None, + 'foo': None, + 'bar': None, + '_bar': None, } - obj = FakeResource(kwargs) - res = obj.to_dict() - self.assertIsInstance(res, dict) - self.assertTrue(res['enabled']) - self.assertEqual('FOO', res['name']) - self.assertEqual('dad', res['parent']) - self.assertEqual('BAR', res['attr1']) - self.assertEqual(['ZOO', 'BAZ'], res['attr2']) - self.assertEqual('Active', res['status']) - self.assertNotIn('headers', res) - - def test_composite_attr_happy(self): - obj = FakeResource.existing(**{'attr3': '3'}) + self.assertEqual(expected, res.to_dict()) - try: - self.assertEqual('3', obj.third) - except AttributeError: - self.fail("third was not found as expected") + def test_to_dict_nested(self): + class Test(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar') + a_list = resource.Body('a_list') + + class Sub(resource.Resource): + sub = resource.Body('foo') + + sub = Sub(id='ANOTHER_ID', foo='bar') + + res = Test(id='FAKE_ID', bar=sub, a_list=[sub]) + + expected = { + 'id': 'FAKE_ID', + 'name': None, + 'location': None, + 'foo': None, + 'bar': { + 'id': 'ANOTHER_ID', + 'name': None, + 'sub': 'bar', + 'location': None, + }, + 'a_list': [ + { + 'id': 'ANOTHER_ID', + 'name': None, + 'sub': 'bar', + 
'location': None, + } + ], + } + self.assertEqual(expected, res.to_dict()) + a_munch = res.to_dict(_to_munch=True) + self.assertEqual(a_munch.bar.id, 'ANOTHER_ID') + self.assertEqual(a_munch.bar.sub, 'bar') + self.assertEqual(a_munch.a_list[0].id, 'ANOTHER_ID') + self.assertEqual(a_munch.a_list[0].sub, 'bar') + + def test_to_dict_no_body(self): + class Test(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar') - def test_composite_attr_fallback(self): - obj = FakeResource.existing(**{'attr_three': '3'}) + res = Test(id='FAKE_ID') - try: - self.assertEqual('3', obj.third) - except AttributeError: - self.fail("third was not found in fallback as expected") + expected = { + 'location': None, + 'foo': None, + } + self.assertEqual(expected, res.to_dict(body=False)) + + def test_to_dict_no_header(self): + class Test(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar') + + res = Test(id='FAKE_ID') + + expected = { + 'id': 'FAKE_ID', + 'name': None, + 'bar': None, + 'location': None, + } + self.assertEqual(expected, res.to_dict(headers=False)) + + def test_to_dict_ignore_none(self): + class Test(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar') + + res = Test(id='FAKE_ID', bar='BAR') + + expected = { + 'id': 'FAKE_ID', + 'bar': 'BAR', + } + self.assertEqual(expected, res.to_dict(ignore_none=True)) + + def test_to_dict_with_mro(self): + class Parent(resource.Resource): + foo = resource.Header('foo') + bar = resource.Body('bar', aka='_bar') + + class Child(Parent): + foo_new = resource.Header('foo_baz_server') + bar_new = resource.Body('bar_baz_server') + + res = Child(id='FAKE_ID', bar='test') + + expected = { + 'foo': None, + 'bar': 'test', + '_bar': 'test', + 'foo_new': None, + 'bar_new': None, + 'id': 'FAKE_ID', + 'location': None, + 'name': None, + } + self.assertEqual(expected, res.to_dict()) + + def test_to_dict_with_unknown_attrs_in_body(self): + class Test(resource.Resource): + foo = 
resource.Body('foo') + _allow_unknown_attrs_in_body = True - def test_id_del(self): + res = Test(id='FAKE_ID', foo='FOO', bar='BAR') + expected = { + 'id': 'FAKE_ID', + 'name': None, + 'location': None, + 'foo': 'FOO', + 'bar': 'BAR', + } + self.assertEqual(expected, res.to_dict()) + + def test_json_dumps_from_resource(self): class Test(resource.Resource): - id_attribute = "my_id" + foo = resource.Body('foo_remote') - attrs = {"my_id": 100} - t = Test(attrs=attrs) + res = Test(foo='bar') - self.assertEqual(attrs["my_id"], t.id) - del t.id - self.assertTrue(Test.id_attribute not in t._attrs) + expected = '{"foo": "bar", "id": null, "location": null, "name": null}' - def test_from_name_with_name(self): - name = "Ernie Banks" + actual = json.dumps(res, sort_keys=True) + self.assertEqual(expected, actual) - obj = FakeResource.from_name(name) - self.assertEqual(name, obj.name) + response = FakeResponse({'foo': 'new_bar'}) + res._translate_response(response) - def test_from_id_with_name(self): - name = "Sandy Koufax" + expected = ( + '{"foo": "new_bar", "id": null, "location": null, "name": null}' + ) + actual = json.dumps(res, sort_keys=True) + self.assertEqual(expected, actual) - obj = FakeResource.from_id(name) - self.assertEqual(name, obj.id) + def test_items(self): + class Test(resource.Resource): + foo = resource.Body('foo') + bar = resource.Body('bar') + foot = resource.Body('foot') - def test_from_id_with_object(self): - name = "Mickey Mantle" - obj = FakeResource.new(name=name) + data = {'foo': 'bar', 'bar': 'foo\n', 'foot': 'a:b:c:d'} - new_obj = FakeResource.from_id(obj) - self.assertIs(new_obj, obj) - self.assertEqual(obj.name, new_obj.name) + res = Test(**data) + for k, v in res.items(): + expected = data.get(k) + if expected: + self.assertEqual(v, expected) - def test_from_id_with_bad_value(self): - def should_raise(): - FakeResource.from_id(3.14) + def test_access_by_aka(self): + class Test(resource.Resource): + foo = resource.Header('foo_remote', 
aka='foo_alias') + + res = Test(foo='bar', name='test') + + self.assertEqual('bar', res['foo_alias']) + self.assertEqual('bar', res.foo_alias) + self.assertTrue('foo' in res.keys()) + self.assertTrue('foo_alias' in res.keys()) + expected = utils.Munch( + { + 'id': None, + 'name': 'test', + 'location': None, + 'foo': 'bar', + 'foo_alias': 'bar', + } + ) + actual = utils.Munch(res) + self.assertEqual(expected, actual) + self.assertEqual(expected, res.toDict()) + self.assertEqual(expected, res.to_dict()) + self.assertDictEqual(expected, res) + self.assertDictEqual(expected, dict(res)) + + def test_access_by_resource_name(self): + class Test(resource.Resource): + blah = resource.Body("blah_resource") - self.assertThat(should_raise, matchers.raises(ValueError)) + sot = Test(blah='dummy') - def test_dirty_list(self): + result = sot["blah_resource"] + self.assertEqual(result, sot.blah) + + def test_to_dict_value_error(self): class Test(resource.Resource): - attr = resource.prop("attr") + foo = resource.Header('foo') + bar = resource.Body('bar') + + res = Test(id='FAKE_ID') + + err = self.assertRaises( + ValueError, res.to_dict, body=False, headers=False, computed=False + ) + self.assertEqual( + 'At least one of `body`, `headers` or `computed` must be True', + str(err), + ) + + def test_to_dict_with_mro_no_override(self): + class Parent(resource.Resource): + header = resource.Header('HEADER') + body = resource.Body('BODY') + + class Child(Parent): + # The following two properties are not supposed to be overridden + # by the parent class property values. 
+ header = resource.Header('ANOTHER_HEADER') + body = resource.Body('ANOTHER_BODY') + + res = Child(id='FAKE_ID', body='BODY_VALUE', header='HEADER_VALUE') + + expected = { + 'body': 'BODY_VALUE', + 'header': 'HEADER_VALUE', + 'id': 'FAKE_ID', + 'location': None, + 'name': None, + } + self.assertEqual(expected, res.to_dict()) - # Check if dirty after setting by prop - sot1 = Test() - self.assertFalse(sot1.is_dirty) - sot1.attr = 1 - self.assertTrue(sot1.is_dirty) + def test_new(self): + class Test(resource.Resource): + attr = resource.Body("attr") - # Check if dirty after setting by mapping - sot2 = Test() - sot2["attr"] = 1 - self.assertTrue(sot1.is_dirty) + value = "value" + sot = Test.new(attr=value) - # Check if dirty after creation - sot3 = Test({"attr": 1}) - self.assertTrue(sot3.is_dirty) + self.assertIn("attr", sot._body.dirty) + self.assertEqual(value, sot.attr) - def test_update_attrs(self): + def test_existing(self): class Test(resource.Resource): - moe = resource.prop("the-attr") - larry = resource.prop("the-attr2") - curly = resource.prop("the-attr3", type=int) - shemp = resource.prop("the-attr4") + attr = resource.Body("attr") + + value = "value" + sot = Test.existing(attr=value) - value1 = "one" - value2 = "two" - value3 = "3" - value4 = "fore" - value5 = "fiver" + self.assertNotIn("attr", sot._body.dirty) + self.assertEqual(value, sot.attr) - sot = Test({"the-attr": value1}) + def test_from_munch_new(self): + class Test(resource.Resource): + attr = resource.Body("body_attr") - sot.update_attrs({"the-attr2": value2, "notprop": value4}) - self.assertTrue(sot.is_dirty) - self.assertEqual(value1, sot.moe) - self.assertEqual(value1, sot["the-attr"]) - self.assertEqual(value2, sot.larry) - self.assertEqual(value4, sot.notprop) + value = "value" + orig = utils.Munch(body_attr=value) + sot = Test._from_munch(orig, synchronized=False) - sot._reset_dirty() + self.assertIn("body_attr", sot._body.dirty) + self.assertEqual(value, sot.attr) - 
sot.update_attrs(curly=value3) - self.assertTrue(sot.is_dirty) - self.assertEqual(int, type(sot.curly)) - self.assertEqual(int(value3), sot.curly) + def test_from_munch_existing(self): + class Test(resource.Resource): + attr = resource.Body("body_attr") - sot._reset_dirty() + value = "value" + orig = utils.Munch(body_attr=value) + sot = Test._from_munch(orig) - sot.update_attrs(**{"the-attr4": value5}) - self.assertTrue(sot.is_dirty) - self.assertEqual(value5, sot.shemp) + self.assertNotIn("body_attr", sot._body.dirty) + self.assertEqual(value, sot.attr) - def test_get_id(self): + def test__prepare_request_with_id(self): + class Test(resource.Resource): + base_path = "/something" + body_attr = resource.Body("x") + header_attr = resource.Header("y") + + the_id = "id" + body_value = "body" + header_value = "header" + sot = Test( + id=the_id, + body_attr=body_value, + header_attr=header_value, + _synchronized=False, + ) + + result = sot._prepare_request(requires_id=True) + + self.assertEqual("something/id", result.url) + self.assertEqual({"x": body_value, "id": the_id}, result.body) + self.assertEqual({"y": header_value}, result.headers) + + def test__prepare_request_with_id_marked_clean(self): class Test(resource.Resource): - pass + base_path = "/something" + body_attr = resource.Body("x") + header_attr = resource.Header("y") - ID = "an id" - res = Test({"id": ID}) + the_id = "id" + body_value = "body" + header_value = "header" + sot = Test( + id=the_id, + body_attr=body_value, + header_attr=header_value, + _synchronized=False, + ) + sot._body._dirty.discard("id") - self.assertEqual(ID, resource.Resource.get_id(ID)) - self.assertEqual(ID, resource.Resource.get_id(res)) + result = sot._prepare_request(requires_id=True) - def test_convert_ids(self): - class TestResourceFoo(resource.Resource): - pass + self.assertEqual("something/id", result.url) + self.assertEqual({"x": body_value}, result.body) + self.assertEqual({"y": header_value}, result.headers) - class 
TestResourceBar(resource.Resource): - pass + def test__prepare_request_missing_id(self): + sot = resource.Resource(id=None) - resfoo = TestResourceFoo({'id': 'FAKEFOO'}) - resbar = TestResourceBar({'id': 'FAKEBAR'}) + self.assertRaises( + exceptions.InvalidRequest, sot._prepare_request, requires_id=True + ) - self.assertIsNone(resource.Resource.convert_ids(None)) - attrs = { - 'key1': 'value1' - } - self.assertEqual(attrs, resource.Resource.convert_ids(attrs)) + def test__prepare_request_with_resource_key(self): + key = "key" - attrs = { - 'foo': resfoo, - 'bar': resbar, - 'other': 'whatever', - } - res = resource.Resource.convert_ids(attrs) - self.assertEqual('FAKEFOO', res['foo']) - self.assertEqual('FAKEBAR', res['bar']) - self.assertEqual('whatever', res['other']) + class Test(resource.Resource): + base_path = "/something" + resource_key = key + body_attr = resource.Body("x") + header_attr = resource.Header("y") - def test_repr(self): - fr = FakeResource() - fr._loaded = False - fr.first = "hey" - fr.second = "hi" - fr.third = "nah" - the_repr = repr(fr) - the_repr = the_repr.replace('openstack.tests.unit.test_resource.', '') - result = eval(the_repr) - self.assertEqual(fr._loaded, result._loaded) - self.assertEqual(fr.first, result.first) - self.assertEqual(fr.second, result.second) - self.assertEqual(fr.third, result.third) + body_value = "body" + header_value = "header" + sot = Test( + body_attr=body_value, header_attr=header_value, _synchronized=False + ) - def test_id_attribute(self): - faker = FakeResource(fake_data) - self.assertEqual(fake_id, faker.id) - faker.id_attribute = 'name' - self.assertEqual(fake_name, faker.id) - faker.id_attribute = 'attr1' - self.assertEqual(fake_attr1, faker.id) - faker.id_attribute = 'attr2' - self.assertEqual(fake_attr2, faker.id) - faker.id_attribute = 'id' - self.assertEqual(fake_id, faker.id) + result = sot._prepare_request(requires_id=False, prepend_key=True) - def test_name_attribute(self): - class 
Person_ES(resource.Resource): - name_attribute = "nombre" - nombre = resource.prop('nombre') + self.assertEqual("/something", result.url) + self.assertEqual({key: {"x": body_value}}, result.body) + self.assertEqual({"y": header_value}, result.headers) - name = "Brian" - args = {'nombre': name} + def test__prepare_request_with_override_key(self): + default_key = "key" + override_key = "other_key" - person = Person_ES(args) - self.assertEqual(name, person.nombre) - self.assertEqual(name, person.name) + class Test(resource.Resource): + base_path = "/something" + resource_key = default_key + body_attr = resource.Body("x") + header_attr = resource.Header("y") + + body_value = "body" + header_value = "header" + sot = Test( + body_attr=body_value, header_attr=header_value, _synchronized=False + ) + + result = sot._prepare_request( + requires_id=False, + prepend_key=True, + resource_request_key=override_key, + ) + + self.assertEqual("/something", result.url) + self.assertEqual({override_key: {"x": body_value}}, result.body) + self.assertEqual({"y": header_value}, result.headers) + + def test__prepare_request_with_patch(self): + class Test(resource.Resource): + commit_jsonpatch = True + base_path = "/something" + x = resource.Body("x") + y = resource.Body("y") - new_name = "Julien" - person.name = new_name - self.assertEqual(new_name, person.nombre) - self.assertEqual(new_name, person.name) + the_id = "id" + sot = Test.existing(id=the_id, x=1, y=2) + sot.x = 3 - def test_boolstr_prop(self): - faker = FakeResource(fake_data) - self.assertTrue(faker.enabled) - self.assertTrue(faker['enabled']) + result = sot._prepare_request(requires_id=True, patch=True) - faker._attrs['enabled'] = False - self.assertFalse(faker.enabled) - self.assertFalse(faker['enabled']) + self.assertEqual("something/id", result.url) + self.assertEqual( + [{'op': 'replace', 'path': '/x', 'value': 3}], result.body + ) - # should fail fast - def set_invalid(): - faker.enabled = 'INVALID' - 
self.assertRaises(ValueError, set_invalid) + def test__prepare_request_with_patch_not_synchronized(self): + class Test(resource.Resource): + commit_jsonpatch = True + base_path = "/something" + x = resource.Body("x") + y = resource.Body("y") + the_id = "id" + sot = Test.new(id=the_id, x=1) -class ResourceMapping(base.TestCase): + result = sot._prepare_request(requires_id=True, patch=True) - def test__getitem(self): - value = 10 + self.assertEqual("something/id", result.url) + self.assertEqual( + [{'op': 'add', 'path': '/x', 'value': 1}], result.body + ) + def test__prepare_request_with_patch_params(self): class Test(resource.Resource): - attr = resource.prop("attr") + commit_jsonpatch = True + base_path = "/something" + x = resource.Body("x") + y = resource.Body("y") + + the_id = "id" + sot = Test.existing(id=the_id, x=1, y=2) + sot.x = 3 - t = Test(attrs={"attr": value}) + params = [('foo', 'bar'), ('life', 42)] - self.assertEqual(value, t["attr"]) + result = sot._prepare_request( + requires_id=True, patch=True, params=params + ) - def test__setitem__existing_item_changed(self): + self.assertEqual("something/id?foo=bar&life=42", result.url) + self.assertEqual( + [{'op': 'replace', 'path': '/x', 'value': 3}], result.body + ) + def test__translate_response_no_body(self): class Test(resource.Resource): - pass + attr = resource.Header("attr") - t = Test() - key = "attr" - value = 1 - t[key] = value + response = FakeResponse({}, headers={"attr": "value"}) - self.assertEqual(value, t._attrs[key]) - self.assertTrue(key in t._dirty) + sot = Test() + + sot._translate_response(response, has_body=False) - def test__setitem__existing_item_unchanged(self): + self.assertEqual(dict(), sot._header.dirty) + self.assertEqual("value", sot.attr) + def test__translate_response_with_body_no_resource_key(self): class Test(resource.Resource): - pass + attr = resource.Body("attr") - key = "attr" - value = 1 - t = Test(attrs={key: value}) - t._reset_dirty() # Clear dirty list so this 
checks as unchanged. - t[key] = value + body = {"attr": "value"} + response = FakeResponse(body) + + sot = Test() + sot._filter_component = mock.Mock(side_effect=[body, dict()]) - self.assertEqual(value, t._attrs[key]) - self.assertTrue(key not in t._dirty) + sot._translate_response(response, has_body=True) - def test__setitem__new_item(self): + self.assertEqual("value", sot.attr) + self.assertEqual(dict(), sot._body.dirty) + self.assertEqual(dict(), sot._header.dirty) + + def test__translate_response_with_body_with_resource_key(self): + key = "key" class Test(resource.Resource): - pass + resource_key = key + attr = resource.Body("attr") - t = Test() - key = "attr" - value = 1 - t[key] = value + body = {"attr": "value"} + response = FakeResponse({key: body}) - self.assertEqual(value, t._attrs[key]) - self.assertTrue(key in t._dirty) + sot = Test() + sot._filter_component = mock.Mock(side_effect=[body, dict()]) + + sot._translate_response(response, has_body=True) - def test__delitem__(self): + self.assertEqual("value", sot.attr) + self.assertEqual(dict(), sot._body.dirty) + self.assertEqual(dict(), sot._header.dirty) + def test_cant_do_anything(self): class Test(resource.Resource): - pass + allow_create = False + allow_fetch = False + allow_commit = False + allow_delete = False + allow_head = False + allow_list = False - key = "attr" - value = 1 - t = Test(attrs={key: value}) + sot = Test() - del t[key] + # The first argument to all of these operations is the session, + # but we raise before we get to it so just pass anything in. + self.assertRaises(exceptions.MethodNotSupported, sot.create, "") + self.assertRaises(exceptions.MethodNotSupported, sot.fetch, "") + self.assertRaises(exceptions.MethodNotSupported, sot.delete, "") + self.assertRaises(exceptions.MethodNotSupported, sot.head, "") + + # list is a generator so you need to begin consuming + # it in order to exercise the failure. 
+ the_list = sot.list("") + self.assertRaises(exceptions.MethodNotSupported, next, the_list) + + # Update checks the dirty list first before even trying to see + # if the call can be made, so fake a dirty list. + sot._body = mock.Mock() + sot._body.dirty = mock.Mock(return_value={"x": "y"}) + self.assertRaises(exceptions.MethodNotSupported, sot.commit, "") + + def test_unknown_attrs_under_props_create(self): + class Test(resource.Resource): + properties = resource.Body("properties") + _store_unknown_attrs_as_properties = True - self.assertTrue(key not in t._attrs) - self.assertTrue(key in t._dirty) + sot = Test.new( + **{ + 'dummy': 'value', + } + ) + self.assertDictEqual({'dummy': 'value'}, sot.properties) + self.assertDictEqual({'dummy': 'value'}, sot.to_dict()['properties']) + self.assertDictEqual({'dummy': 'value'}, sot['properties']) + self.assertEqual('value', sot['properties']['dummy']) + + sot = Test.new(**{'dummy': 'value', 'properties': 'a,b,c'}) + self.assertDictEqual( + {'dummy': 'value', 'properties': 'a,b,c'}, sot.properties + ) + self.assertDictEqual( + {'dummy': 'value', 'properties': 'a,b,c'}, + sot.to_dict()['properties'], + ) + + sot = Test.new(**{'properties': None}) + self.assertIsNone(sot.properties) + self.assertIsNone(sot.to_dict()['properties']) + + def test_unknown_attrs_not_stored(self): + class Test(resource.Resource): + properties = resource.Body("properties") - def test__len__(self): + sot = Test.new( + **{ + 'dummy': 'value', + } + ) + self.assertIsNone(sot.properties) + def test_unknown_attrs_not_stored1(self): class Test(resource.Resource): - pass + _store_unknown_attrs_as_properties = True - attrs = {"a": 1, "b": 2, "c": 3} - t = Test(attrs=attrs) + sot = Test.new( + **{ + 'dummy': 'value', + } + ) + self.assertRaises(KeyError, sot.__getitem__, 'properties') + + def test_unknown_attrs_under_props_set(self): + class Test(resource.Resource): + properties = resource.Body("properties") + _store_unknown_attrs_as_properties = True - 
self.assertEqual(len(attrs.keys()), len(t)) + sot = Test.new( + **{ + 'dummy': 'value', + } + ) - def test__iter__(self): + sot['properties'] = {'dummy': 'new_value'} + self.assertEqual('new_value', sot['properties']['dummy']) + sot.properties = {'dummy': 'new_value1'} + self.assertEqual('new_value1', sot['properties']['dummy']) + def test_unknown_attrs_prepare_request_unpacked(self): class Test(resource.Resource): - pass + properties = resource.Body("properties") + _store_unknown_attrs_as_properties = True + + # Unknown attribute given as root attribute + sot = Test.new(**{'dummy': 'value', 'properties': 'a,b,c'}) - attrs = {"a": 1, "b": 2, "c": 3} - t = Test(attrs=attrs) + request_body = sot._prepare_request(requires_id=False).body + self.assertEqual('value', request_body['dummy']) + self.assertEqual('a,b,c', request_body['properties']) - for attr in t: - self.assertEqual(attrs[attr], t[attr]) + # properties are already a dict + sot = Test.new( + **{'properties': {'properties': 'a,b,c', 'dummy': 'value'}} + ) - def _test_resource_serialization(self, session_method, resource_method): - attr_type = resource.Resource + request_body = sot._prepare_request(requires_id=False).body + self.assertEqual('value', request_body['dummy']) + self.assertEqual('a,b,c', request_body['properties']) + def test_unknown_attrs_prepare_request_no_unpack_dict(self): + # if props type is not None - ensure no unpacking is done class Test(resource.Resource): - allow_create = True - attr = resource.prop("attr", type=attr_type) + properties = resource.Body("properties", type=dict) + + sot = Test.new( + **{'properties': {'properties': 'a,b,c', 'dummy': 'value'}} + ) + + request_body = sot._prepare_request(requires_id=False).body + self.assertDictEqual( + {'dummy': 'value', 'properties': 'a,b,c'}, + request_body['properties'], + ) + + def test_unknown_attrs_prepare_request_patch_unpacked(self): + class Test(resource.Resource): + properties = resource.Body("properties") + 
_store_unknown_attrs_as_properties = True + commit_jsonpatch = True + + sot = Test.existing(**{'dummy': 'value', 'properties': 'a,b,c'}) + + sot._update(**{'properties': {'dummy': 'new_value'}}) + + request_body = sot._prepare_request(requires_id=False, patch=True).body + self.assertDictEqual( + {'path': '/dummy', 'value': 'new_value', 'op': 'replace'}, + request_body[0], + ) + + def test_unknown_attrs_under_props_translate_response(self): + class Test(resource.Resource): + properties = resource.Body("properties") + _store_unknown_attrs_as_properties = True + + body = {'dummy': 'value', 'properties': 'a,b,c'} + response = FakeResponse(body) - the_id = 123 sot = Test() - sot.attr = resource.Resource({"id": the_id}) - self.assertEqual(attr_type, type(sot.attr)) - - def fake_call(*args, **kwargs): - attrs = kwargs["json"] - try: - json.dumps(attrs) - except TypeError as e: - self.fail("Unable to serialize _attrs: %s" % e) - resp = mock.Mock() - resp.json = mock.Mock(return_value=attrs) - return resp - session = mock.Mock() - setattr(session, session_method, mock.Mock(side_effect=fake_call)) + sot._translate_response(response, has_body=True) - if resource_method == "create_by_id": - session.create_by_id(session, sot._attrs) - elif resource_method == "update_by_id": - session.update_by_id(session, None, sot._attrs) + self.assertDictEqual( + {'dummy': 'value', 'properties': 'a,b,c'}, sot.properties + ) - def test_create_serializes_resource_types(self): - self._test_resource_serialization("post", "create_by_id") + def test_unknown_attrs_in_body_create(self): + class Test(resource.Resource): + known_param = resource.Body("known_param") + _allow_unknown_attrs_in_body = True - def test_update_serializes_resource_types(self): - self._test_resource_serialization("patch", "update_by_id") + sot = Test.new(**{'known_param': 'v1', 'unknown_param': 'v2'}) + self.assertEqual('v1', sot.known_param) + self.assertEqual('v2', sot.unknown_param) + def 
test_unknown_attrs_in_body_not_stored(self): + class Test(resource.Resource): + known_param = resource.Body("known_param") + properties = resource.Body("properties") -class FakeResponse(object): - def __init__(self, response): - self.body = response + sot = Test.new(**{'known_param': 'v1', 'unknown_param': 'v2'}) + self.assertEqual('v1', sot.known_param) + self.assertNotIn('unknown_param', sot) - def json(self): - return self.body + def test_unknown_attrs_in_body_set(self): + class Test(resource.Resource): + known_param = resource.Body("known_param") + _allow_unknown_attrs_in_body = True + + sot = Test.new( + **{ + 'known_param': 'v1', + } + ) + sot['unknown_param'] = 'v2' + self.assertEqual('v1', sot.known_param) + self.assertEqual('v2', sot.unknown_param) -class TestFind(base.TestCase): - NAME = 'matrix' - ID = 'Fishburne' - PROP = 'attribute2' + def test_unknown_attrs_in_body_not_allowed_to_set(self): + class Test(resource.Resource): + known_param = resource.Body("known_param") + _allow_unknown_attrs_in_body = False + sot = Test.new( + **{ + 'known_param': 'v1', + } + ) + try: + sot['unknown_param'] = 'v2' + except KeyError: + self.assertEqual('v1', sot.known_param) + self.assertNotIn('unknown_param', sot) + return + self.fail( + "Parameter 'unknown_param' unexpectedly set through the " + "dict interface" + ) + + def test_unknown_attrs_in_body_translate_response(self): + class Test(resource.Resource): + known_param = resource.Body("known_param") + _allow_unknown_attrs_in_body = True + + body = {'known_param': 'v1', 'unknown_param': 'v2'} + response = FakeResponse(body) + + sot = Test() + sot._translate_response(response, has_body=True) + + self.assertEqual('v1', sot.known_param) + self.assertEqual('v2', sot.unknown_param) + + def test_unknown_attrs_not_in_body_translate_response(self): + class Test(resource.Resource): + known_param = resource.Body("known_param") + _allow_unknown_attrs_in_body = False + + body = {'known_param': 'v1', 'unknown_param': 'v2'} + 
response = FakeResponse(body) + + sot = Test() + sot._translate_response(response, has_body=True) + + self.assertEqual('v1', sot.known_param) + self.assertNotIn('unknown_param', sot) + + +class TestResourceActions(base.TestCase): def setUp(self): - super(TestFind, self).setUp() - self.mock_session = mock.Mock() - self.mock_get = mock.Mock() - self.mock_session.get = self.mock_get - self.matrix = {'id': self.ID, 'name': self.NAME, 'prop': self.PROP} - - def test_name(self): - self.mock_get.side_effect = [ - exceptions.NotFoundException(), - FakeResponse({FakeResource.resources_key: [self.matrix]}) + super().setUp() + + self.service_name = "service" + self.base_path = "base_path" + + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + resources_key = 'resources' + allow_create = True + allow_fetch = True + allow_head = True + allow_commit = True + allow_delete = True + allow_list = True + + self.test_class = Test + + self.request = mock.Mock(spec=resource._Request) + self.request.url = "uri" + self.request.body = "body" + self.request.headers = "headers" + + self.response = FakeResponse({}) + + self.sot = Test(id="id") + self.sot._prepare_request = mock.Mock(return_value=self.request) + self.sot._translate_response = mock.Mock() + + self.session = mock.Mock(spec=adapter.Adapter) + self.session.create = mock.Mock(return_value=self.response) + self.session.get = mock.Mock(return_value=self.response) + self.session.put = mock.Mock(return_value=self.response) + self.session.patch = mock.Mock(return_value=self.response) + self.session.post = mock.Mock(return_value=self.response) + self.session.delete = mock.Mock(return_value=self.response) + self.session.head = mock.Mock(return_value=self.response) + self.session.session = self.session + self.session._get_connection = mock.Mock(return_value=self.cloud) + self.session.default_microversion = None + self.session.retriable_status_codes = None + + self.endpoint_data = mock.Mock( + 
max_microversion='1.99', min_microversion=None + ) + self.session.get_endpoint_data.return_value = self.endpoint_data + + def _test_create( + self, + cls, + requires_id=False, + prepend_key=False, + microversion=None, + base_path=None, + params=None, + id_marked_dirty=True, + explicit_microversion=None, + resource_request_key=None, + resource_response_key=None, + ): + id = "id" if requires_id else None + sot = cls(id=id) + sot._prepare_request = mock.Mock(return_value=self.request) + sot._translate_response = mock.Mock() + + params = params or {} + kwargs = params.copy() + if explicit_microversion is not None: + kwargs['microversion'] = explicit_microversion + microversion = explicit_microversion + result = sot.create( + self.session, + prepend_key=prepend_key, + base_path=base_path, + resource_request_key=resource_request_key, + resource_response_key=resource_response_key, + **kwargs, + ) + + id_is_dirty = 'id' in sot._body._dirty + self.assertEqual(id_marked_dirty, id_is_dirty) + prepare_kwargs = {} + if resource_request_key is not None: + prepare_kwargs['resource_request_key'] = resource_request_key + + sot._prepare_request.assert_called_once_with( + requires_id=requires_id, + prepend_key=prepend_key, + base_path=base_path, + **prepare_kwargs, + ) + if requires_id: + self.session.put.assert_called_once_with( + self.request.url, + json=self.request.body, + headers=self.request.headers, + microversion=microversion, + params=params, + ) + else: + self.session.post.assert_called_once_with( + self.request.url, + json=self.request.body, + headers=self.request.headers, + microversion=microversion, + params=params, + ) + + self.assertEqual(sot.microversion, microversion) + sot._translate_response.assert_called_once_with( + self.response, + has_body=sot.has_body, + resource_response_key=resource_response_key, + ) + self.assertEqual(result, sot) + + def test_put_create(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + 
allow_create = True + create_method = 'PUT' + + self._test_create(Test, requires_id=True, prepend_key=True) + + def test_put_create_exclude_id(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'PUT' + create_exclude_id_from_body = True + + self._test_create( + Test, requires_id=True, prepend_key=True, id_marked_dirty=False + ) + + def test_put_create_with_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'PUT' + _max_microversion = '1.42' + + self._test_create( + Test, requires_id=True, prepend_key=True, microversion='1.42' + ) + + def test_put_create_with_explicit_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'PUT' + _max_microversion = '1.99' + + self._test_create( + Test, + requires_id=True, + prepend_key=True, + explicit_microversion='1.42', + ) + + def test_put_create_with_params(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'PUT' + + self._test_create( + Test, requires_id=True, prepend_key=True, params={'answer': 42} + ) + + def test_post_create(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'POST' + + self._test_create(Test, requires_id=False, prepend_key=True) + + def test_post_create_override_request_key(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'POST' + resource_key = 'SomeKey' + + self._test_create( + Test, + requires_id=False, + prepend_key=True, + resource_request_key="OtherKey", + ) + + def test_post_create_override_response_key(self): + class Test(resource.Resource): + service = 
self.service_name + base_path = self.base_path + allow_create = True + create_method = 'POST' + resource_key = 'SomeKey' + + self._test_create( + Test, + requires_id=False, + prepend_key=True, + resource_response_key="OtherKey", + ) + + def test_post_create_override_key_both(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'POST' + resource_key = 'SomeKey' + + self._test_create( + Test, + requires_id=False, + prepend_key=True, + resource_request_key="OtherKey", + resource_response_key="SomeOtherKey", + ) + + def test_post_create_base_path(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'POST' + + self._test_create( + Test, requires_id=False, prepend_key=True, base_path='dummy' + ) + + def test_post_create_with_params(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_create = True + create_method = 'POST' + + self._test_create( + Test, requires_id=False, prepend_key=True, params={'answer': 42} + ) + + def test_fetch(self): + result = self.sot.fetch(self.session) + + self.sot._prepare_request.assert_called_once_with( + requires_id=True, base_path=None + ) + self.session.get.assert_called_once_with( + self.request.url, microversion=None, params={}, skip_cache=False + ) + + self.assertIsNone(self.sot.microversion) + self.sot._translate_response.assert_called_once_with( + self.response, error_message=None, resource_response_key=None + ) + self.assertEqual(result, self.sot) + + def test_fetch_with_override_key(self): + result = self.sot.fetch(self.session, resource_response_key="SomeKey") + + self.sot._prepare_request.assert_called_once_with( + requires_id=True, base_path=None + ) + self.session.get.assert_called_once_with( + self.request.url, microversion=None, params={}, skip_cache=False + ) + + self.assertIsNone(self.sot.microversion) 
+ self.sot._translate_response.assert_called_once_with( + self.response, error_message=None, resource_response_key="SomeKey" + ) + self.assertEqual(result, self.sot) + + def test_fetch_with_params(self): + result = self.sot.fetch(self.session, fields='a,b') + + self.sot._prepare_request.assert_called_once_with( + requires_id=True, base_path=None + ) + self.session.get.assert_called_once_with( + self.request.url, + microversion=None, + params={'fields': 'a,b'}, + skip_cache=False, + ) + + self.assertIsNone(self.sot.microversion) + self.sot._translate_response.assert_called_once_with( + self.response, error_message=None, resource_response_key=None + ) + self.assertEqual(result, self.sot) + + def test_fetch_with_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_fetch = True + _max_microversion = '1.42' + + sot = Test(id='id') + sot._prepare_request = mock.Mock(return_value=self.request) + sot._translate_response = mock.Mock() + + result = sot.fetch(self.session) + + sot._prepare_request.assert_called_once_with( + requires_id=True, base_path=None + ) + self.session.get.assert_called_once_with( + self.request.url, microversion='1.42', params={}, skip_cache=False + ) + + self.assertEqual(sot.microversion, '1.42') + sot._translate_response.assert_called_once_with( + self.response, error_message=None, resource_response_key=None + ) + self.assertEqual(result, sot) + + def test_fetch_with_explicit_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_fetch = True + _max_microversion = '1.99' + + sot = Test(id='id') + sot._prepare_request = mock.Mock(return_value=self.request) + sot._translate_response = mock.Mock() + + result = sot.fetch(self.session, microversion='1.42') + + sot._prepare_request.assert_called_once_with( + requires_id=True, base_path=None + ) + self.session.get.assert_called_once_with( + self.request.url, 
microversion='1.42', params={}, skip_cache=False + ) + + self.assertEqual(sot.microversion, '1.42') + sot._translate_response.assert_called_once_with( + self.response, error_message=None, resource_response_key=None + ) + self.assertEqual(result, sot) + + def test_fetch_not_requires_id(self): + result = self.sot.fetch(self.session, False) + + self.sot._prepare_request.assert_called_once_with( + requires_id=False, base_path=None + ) + self.session.get.assert_called_once_with( + self.request.url, microversion=None, params={}, skip_cache=False + ) + + self.sot._translate_response.assert_called_once_with( + self.response, error_message=None, resource_response_key=None + ) + self.assertEqual(result, self.sot) + + def test_fetch_base_path(self): + result = self.sot.fetch(self.session, False, base_path='dummy') + + self.sot._prepare_request.assert_called_once_with( + requires_id=False, base_path='dummy' + ) + self.session.get.assert_called_once_with( + self.request.url, microversion=None, params={}, skip_cache=False + ) + + self.sot._translate_response.assert_called_once_with( + self.response, error_message=None, resource_response_key=None + ) + self.assertEqual(result, self.sot) + + def test_head(self): + result = self.sot.head(self.session) + + self.sot._prepare_request.assert_called_once_with(base_path=None) + self.session.head.assert_called_once_with( + self.request.url, microversion=None + ) + + self.assertIsNone(self.sot.microversion) + self.sot._translate_response.assert_called_once_with( + self.response, + has_body=False, + ) + self.assertEqual(result, self.sot) + + def test_head_base_path(self): + result = self.sot.head(self.session, base_path='dummy') + + self.sot._prepare_request.assert_called_once_with(base_path='dummy') + self.session.head.assert_called_once_with( + self.request.url, microversion=None + ) + + self.assertIsNone(self.sot.microversion) + self.sot._translate_response.assert_called_once_with( + self.response, + has_body=False, + ) + 
self.assertEqual(result, self.sot) + + def test_head_with_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_head = True + _max_microversion = '1.42' + + sot = Test(id='id') + sot._prepare_request = mock.Mock(return_value=self.request) + sot._translate_response = mock.Mock() + + result = sot.head(self.session) + + sot._prepare_request.assert_called_once_with(base_path=None) + self.session.head.assert_called_once_with( + self.request.url, microversion='1.42' + ) + + self.assertEqual(sot.microversion, '1.42') + sot._translate_response.assert_called_once_with( + self.response, + has_body=False, + ) + self.assertEqual(result, sot) + + def _test_commit( + self, + commit_method='PUT', + prepend_key=True, + has_body=True, + microversion=None, + commit_args=None, + expected_args=None, + base_path=None, + explicit_microversion=None, + ): + self.sot.commit_method = commit_method + + # Need to make sot look dirty so we can attempt an update + self.sot._body = mock.Mock() + self.sot._body.dirty = mock.Mock(return_value={"x": "y"}) + + commit_args = commit_args or {} + if explicit_microversion is not None: + commit_args['microversion'] = explicit_microversion + microversion = explicit_microversion + self.sot.commit( + self.session, + prepend_key=prepend_key, + has_body=has_body, + base_path=base_path, + **commit_args, + ) + + self.sot._prepare_request.assert_called_once_with( + prepend_key=prepend_key, base_path=base_path + ) + + if commit_method == 'PATCH': + self.session.patch.assert_called_once_with( + self.request.url, + json=self.request.body, + headers=self.request.headers, + microversion=microversion, + **(expected_args or {}), + ) + elif commit_method == 'POST': + self.session.post.assert_called_once_with( + self.request.url, + json=self.request.body, + headers=self.request.headers, + microversion=microversion, + **(expected_args or {}), + ) + elif commit_method == 'PUT': + 
self.session.put.assert_called_once_with( + self.request.url, + json=self.request.body, + headers=self.request.headers, + microversion=microversion, + **(expected_args or {}), + ) + + self.assertEqual(self.sot.microversion, microversion) + self.sot._translate_response.assert_called_once_with( + self.response, + has_body=has_body, + ) + + def test_commit_put(self): + self._test_commit(commit_method='PUT', prepend_key=True, has_body=True) + + def test_commit_patch(self): + self._test_commit( + commit_method='PATCH', prepend_key=False, has_body=False + ) + + def test_commit_base_path(self): + self._test_commit( + commit_method='PUT', + prepend_key=True, + has_body=True, + base_path='dummy', + ) + + def test_commit_patch_retry_on_conflict(self): + self._test_commit( + commit_method='PATCH', + commit_args={'retry_on_conflict': True}, + expected_args={'retriable_status_codes': {409}}, + ) + + def test_commit_put_retry_on_conflict(self): + self._test_commit( + commit_method='PUT', + commit_args={'retry_on_conflict': True}, + expected_args={'retriable_status_codes': {409}}, + ) + + def test_commit_patch_no_retry_on_conflict(self): + self.session.retriable_status_codes = {409, 503} + self._test_commit( + commit_method='PATCH', + commit_args={'retry_on_conflict': False}, + expected_args={'retriable_status_codes': {503}}, + ) + + def test_commit_put_no_retry_on_conflict(self): + self.session.retriable_status_codes = {409, 503} + self._test_commit( + commit_method='PATCH', + commit_args={'retry_on_conflict': False}, + expected_args={'retriable_status_codes': {503}}, + ) + + def test_commit_put_explicit_microversion(self): + self._test_commit( + commit_method='PUT', + prepend_key=True, + has_body=True, + explicit_microversion='1.42', + ) + + def test_commit_not_dirty(self): + self.sot._body = mock.Mock() + self.sot._body.dirty = dict() + self.sot._header = mock.Mock() + self.sot._header.dirty = dict() + + self.sot.commit(self.session) + + self.session.put.assert_not_called() + 
+ def test_patch_with_sdk_names(self): + class Test(resource.Resource): + allow_patch = True + + id = resource.Body('id') + attr = resource.Body('attr') + nested = resource.Body('renamed') + other = resource.Body('other') + + test_patch = [ + {'path': '/attr', 'op': 'replace', 'value': 'new'}, + {'path': '/nested/dog', 'op': 'remove'}, + {'path': '/nested/cat', 'op': 'add', 'value': 'meow'}, ] + expected = [ + {'path': '/attr', 'op': 'replace', 'value': 'new'}, + {'path': '/renamed/dog', 'op': 'remove'}, + {'path': '/renamed/cat', 'op': 'add', 'value': 'meow'}, + ] + sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'}) + sot.patch(self.session, test_patch) + self.session.patch.assert_called_once_with( + '/1', json=expected, headers=mock.ANY, microversion=None + ) - result = FakeResource.find(self.mock_session, self.NAME, - path_args=fake_arguments) + def test_patch_with_server_names(self): + class Test(resource.Resource): + allow_patch = True - self.assertEqual(self.NAME, result.name) - self.assertEqual(self.PROP, result.prop) + id = resource.Body('id') + attr = resource.Body('attr') + nested = resource.Body('renamed') + other = resource.Body('other') - def test_id(self): - self.mock_get.side_effect = [ - FakeResponse({FakeResource.resource_key: self.matrix}) + test_patch = [ + {'path': '/attr', 'op': 'replace', 'value': 'new'}, + {'path': '/renamed/dog', 'op': 'remove'}, + {'path': '/renamed/cat', 'op': 'add', 'value': 'meow'}, ] + sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'}) + sot.patch(self.session, test_patch) + self.session.patch.assert_called_once_with( + '/1', json=test_patch, headers=mock.ANY, microversion=None + ) - result = FakeResource.find(self.mock_session, self.ID, - path_args=fake_arguments) + def test_patch_with_changed_fields(self): + class Test(resource.Resource): + allow_patch = True - self.assertEqual(self.ID, result.id) - self.assertEqual(self.PROP, result.prop) + attr = resource.Body('attr') + nested = 
resource.Body('renamed') + other = resource.Body('other') - path = "fakes/" + fake_parent + "/data/" + self.ID - self.mock_get.assert_any_call(path, endpoint_filter=None) + sot = Test.existing(id=1, attr=42, nested={'dog': 'bark'}) + sot.attr = 'new' + sot.patch(self.session, {'path': '/renamed/dog', 'op': 'remove'}) - def test_id_no_retrieve(self): - self.mock_get.side_effect = [ - FakeResponse({FakeResource.resources_key: [self.matrix]}) + expected = [ + {'path': '/attr', 'op': 'replace', 'value': 'new'}, + {'path': '/renamed/dog', 'op': 'remove'}, ] + self.session.patch.assert_called_once_with( + '/1', json=expected, headers=mock.ANY, microversion=None + ) - class NoRetrieveResource(FakeResource): - allow_retrieve = False + def test_delete(self): + result = self.sot.delete(self.session) - result = NoRetrieveResource.find(self.mock_session, self.ID, - path_args=fake_arguments) + self.sot._prepare_request.assert_called_once_with() + self.session.delete.assert_called_once_with( + self.request.url, headers='headers', microversion=None + ) - self.assertEqual(self.ID, result.id) - self.assertEqual(self.PROP, result.prop) + self.sot._translate_response.assert_called_once_with( + self.response, + has_body=False, + error_message=None, + ) + self.assertEqual(result, self.sot) - def test_dups(self): - dupe = self.matrix.copy() - dupe['id'] = 'different' - self.mock_get.side_effect = [ - # Raise a 404 first so we get out of the ID search and into name. 
- exceptions.NotFoundException(), - FakeResponse({FakeResource.resources_key: [self.matrix, dupe]}) - ] + def test_delete_with_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_delete = True + _max_microversion = '1.42' + + sot = Test(id='id') + sot._prepare_request = mock.Mock(return_value=self.request) + sot._translate_response = mock.Mock() + + result = sot.delete(self.session) + + sot._prepare_request.assert_called_once_with() + self.session.delete.assert_called_once_with( + self.request.url, headers='headers', microversion='1.42' + ) + + sot._translate_response.assert_called_once_with( + self.response, + has_body=False, + error_message=None, + ) + self.assertEqual(result, sot) + + def test_delete_with_explicit_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + allow_delete = True + _max_microversion = '1.99' - self.assertRaises(exceptions.DuplicateResource, FakeResource.find, - self.mock_session, self.NAME) + sot = Test(id='id') + sot._prepare_request = mock.Mock(return_value=self.request) + sot._translate_response = mock.Mock() - def test_id_attribute_find(self): - floater = {'ip_address': "127.0.0.1", 'prop': self.PROP} - self.mock_get.side_effect = [ - FakeResponse({FakeResource.resource_key: floater}) - ] + result = sot.delete(self.session, microversion='1.42') + + sot._prepare_request.assert_called_once_with() + self.session.delete.assert_called_once_with( + self.request.url, headers='headers', microversion='1.42' + ) + + sot._translate_response.assert_called_once_with( + self.response, has_body=False, error_message=None + ) + self.assertEqual(result, sot) + + # NOTE: As list returns a generator, testing it requires consuming + # the generator. Wrap calls to self.sot.list in a `list` + # and then test the results as a list of responses. 
+ def test_list_empty_response(self): + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"resources": []} + + self.session.get.return_value = mock_response + + result = list(self.sot.list(self.session)) + + self.session.get.assert_called_once_with( + self.base_path, + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + self.assertEqual([], result) + + def test_list_one_page_response_paginated(self): + id_value = 1 + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = {"resources": [{"id": id_value}]} - FakeResource.id_attribute = 'ip_address' - FakeResource.id_attribute = 'ip_address' - result = FakeResource.find(self.mock_session, "127.0.0.1", - path_args=fake_arguments) - self.assertEqual("127.0.0.1", result.id) - self.assertEqual(self.PROP, result.prop) + self.session.get.return_value = mock_response - FakeResource.id_attribute = 'id' + # Ensure that we break out of the loop on a paginated call + # that still only results in one page of data. 
+ results = list(self.sot.list(self.session, paginated=True)) - p = {'ip_address': "127.0.0.1"} - path = fake_path + "?limit=2" - self.mock_get.called_once_with(path, params=p, endpoint_filter=None) + self.assertEqual(1, len(results)) - def test_nada(self): - self.mock_get.side_effect = [ - exceptions.NotFoundException(), - FakeResponse({FakeResource.resources_key: []}) + self.assertEqual(1, len(self.session.get.call_args_list)) + self.assertEqual(id_value, results[0].id) + self.assertIsInstance(results[0], self.test_class) + + def test_list_one_page_response_not_paginated(self): + id_value = 1 + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {"resources": [{"id": id_value}]} + + self.session.get.return_value = mock_response + + results = list(self.sot.list(self.session, paginated=False)) + + self.session.get.assert_called_once_with( + self.base_path, + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + self.assertEqual(1, len(results)) + self.assertEqual(id_value, results[0].id) + self.assertIsInstance(results[0], self.test_class) + + def test_list_one_page_response_resources_key(self): + key = "resources" + + class Test(self.test_class): + resources_key = key + + id_value = 1 + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.json.return_value = {key: [{"id": id_value}]} + mock_response.links = [] + + self.session.get.return_value = mock_response + + sot = Test() + + results = list(sot.list(self.session)) + + self.session.get.assert_called_once_with( + self.base_path, + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + self.assertEqual(1, len(results)) + self.assertEqual(id_value, results[0].id) + self.assertIsInstance(results[0], self.test_class) + + def test_list_response_paginated_without_links(self): + ids = [1, 2] + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + 
mock_response.json.return_value = { + "resources": [{"id": ids[0]}], + "resources_links": [ + { + "href": "https://example.com/next-url", + "rel": "next", + } + ], + } + mock_response2 = mock.Mock() + mock_response2.status_code = 200 + mock_response2.links = {} + mock_response2.json.return_value = { + "resources": [{"id": ids[1]}], + } + + self.session.get.side_effect = [mock_response, mock_response2] + + results = list(self.sot.list(self.session, paginated=True)) + + self.assertEqual(2, len(results)) + self.assertEqual(ids[0], results[0].id) + self.assertEqual(ids[1], results[1].id) + self.assertEqual( + mock.call( + 'base_path', + headers={'Accept': 'application/json'}, + params={}, + microversion=None, + ), + self.session.get.mock_calls[0], + ) + self.assertEqual( + mock.call( + 'https://example.com/next-url', + headers={'Accept': 'application/json'}, + params={}, + microversion=None, + ), + self.session.get.mock_calls[1], + ) + self.assertEqual(2, len(self.session.get.call_args_list)) + self.assertIsInstance(results[0], self.test_class) + + def test_list_response_paginated_with_links(self): + ids = [1, 2] + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.side_effect = [ + { + "resources": [{"id": ids[0]}], + "resources_links": [ + { + "href": "https://example.com/next-url", + "rel": "next", + } + ], + }, + { + "resources": [{"id": ids[1]}], + }, + ] + + self.session.get.return_value = mock_response + + results = list(self.sot.list(self.session, paginated=True)) + + self.assertEqual(2, len(results)) + self.assertEqual(ids[0], results[0].id) + self.assertEqual(ids[1], results[1].id) + self.assertEqual( + mock.call( + 'base_path', + headers={'Accept': 'application/json'}, + params={}, + microversion=None, + ), + self.session.get.mock_calls[0], + ) + self.assertEqual( + mock.call( + 'https://example.com/next-url', + headers={'Accept': 'application/json'}, + params={}, + microversion=None, + ), + 
self.session.get.mock_calls[2], + ) + self.assertEqual(2, len(self.session.get.call_args_list)) + self.assertIsInstance(results[0], self.test_class) + + def test_list_response_paginated_with_links_and_query(self): + q_limit = 1 + ids = [1, 2] + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.side_effect = [ + { + "resources": [{"id": ids[0]}], + "resources_links": [ + { + "href": f"https://example.com/next-url?limit={q_limit}", + "rel": "next", + } + ], + }, + { + "resources": [{"id": ids[1]}], + }, + { + "resources": [], + }, + ] + + self.session.get.return_value = mock_response + + class Test(self.test_class): + _query_mapping = resource.QueryParameters("limit") + + results = list(Test.list(self.session, paginated=True, limit=q_limit)) + + self.assertEqual(2, len(results)) + self.assertEqual(ids[0], results[0].id) + self.assertEqual(ids[1], results[1].id) + self.assertEqual( + mock.call( + 'base_path', + headers={'Accept': 'application/json'}, + params={ + 'limit': q_limit, + }, + microversion=None, + ), + self.session.get.mock_calls[0], + ) + self.assertEqual( + mock.call( + 'https://example.com/next-url', + headers={'Accept': 'application/json'}, + params={ + 'limit': [str(q_limit)], + }, + microversion=None, + ), + self.session.get.mock_calls[2], + ) + + self.assertEqual(3, len(self.session.get.call_args_list)) + self.assertIsInstance(results[0], self.test_class) + + def test_list_response_paginated_with_next_field(self): + """Test pagination with a 'next' field in the response. + + Glance doesn't return a 'links' field in the response. Instead, it + returns a 'first' field and, if there are more pages, a 'next' field in + the response body. Ensure we correctly parse these. 
+ """ + + class Test(resource.Resource): + service = self.service_name + base_path = '/foos/bars' + resources_key = 'bars' + allow_list = True + _query_mapping = resource.QueryParameters("wow") + + ids = [1, 2] + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.side_effect = [ + { + "bars": [{"id": ids[0]}], + "first": "/v2/foos/bars?wow=cool", + "next": "/v2/foos/bars?marker=baz&wow=cool", + }, + { + "bars": [{"id": ids[1]}], + "first": "/v2/foos/bars?wow=cool", + }, ] - self.assertIsNone(FakeResource.find(self.mock_session, self.NAME)) + self.session.get.return_value = mock_response + + results = list(Test.list(self.session, paginated=True, wow="cool")) + + self.assertEqual(2, len(results)) + self.assertEqual(ids[0], results[0].id) + self.assertEqual(ids[1], results[1].id) + self.assertEqual( + mock.call( + Test.base_path, + headers={'Accept': 'application/json'}, + params={'wow': 'cool'}, + microversion=None, + ), + self.session.get.mock_calls[0], + ) + self.assertEqual( + mock.call( + '/foos/bars', + headers={'Accept': 'application/json'}, + params={'wow': ['cool'], 'marker': ['baz']}, + microversion=None, + ), + self.session.get.mock_calls[2], + ) + + self.assertEqual(2, len(self.session.get.call_args_list)) + self.assertIsInstance(results[0], Test) + + def test_list_response_paginated_with_max_items(self): + """Test pagination with a 'max_items' in the response. + + The limit variable is used in two meanings. + To make it clear, we add the max_items parameter and + use this value to determine the number of resources to be returned. 
+ """ + ids = [1, 2, 3, 4] + + def make_mock_response(): + resp = mock.Mock() + resp.status_code = 200 + resp.links = {} + resp.json.return_value = { + "resources": [ + {"id": 1}, + {"id": 2}, + {"id": 3}, + {"id": 4}, + ], + } + return resp - def test_no_name(self): - self.mock_get.side_effect = [ - exceptions.NotFoundException(), - FakeResponse({FakeResource.resources_key: [self.matrix]}) + self.session.get.side_effect = [ + make_mock_response(), + make_mock_response(), + make_mock_response(), ] - FakeResource.name_attribute = None - self.assertIsNone(FakeResource.find(self.mock_session, self.NAME)) + # Since the limit value is 3 but the max_items value is 2, two + # resources are returned. + results = self.sot.list( + self.session, limit=3, paginated=True, max_items=2 + ) + + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={"limit": 3}, + microversion=None, + ) + self.assertRaises(StopIteration, next, results) + + # max_items is set and limit in unset (so limit defaults to max_items) + results = self.sot.list(self.session, paginated=True, max_items=2) + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={"limit": 2}, + microversion=None, + ) + self.assertRaises(StopIteration, next, results) + + # both max_items and limit are set, and max_items is greater than limit + # (the opposite of this test: we should see multiple requests for limit + # resources each time) + results = self.sot.list( + self.session, limit=1, paginated=True, max_items=3 + ) + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + result2 = 
next(results) + self.assertEqual(result2.id, ids[2]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={"limit": 1}, + microversion=None, + ) + self.assertRaises(StopIteration, next, results) + + def test_list_response_paginated_with_microversions(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + resources_key = 'resources' + allow_list = True + _max_microversion = '1.42' + + ids = [1, 2] + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = { + "resources": [{"id": ids[0]}], + "resources_links": [ + { + "href": "https://example.com/next-url", + "rel": "next", + } + ], + } + mock_response2 = mock.Mock() + mock_response2.status_code = 200 + mock_response2.links = {} + mock_response2.json.return_value = { + "resources": [{"id": ids[1]}], + } - def test_nada_not_ignored(self): - self.mock_get.side_effect = [ - exceptions.NotFoundException(), - FakeResponse({FakeResource.resources_key: []}) + self.session.get.side_effect = [mock_response, mock_response2] + + results = list(Test.list(self.session, paginated=True)) + + self.assertEqual(2, len(results)) + self.assertEqual(ids[0], results[0].id) + self.assertEqual(ids[1], results[1].id) + self.assertEqual( + mock.call( + 'base_path', + headers={'Accept': 'application/json'}, + params={}, + microversion='1.42', + ), + self.session.get.mock_calls[0], + ) + self.assertEqual( + mock.call( + 'https://example.com/next-url', + headers={'Accept': 'application/json'}, + params={}, + microversion='1.42', + ), + self.session.get.mock_calls[1], + ) + self.assertEqual(2, len(self.session.get.call_args_list)) + self.assertIsInstance(results[0], Test) + self.assertEqual('1.42', results[0].microversion) + + def test_list_multi_page_response_not_paginated(self): + ids = [1, 2] + mock_response = mock.Mock() + mock_response.status_code = 200 + 
mock_response.json.side_effect = [ + {"resources": [{"id": ids[0]}]}, + {"resources": [{"id": ids[1]}]}, ] - self.assertRaises(exceptions.ResourceNotFound, FakeResource.find, - self.mock_session, self.NAME, ignore_missing=False) + self.session.get.return_value = mock_response + + results = list(self.sot.list(self.session, paginated=False)) + + self.assertEqual(1, len(results)) + self.assertEqual(ids[0], results[0].id) + self.assertIsInstance(results[0], self.test_class) + + def test_list_paginated_infinite_loop(self): + q_limit = 1 + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.side_effect = [ + { + "resources": [{"id": 1}], + }, + { + "resources": [{"id": 1}], + }, + ] + + self.session.get.return_value = mock_response + + class Test(self.test_class): + _query_mapping = resource.QueryParameters("limit") + + res = Test.list(self.session, paginated=True, limit=q_limit) + + self.assertRaises(exceptions.SDKException, list, res) + + def test_list_query_params(self): + id = 1 + qp = "query param!" + qp_name = "query-param" + uri_param = "uri param!" + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = {"resources": [{"id": id}]} + + mock_empty = mock.Mock() + mock_empty.status_code = 200 + mock_empty.links = {} + mock_empty.json.return_value = {"resources": []} + + self.session.get.side_effect = [mock_response, mock_empty] + + class Test(self.test_class): + _query_mapping = resource.QueryParameters(query_param=qp_name) + base_path = "/%(something)s/blah" + something = resource.URI("something") + + results = list( + Test.list( + self.session, + paginated=True, + query_param=qp, + something=uri_param, + ) + ) + + self.assertEqual(1, len(results)) + # Verify URI attribute is set on the resource + self.assertEqual(results[0].something, uri_param) + + # Look at the `params` argument to each of the get calls that + # were made. 
+ self.assertEqual( + self.session.get.call_args_list[0][1]["params"], {qp_name: qp} + ) + + self.assertEqual( + self.session.get.call_args_list[0][0][0], + Test.base_path % {"something": uri_param}, + ) + + def test_list_with_injected_headers(self): + mock_empty = mock.Mock() + mock_empty.status_code = 200 + mock_empty.json.return_value = {"resources": []} + + self.session.get.side_effect = [mock_empty] + + _ = list( + self.test_class.list(self.session, headers={'X-Test': 'value'}) + ) + + expected = {'Accept': 'application/json', 'X-Test': 'value'} + self.assertEqual( + expected, self.session.get.call_args.kwargs['headers'] + ) + + @mock.patch.object(resource.Resource, 'list') + def test_list_dns_with_headers(self, mock_resource_list): + dns.v2._base.Resource.list( + self.session, + project_id='1234', + all_projects=True, + ) + + expected = { + 'x-auth-sudo-project-id': '1234', + 'x-auth-all-projects': 'True', + } + self.assertEqual( + expected, mock_resource_list.call_args.kwargs['headers'] + ) + + def test_allow_invalid_list_params(self): + qp = "query param!" + qp_name = "query-param" + uri_param = "uri param!" + + mock_empty = mock.Mock() + mock_empty.status_code = 200 + mock_empty.links = {} + mock_empty.json.return_value = {"resources": []} + + self.session.get.side_effect = [mock_empty] + + class Test(self.test_class): + _query_mapping = resource.QueryParameters(query_param=qp_name) + base_path = "/%(something)s/blah" + something = resource.URI("something") + + list( + Test.list( + self.session, + paginated=True, + query_param=qp, + allow_unknown_params=True, + something=uri_param, + something_wrong=True, + ) + ) + self.session.get.assert_called_once_with( + f"/{uri_param}/blah", + headers={'Accept': 'application/json'}, + microversion=None, + params={qp_name: qp}, + ) + + def test_list_client_filters(self): + qp = "query param!" + uri_param = "uri param!" 
+ + mock_empty = mock.Mock() + mock_empty.status_code = 200 + mock_empty.links = {} + mock_empty.json.return_value = { + "resources": [ + {"a": "1", "b": "1"}, + {"a": "1", "b": "2"}, + ] + } + + self.session.get.side_effect = [mock_empty] + + class Test(self.test_class): + _query_mapping = resource.QueryParameters('a') + base_path = "/%(something)s/blah" + something = resource.URI("something") + a = resource.Body("a") + b = resource.Body("b") + + res = list( + Test.list( + self.session, + paginated=True, + query_param=qp, + allow_unknown_params=True, + something=uri_param, + a='1', + b='2', + ) + ) + self.session.get.assert_called_once_with( + f"/{uri_param}/blah", + headers={'Accept': 'application/json'}, + microversion=None, + params={'a': '1'}, + ) + self.assertEqual(1, len(res)) + self.assertEqual("2", res[0].b) + + def test_values_as_list_params(self): + id = 1 + qp = "query param!" + qp_name = "query-param" + uri_param = "uri param!" + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = {"resources": [{"id": id}]} + + mock_empty = mock.Mock() + mock_empty.status_code = 200 + mock_empty.links = {} + mock_empty.json.return_value = {"resources": []} + + self.session.get.side_effect = [mock_response, mock_empty] + + class Test(self.test_class): + _query_mapping = resource.QueryParameters(query_param=qp_name) + base_path = "/%(something)s/blah" + something = resource.URI("something") + + results = list( + Test.list( + self.session, + paginated=True, + something=uri_param, + **{qp_name: qp}, + ) + ) + + self.assertEqual(1, len(results)) + + # Look at the `params` argument to each of the get calls that + # were made. 
+ self.assertEqual( + self.session.get.call_args_list[0][1]["params"], {qp_name: qp} + ) + + self.assertEqual( + self.session.get.call_args_list[0][0][0], + Test.base_path % {"something": uri_param}, + ) + + def test_values_as_list_params_precedence(self): + id = 1 + qp = "query param!" + qp2 = "query param!!!!!" + qp_name = "query-param" + uri_param = "uri param!" + + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = {"resources": [{"id": id}]} + + mock_empty = mock.Mock() + mock_empty.status_code = 200 + mock_empty.links = {} + mock_empty.json.return_value = {"resources": []} + + self.session.get.side_effect = [mock_response, mock_empty] + + class Test(self.test_class): + _query_mapping = resource.QueryParameters(query_param=qp_name) + base_path = "/%(something)s/blah" + something = resource.URI("something") + + results = list( + Test.list( + self.session, + paginated=True, + query_param=qp2, + something=uri_param, + **{qp_name: qp}, + ) + ) + + self.assertEqual(1, len(results)) + + # Look at the `params` argument to each of the get calls that + # were made. 
+ self.assertEqual( + self.session.get.call_args_list[0][1]["params"], {qp_name: qp2} + ) + + self.assertEqual( + self.session.get.call_args_list[0][0][0], + Test.base_path % {"something": uri_param}, + ) + + def test_list_multi_page_response_paginated(self): + ids = [1, 2] + resp1 = mock.Mock() + resp1.status_code = 200 + resp1.links = {} + resp1.json.return_value = { + "resources": [{"id": ids[0]}], + "resources_links": [ + { + "href": "https://example.com/next-url", + "rel": "next", + } + ], + } + resp2 = mock.Mock() + resp2.status_code = 200 + resp2.links = {} + resp2.json.return_value = { + "resources": [{"id": ids[1]}], + "resources_links": [ + { + "href": "https://example.com/next-url", + "rel": "next", + } + ], + } + resp3 = mock.Mock() + resp3.status_code = 200 + resp3.links = {} + resp3.json.return_value = {"resources": []} + + self.session.get.side_effect = [resp1, resp2, resp3] + + results = self.sot.list(self.session, paginated=True) + + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + self.session.get.assert_called_with( + 'https://example.com/next-url', + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + self.assertRaises(StopIteration, next, results) + self.session.get.assert_called_with( + 'https://example.com/next-url', + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + def test_list_multi_page_no_early_termination(self): + # This tests verifies that multipages are not early terminated. + # APIs can set max_limit to the number of items returned in each + # query. 
If that max_limit is smaller than the limit given by the + # user, the return value would contain less items than the limit, + # but that doesn't stand to reason that there are no more records, + # we should keep trying to get more results. + ids = [1, 2, 3, 4] + resp1 = mock.Mock() + resp1.status_code = 200 + resp1.links = {} + resp1.json.return_value = { + # API's max_limit is set to 2. + "resources": [{"id": ids[0]}, {"id": ids[1]}], + } + resp2 = mock.Mock() + resp2.status_code = 200 + resp2.links = {} + resp2.json.return_value = { + # API's max_limit is set to 2. + "resources": [{"id": ids[2]}, {"id": ids[3]}], + } + resp3 = mock.Mock() + resp3.status_code = 200 + resp3.json.return_value = { + "resources": [], + } + + self.session.get.side_effect = [resp1, resp2, resp3] + + results = self.sot.list(self.session, limit=3, paginated=True) + + # First page constains only two items, less than the limit given + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={"limit": 3}, + microversion=None, + ) + + # Second page contains another two items + result2 = next(results) + self.assertEqual(result2.id, ids[2]) + result3 = next(results) + self.assertEqual(result3.id, ids[3]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={"limit": 3, "marker": 2}, + microversion=None, + ) + + # Ensure we're done after those four items + self.assertRaises(StopIteration, next, results) + + # Ensure we've given the last try to get more results + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={"limit": 3, "marker": 4}, + microversion=None, + ) + + # Ensure we made three calls to get this done + self.assertEqual(3, len(self.session.get.call_args_list)) + + def 
test_list_multi_page_inferred_additional(self): + # If we explicitly request a limit and we receive EXACTLY that + # amount of results and there is no next link, we make one additional + # call to check to see if there are more records and the service is + # just sad. + # NOTE(mordred) In a perfect world we would not do this. But it's 2018 + # and I don't think anyone has any illusions that we live in a perfect + # world anymore. + ids = [1, 2, 3] + resp1 = mock.Mock() + resp1.status_code = 200 + resp1.links = {} + resp1.json.return_value = { + "resources": [{"id": ids[0]}, {"id": ids[1]}], + } + resp2 = mock.Mock() + resp2.status_code = 200 + resp2.links = {} + resp2.json.return_value = {"resources": [{"id": ids[2]}]} + + self.session.get.side_effect = [resp1, resp2] + + results = self.sot.list(self.session, limit=2, paginated=True) + # Get the first page's two items + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={"limit": 2}, + microversion=None, + ) + + result2 = next(results) + self.assertEqual(result2.id, ids[2]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={'limit': 2, 'marker': 2}, + microversion=None, + ) + + # Ensure we're done after those three items + # In python3.7, PEP 479 is enabled for all code, and StopIteration + # raised directly from code is turned into a RuntimeError. + # Something about how mock is implemented triggers that here. 
+ self.assertRaises((StopIteration, RuntimeError), next, results) + + # Ensure we only made two calls to get this done + self.assertEqual(3, len(self.session.get.call_args_list)) + + def test_list_multi_page_header_count(self): + class Test(self.test_class): + resources_key = None + pagination_key = 'X-Container-Object-Count' + + self.sot = Test() + + # Swift returns a total number of objects in a header and we compare + # that against the total number returned to know if we need to fetch + # more objects. + ids = [1, 2, 3] + resp1 = mock.Mock() + resp1.status_code = 200 + resp1.links = {} + resp1.headers = {'X-Container-Object-Count': 3} + resp1.json.return_value = [{"id": ids[0]}, {"id": ids[1]}] + resp2 = mock.Mock() + resp2.status_code = 200 + resp2.links = {} + resp2.headers = {'X-Container-Object-Count': 3} + resp2.json.return_value = [{"id": ids[2]}] + + self.session.get.side_effect = [resp1, resp2] + + results = self.sot.list(self.session, paginated=True) + + # Get the first page's two items + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + result2 = next(results) + self.assertEqual(result2.id, ids[2]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={'marker': 2}, + microversion=None, + ) + + # Ensure we're done after those three items + self.assertRaises(StopIteration, next, results) + + # Ensure we only made two calls to get this done + self.assertEqual(2, len(self.session.get.call_args_list)) + + def test_list_multi_page_link_header(self): + # Swift returns a total number of objects in a header and we compare + # that against the total number returned to know if we need to fetch + # more objects. 
+ ids = [1, 2, 3] + resp1 = mock.Mock() + resp1.status_code = 200 + resp1.links = { + 'next': {'uri': 'https://example.com/next-url', 'rel': 'next'} + } + resp1.headers = {} + resp1.json.return_value = { + "resources": [{"id": ids[0]}, {"id": ids[1]}], + } + resp2 = mock.Mock() + resp2.status_code = 200 + resp2.links = {} + resp2.headers = {} + resp2.json.return_value = {"resources": [{"id": ids[2]}]} + + self.session.get.side_effect = [resp1, resp2] + results = self.sot.list(self.session, paginated=True) + + # Get the first page's two items + result0 = next(results) + self.assertEqual(result0.id, ids[0]) + result1 = next(results) + self.assertEqual(result1.id, ids[1]) + self.session.get.assert_called_with( + self.base_path, + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + result2 = next(results) + self.assertEqual(result2.id, ids[2]) + self.session.get.assert_called_with( + 'https://example.com/next-url', + headers={"Accept": "application/json"}, + params={}, + microversion=None, + ) + + # Ensure we're done after those three items + self.assertRaises(StopIteration, next, results) + + # Ensure we only made two calls to get this done + self.assertEqual(2, len(self.session.get.call_args_list)) + + def test_bulk_create_invalid_data_passed(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + create_method = 'POST' + allow_create = True + + Test._prepare_request = mock.Mock() + self.assertRaises(ValueError, Test.bulk_create, self.session, []) + self.assertRaises(ValueError, Test.bulk_create, self.session, None) + self.assertRaises(ValueError, Test.bulk_create, self.session, object) + self.assertRaises(ValueError, Test.bulk_create, self.session, {}) + self.assertRaises(ValueError, Test.bulk_create, self.session, "hi!") + self.assertRaises(ValueError, Test.bulk_create, self.session, ["hi!"]) + + def _test_bulk_create( + self, cls, http_method, microversion=None, base_path=None, **params + 
): + req1 = mock.Mock() + req2 = mock.Mock() + req1.body = {'name': 'resource1'} + req2.body = {'name': 'resource2'} + req1.url = 'uri' + req2.url = 'uri' + req1.headers = 'headers' + req2.headers = 'headers' + + request_body = { + "tests": [ + {'name': 'resource1', 'id': 'id1'}, + {'name': 'resource2', 'id': 'id2'}, + ] + } -class TestWaitForStatus(base.TestCase): + cls._prepare_request = mock.Mock(side_effect=[req1, req2]) + mock_response = mock.Mock() + mock_response.status_code = 200 + mock_response.links = {} + mock_response.json.return_value = request_body + http_method.return_value = mock_response + + res = list( + cls.bulk_create( + self.session, + [{'name': 'resource1'}, {'name': 'resource2'}], + base_path=base_path, + **params, + ) + ) + + self.assertEqual(len(res), 2) + self.assertEqual(res[0].id, 'id1') + self.assertEqual(res[1].id, 'id2') + http_method.assert_called_once_with( + self.request.url, + json={'tests': [req1.body, req2.body]}, + headers=self.request.headers, + microversion=microversion, + params=params, + ) + + def test_bulk_create_post(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + create_method = 'POST' + allow_create = True + resources_key = 'tests' - def __init__(self, *args, **kwargs): - super(TestWaitForStatus, self).__init__(*args, **kwargs) - self.build = FakeResponse(self.body_with_status(fake_body, 'BUILD')) - self.active = FakeResponse(self.body_with_status(fake_body, 'ACTIVE')) - self.error = FakeResponse(self.body_with_status(fake_body, 'ERROR')) + self._test_bulk_create(Test, self.session.post) + + def test_bulk_create_put(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + create_method = 'PUT' + allow_create = True + resources_key = 'tests' + + self._test_bulk_create(Test, self.session.put) + + def test_bulk_create_with_params(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path 
+ create_method = 'POST' + allow_create = True + resources_key = 'tests' + + self._test_bulk_create(Test, self.session.post, answer=42) + + def test_bulk_create_with_microversion(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + create_method = 'POST' + allow_create = True + resources_key = 'tests' + _max_microversion = '1.42' + + self._test_bulk_create(Test, self.session.post, microversion='1.42') + + def test_bulk_create_with_base_path(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + create_method = 'POST' + allow_create = True + resources_key = 'tests' + + self._test_bulk_create(Test, self.session.post, base_path='dummy') + + def test_bulk_create_fail(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + create_method = 'POST' + allow_create = False + resources_key = 'tests' + + self.assertRaises( + exceptions.MethodNotSupported, + Test.bulk_create, + self.session, + [{'name': 'name'}], + ) + + def test_bulk_create_fail_on_request(self): + class Test(resource.Resource): + service = self.service_name + base_path = self.base_path + create_method = 'POST' + allow_create = True + resources_key = 'tests' + + response = FakeResponse({}, status_code=409) + response.content = ( + '{"TestError": {"message": "Failed to parse ' + 'request. 
Required attribute \'foo\' not ' + 'specified", "type": "HTTPBadRequest", ' + '"detail": ""}}' + ) + response.reason = 'Bad Request' + self.session.post.return_value = response + self.assertRaises( + exceptions.ConflictException, + Test.bulk_create, + self.session, + [{'name': 'name'}], + ) + + +class TestResourceFind(base.TestCase): + result = 1 + + class Base(resource.Resource): + @classmethod + def existing(cls, **kwargs): + response = mock.Mock() + response.status_code = 404 + raise exceptions.NotFoundException('Not Found', response=response) + + @classmethod + def list(cls, session, **params): + return [] + + class OneResult(Base): + @classmethod + def _get_one_match(cls, *args): + return TestResourceFind.result + + class NoResults(Base): + @classmethod + def _get_one_match(cls, *args): + return None + + class OneResultWithQueryParams(OneResult): + _query_mapping = resource.QueryParameters('name') def setUp(self): - super(TestWaitForStatus, self).setUp() - self.sess = mock.Mock() + super().setUp() + self.no_results = self.NoResults + self.one_result = self.OneResult + self.one_result_with_qparams = self.OneResultWithQueryParams - def body_with_status(self, body, status): - body_copy = copy.deepcopy(body) - body_copy[fake_resource]['status'] = status - return body_copy + def test_find_short_circuit(self): + value = 1 - def test_wait_for_status_nothing(self): - self.sess.get = mock.Mock() - sot = FakeResource.new(**fake_data) - sot.status = 'ACTIVE' + class Test(resource.Resource): + @classmethod + def existing(cls, **kwargs): + mock_match = mock.Mock() + mock_match.fetch.return_value = value + return mock_match - self.assertEqual(sot, resource.wait_for_status( - self.sess, sot, 'ACTIVE', [], 1, 2)) - self.assertEqual([], self.sess.get.call_args_list) + result = Test.find(self.cloud.compute, "name") - def test_wait_for_status(self): - self.sess.get = mock.Mock() - self.sess.get.side_effect = [self.build, self.active] - sot = FakeResource.new(**fake_data) + 
self.assertEqual(result, value) - self.assertEqual(sot, resource.wait_for_status( - self.sess, sot, 'ACTIVE', [], 1, 2)) + def test_no_match_raise(self): + self.assertRaises( + exceptions.NotFoundException, + self.no_results.find, + self.cloud.compute, + "name", + ignore_missing=False, + ) - def test_wait_for_status_timeout(self): - self.sess.get = mock.Mock() - self.sess.get.side_effect = [self.build, self.build] - sot = FakeResource.new(**fake_data) + def test_no_match_return(self): + self.assertIsNone( + self.no_results.find( + self.cloud.compute, "name", ignore_missing=True + ) + ) - self.assertRaises(exceptions.ResourceTimeout, resource.wait_for_status, - self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2) + def test_find_result_name_not_in_query_parameters(self): + with ( + mock.patch.object( + self.one_result, + 'existing', + side_effect=self.OneResult.existing, + ) as mock_existing, + mock.patch.object( + self.one_result, 'list', side_effect=self.OneResult.list + ) as mock_list, + ): + self.assertEqual( + self.result, self.one_result.find(self.cloud.compute, "name") + ) + mock_existing.assert_called_once_with( + id='name', connection=mock.ANY + ) + mock_list.assert_called_once_with(mock.ANY) - def test_wait_for_status_failures(self): - self.sess.get = mock.Mock() - self.sess.get.side_effect = [self.build, self.error] - sot = FakeResource.new(**fake_data) + def test_find_result_name_in_query_parameters(self): + self.assertEqual( + self.result, + self.one_result_with_qparams.find(self.cloud.compute, "name"), + ) - self.assertRaises(exceptions.ResourceFailure, resource.wait_for_status, - self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2) + def test_match_empty_results(self): + self.assertIsNone(resource.Resource._get_one_match("name", [])) - def test_wait_for_status_no_status(self): - class FakeResourceNoStatus(resource.Resource): - allow_retrieve = True + def test_no_match_by_name(self): + the_name = "Brian" - sot = FakeResourceNoStatus.new(id=123) + match = 
mock.Mock(spec=resource.Resource) + match.name = the_name - self.assertRaises(AttributeError, resource.wait_for_status, - self.sess, sot, 'ACTIVE', ['ERROR'], 1, 2) + result = resource.Resource._get_one_match("Richard", [match]) + self.assertIsNone(result, match) -class TestWaitForDelete(base.TestCase): + def test_single_match_by_name(self): + the_name = "Brian" - def test_wait_for_delete(self): - sess = mock.Mock() - sot = FakeResource.new(**fake_data) - sot.get = mock.Mock() - sot.get.side_effect = [ - sot, - exceptions.NotFoundException()] + match = mock.Mock(spec=resource.Resource) + match.name = the_name - self.assertEqual(sot, resource.wait_for_delete(sess, sot, 1, 2)) + result = resource.Resource._get_one_match(the_name, [match]) - def test_wait_for_delete_fail(self): - sess = mock.Mock() - sot = FakeResource.new(**fake_data) - sot.get = mock.Mock(return_value=sot) + self.assertIs(result, match) + + def test_single_match_by_id(self): + the_id = "Brian" + + match = mock.Mock(spec=resource.Resource) + match.id = the_id + + result = resource.Resource._get_one_match(the_id, [match]) + + self.assertIs(result, match) + + def test_single_match_by_alternate_id(self): + the_id = "Richard" + + class Test(resource.Resource): + other_id = resource.Body("other_id", alternate_id=True) + + match = Test(other_id=the_id) + result = Test._get_one_match(the_id, [match]) + + self.assertIs(result, match) + + def test_multiple_matches(self): + the_id = "Brian" + + match = mock.Mock(spec=resource.Resource) + match.id = the_id + + self.assertRaises( + exceptions.DuplicateResource, + resource.Resource._get_one_match, + the_id, + [match, match], + ) + + def test_list_no_base_path(self): + with mock.patch.object(self.Base, "list") as list_mock: + self.Base.find(self.cloud.compute, "name") + + list_mock.assert_called_with(self.cloud.compute) + + def test_list_base_path(self): + with mock.patch.object(self.Base, "list") as list_mock: + self.Base.find( + self.cloud.compute, "name", 
list_base_path='/dummy/list' + ) + + list_mock.assert_called_with( + self.cloud.compute, base_path='/dummy/list' + ) + + +class TestWait(base.TestCase): + def setUp(self): + super().setUp() + + handler = logging.StreamHandler(self._log_stream) + formatter = logging.Formatter('%(asctime)s %(name)-32s %(message)s') + handler.setFormatter(formatter) + + logger = logging.getLogger('openstack.iterate_timeout') + logger.setLevel(logging.DEBUG) + logger.addHandler(handler) + + @staticmethod + def _fake_resource(statuses=None, progresses=None, *, attribute='status'): + if statuses is None: + statuses = ['building', 'building', 'building', 'active'] + + def fetch(*args, **kwargs): + # when we get to the last status, keep returning that + if statuses: + setattr(fake_resource, attribute, statuses.pop(0)) + + if progresses: + fake_resource.progress = progresses.pop(0) + + return fake_resource + + spec = ['id', attribute, 'fetch'] + if progresses: + spec.append('progress') + + fake_resource = mock.Mock(spec=spec) + setattr(fake_resource, attribute, statuses.pop(0)) + fake_resource.fetch.side_effect = fetch + + return fake_resource + + +class TestWaitForStatus(TestWait): + def test_immediate_status(self): + status = "loling" + res = mock.Mock(spec=['id', 'status']) + res.status = status + + result = resource.wait_for_status( + self.cloud.compute, + res, + status, + None, + interval=1, + wait=1, + ) + + self.assertEqual(res, result) + + def test_immediate_status_case(self): + status = "LOLing" + res = mock.Mock(spec=['id', 'status']) + res.status = status + + result = resource.wait_for_status( + self.cloud.compute, + res, + 'lOling', + None, + interval=1, + wait=1, + ) + + self.assertEqual(res, result) + + def test_immediate_status_different_attribute(self): + status = "loling" + res = mock.Mock(spec=['id', 'mood']) + res.mood = status + + result = resource.wait_for_status( + self.cloud.compute, + res, + status, + None, + interval=1, + wait=1, + attribute='mood', + ) + + 
self.assertEqual(res, result) + + def test_status_match(self): + status = "loling" + + # other gets past the first check, two anothers gets through + # the sleep loop, and the third matches + statuses = ["first", "other", "another", "another", status] + res = self._fake_resource(statuses) + + result = resource.wait_for_status( + mock.Mock(), + res, + status, + None, + interval=1, + wait=5, + ) + + self.assertEqual(result, res) + + def test_status_match_with_none(self): + status = "loling" + + # apparently, None is a correct state in some cases + statuses = [None, "other", None, "another", status] + res = self._fake_resource(statuses) + + result = resource.wait_for_status( + mock.Mock(), + res, + status, + None, + interval=1, + wait=5, + ) + + self.assertEqual(result, res) + + def test_status_match_none(self): + status = None + + # apparently, None can be expected status in some cases + statuses = ["first", "other", "another", "another", status] + res = self._fake_resource(statuses) + + result = resource.wait_for_status( + mock.Mock(), + res, + status, + None, + interval=1, + wait=5, + ) + + self.assertEqual(result, res) + + def test_status_match_different_attribute(self): + status = "loling" + + statuses = ["first", "other", "another", "another", status] + res = self._fake_resource(statuses, attribute='mood') + + result = resource.wait_for_status( + mock.Mock(), + res, + status, + None, + interval=1, + wait=5, + attribute='mood', + ) + + self.assertEqual(result, res) + + def test_status_fails(self): + failure = "crying" + + statuses = ["success", "other", failure] + res = self._fake_resource(statuses) + + self.assertRaises( + exceptions.ResourceFailure, + resource.wait_for_status, + mock.Mock(), + res, + "loling", + [failure], + interval=1, + wait=5, + ) + + def test_status_fails_different_attribute(self): + failure = "crying" + + statuses = ["success", "other", failure] + res = self._fake_resource(statuses, attribute='mood') + + self.assertRaises( + 
exceptions.ResourceFailure, + resource.wait_for_status, + mock.Mock(), + res, + "loling", + [failure.upper()], + interval=1, + wait=5, + attribute='mood', + ) + + def test_timeout(self): + status = "loling" + + # The first "other" gets past the first check, and then three + # pairs of "other" statuses run through the sleep counter loop, + # after which time should be up. This is because we have a + # one second interval and three second waiting period. + statuses = ["other"] * 7 + res = self._fake_resource(statuses) + + self.assertRaises( + exceptions.ResourceTimeout, + resource.wait_for_status, + self.cloud.compute, + res, + status, + None, + 0.01, + 0.1, + ) + + def test_no_sleep(self): + statuses = ["other"] + res = self._fake_resource(statuses) + + self.assertRaises( + exceptions.ResourceTimeout, + resource.wait_for_status, + self.cloud.compute, + res, + "status", + None, + interval=0, + wait=-1, + ) + + def test_callback(self): + """Callback is called with 'progress' attribute.""" + statuses = ['building', 'building', 'building', 'building', 'active'] + progresses = [0, 25, 50, 100] + res = self._fake_resource(statuses=statuses, progresses=progresses) + + callback = mock.Mock() + + result = resource.wait_for_status( + mock.Mock(), + res, + 'active', + None, + interval=0.1, + wait=1, + callback=callback, + ) + + self.assertEqual(result, res) + callback.assert_has_calls([mock.call(x) for x in progresses]) + + def test_callback_without_progress(self): + """Callback is called with 0 if 'progress' attribute is missing.""" + statuses = ['building', 'building', 'building', 'building', 'active'] + res = self._fake_resource(statuses=statuses) + + callback = mock.Mock() + + result = resource.wait_for_status( + mock.Mock(), + res, + 'active', + None, + interval=0.1, + wait=1, + callback=callback, + ) + + self.assertEqual(result, res) + # there are 5 statuses but only 3 callback calls since the initial + # status and final status don't result in calls + 
callback.assert_has_calls([mock.call(0)] * 3) + + +class TestWaitForDelete(TestWait): + def test_success_not_found(self): + response = mock.Mock() + response.headers = {} + response.status_code = 404 + res = mock.Mock() + res.fetch.side_effect = [ + res, + res, + exceptions.NotFoundException('Not Found', response), + ] - self.assertRaises(exceptions.ResourceTimeout, resource.wait_for_delete, - sess, sot, 1, 2) + result = resource.wait_for_delete(self.cloud.compute, res, 1, 3) + + self.assertEqual(result, res) + + def test_status(self): + """Successful deletion indicated by status.""" + statuses = ['active', 'deleting', 'deleting', 'deleting', 'deleted'] + res = self._fake_resource(statuses=statuses) + + result = resource.wait_for_delete( + mock.Mock(), + res, + interval=0.1, + wait=1, + ) + + self.assertEqual(result, res) + + def test_callback(self): + """Callback is called with 'progress' attribute.""" + statuses = ['active', 'deleting', 'deleting', 'deleting', 'deleted'] + progresses = [0, 25, 50, 100] + res = self._fake_resource(statuses=statuses, progresses=progresses) + + callback = mock.Mock() + + result = resource.wait_for_delete( + mock.Mock(), + res, + interval=1, + wait=5, + callback=callback, + ) + + self.assertEqual(result, res) + callback.assert_has_calls([mock.call(x) for x in progresses]) + + def test_callback_without_progress(self): + """Callback is called with 0 if 'progress' attribute is missing.""" + statuses = ['active', 'deleting', 'deleting', 'deleting', 'deleted'] + res = self._fake_resource(statuses=statuses) + + callback = mock.Mock() + + result = resource.wait_for_delete( + mock.Mock(), + res, + interval=1, + wait=5, + callback=callback, + ) + + self.assertEqual(result, res) + # there are 5 statuses but only 3 callback calls since the initial + # status and final status don't result in calls + callback.assert_has_calls([mock.call(0)] * 3) + + def test_timeout(self): + res = mock.Mock() + res.status = 'ACTIVE' + res.fetch.return_value = res 
+ + self.assertRaises( + exceptions.ResourceTimeout, + resource.wait_for_delete, + self.cloud.compute, + res, + 0.1, + 0.3, + ) + + +@mock.patch.object(resource.Resource, '_get_microversion', autospec=True) +class TestAssertMicroversionFor(base.TestCase): + session = mock.Mock() + res = resource.Resource() + + def test_compatible(self, mock_get_ver): + mock_get_ver.return_value = '1.42' + + self.assertEqual( + '1.42', + self.res._assert_microversion_for(self.session, '1.6'), + ) + mock_get_ver.assert_called_once_with(self.session) + + def test_incompatible(self, mock_get_ver): + mock_get_ver.return_value = '1.1' + + self.assertRaisesRegex( + exceptions.NotSupported, + '1.6 is required, but 1.1 will be used', + self.res._assert_microversion_for, + self.session, + '1.6', + ) + mock_get_ver.assert_called_once_with(self.session) + + def test_custom_message(self, mock_get_ver): + mock_get_ver.return_value = '1.1' + + self.assertRaisesRegex( + exceptions.NotSupported, + 'boom.*1.6 is required, but 1.1 will be used', + self.res._assert_microversion_for, + self.session, + '1.6', + error_message='boom', + ) + mock_get_ver.assert_called_once_with(self.session) + + def test_none(self, mock_get_ver): + mock_get_ver.return_value = None + + self.assertRaisesRegex( + exceptions.NotSupported, + '1.6 is required, but the default version', + self.res._assert_microversion_for, + self.session, + '1.6', + ) + mock_get_ver.assert_called_once_with(self.session) diff --git a/openstack/tests/unit/test_resource2.py b/openstack/tests/unit/test_resource2.py deleted file mode 100644 index c4055f5316..0000000000 --- a/openstack/tests/unit/test_resource2.py +++ /dev/null @@ -1,1507 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. 
You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. - -import itertools - -import mock -import six - -from openstack import exceptions -from openstack import format -from openstack import resource2 -from openstack import session -from openstack.tests.unit import base - - -class TestComponent(base.TestCase): - - class ExampleComponent(resource2._BaseComponent): - key = "_example" - - # Since we're testing ExampleComponent, which is as isolated as we - # can test _BaseComponent due to it's needing to be a data member - # of a class that has an attribute on the parent class named `key`, - # each test has to implement a class with a name that is the same - # as ExampleComponent.key, which should be a dict containing the - # keys and values to test against. - - def test_implementations(self): - self.assertEqual("_body", resource2.Body.key) - self.assertEqual("_header", resource2.Header.key) - self.assertEqual("_uri", resource2.URI.key) - - def test_creation(self): - sot = resource2._BaseComponent("name", type=int, default=1, - alternate_id=True) - - self.assertEqual("name", sot.name) - self.assertEqual(int, sot.type) - self.assertEqual(1, sot.default) - self.assertTrue(sot.alternate_id) - - def test_get_no_instance(self): - sot = resource2._BaseComponent("test") - - # Test that we short-circuit everything when given no instance. - result = sot.__get__(None, None) - self.assertIsNone(result) - - # NOTE: Some tests will use a default=1 setting when testing result - # values that should be None because the default-for-default is also None. 
- def test_get_name_None(self): - name = "name" - - class Parent(object): - _example = {name: None} - - instance = Parent() - sot = TestComponent.ExampleComponent(name, default=1) - - # Test that we short-circuit any typing of a None value. - result = sot.__get__(instance, None) - self.assertIsNone(result) - - def test_get_default(self): - expected_result = 123 - - class Parent(object): - _example = {} - - instance = Parent() - # NOTE: type=dict but the default value is an int. If we didn't - # short-circuit the typing part of __get__ it would fail. - sot = TestComponent.ExampleComponent("name", type=dict, - default=expected_result) - - # Test that we directly return any default value. - result = sot.__get__(instance, None) - self.assertEqual(expected_result, result) - - def test_get_name_untyped(self): - name = "name" - expected_result = 123 - - class Parent(object): - _example = {name: expected_result} - - instance = Parent() - sot = TestComponent.ExampleComponent("name") - - # Test that we return any the value as it is set. - result = sot.__get__(instance, None) - self.assertEqual(expected_result, result) - - # The code path for typing after a raw value has been found is the same. - def test_get_name_typed(self): - name = "name" - value = "123" - - class Parent(object): - _example = {name: value} - - instance = Parent() - sot = TestComponent.ExampleComponent("name", type=int) - - # Test that we run the underlying value through type conversion. 
- result = sot.__get__(instance, None) - self.assertEqual(int(value), result) - - def test_get_name_formatter(self): - name = "name" - value = "123" - expected_result = "one hundred twenty three" - - class Parent(object): - _example = {name: value} - - class FakeFormatter(object): - @classmethod - def deserialize(cls, value): - return expected_result - - instance = Parent() - sot = TestComponent.ExampleComponent("name", type=FakeFormatter) - - # Mock out issubclass rather than having an actual format.Formatter - # This can't be mocked via decorator, isolate it to wrapping the call. - mock_issubclass = mock.Mock(return_value=True) - module = six.moves.builtins.__name__ - with mock.patch("%s.issubclass" % module, mock_issubclass): - result = sot.__get__(instance, None) - self.assertEqual(expected_result, result) - - def test_set_name_untyped(self): - name = "name" - expected_value = "123" - - class Parent(object): - _example = {} - - instance = Parent() - sot = TestComponent.ExampleComponent("name") - - # Test that we don't run the value through type conversion. - sot.__set__(instance, expected_value) - self.assertEqual(expected_value, instance._example[name]) - - def test_set_name_typed(self): - expected_value = "123" - - class Parent(object): - _example = {} - - instance = Parent() - - # The type we give to ExampleComponent has to be an actual type, - # not an instance, so we can't get the niceties of a mock.Mock - # instance that would allow us to call `assert_called_once_with` to - # ensure that we're sending the value through the type. - # Instead, we use this tiny version of a similar thing. - class FakeType(object): - calls = [] - - def __init__(self, arg): - FakeType.calls.append(arg) - - sot = TestComponent.ExampleComponent("name", type=FakeType) - - # Test that we run the value through type conversion. 
- sot.__set__(instance, expected_value) - self.assertEqual([expected_value], FakeType.calls) - - def test_set_name_formatter(self): - expected_value = "123" - - class Parent(object): - _example = {} - - instance = Parent() - - # As with test_set_name_typed, create a pseudo-Mock to track what - # gets called on the type. - class FakeFormatter(format.Formatter): - calls = [] - - @classmethod - def serialize(cls, arg): - FakeFormatter.calls.append(arg) - - sot = TestComponent.ExampleComponent("name", type=FakeFormatter) - - # Test that we run the value through type conversion. - sot.__set__(instance, expected_value) - self.assertEqual([expected_value], FakeFormatter.calls) - - def test_delete_name(self): - name = "name" - expected_value = "123" - - class Parent(object): - _example = {name: expected_value} - - instance = Parent() - - sot = TestComponent.ExampleComponent("name") - - sot.__delete__(instance) - - self.assertNotIn(name, instance._example) - - def test_delete_name_doesnt_exist(self): - name = "name" - expected_value = "123" - - class Parent(object): - _example = {"what": expected_value} - - instance = Parent() - - sot = TestComponent.ExampleComponent(name) - - sot.__delete__(instance) - - self.assertNotIn(name, instance._example) - - -class TestComponentManager(base.TestCase): - - def test_create_basic(self): - sot = resource2._ComponentManager() - self.assertEqual(dict(), sot.attributes) - self.assertEqual(set(), sot._dirty) - - def test_create_unsynced(self): - attrs = {"hey": 1, "hi": 2, "hello": 3} - sync = False - - sot = resource2._ComponentManager(attributes=attrs, synchronized=sync) - self.assertEqual(attrs, sot.attributes) - self.assertEqual(set(attrs.keys()), sot._dirty) - - def test_create_synced(self): - attrs = {"hey": 1, "hi": 2, "hello": 3} - sync = True - - sot = resource2._ComponentManager(attributes=attrs, synchronized=sync) - self.assertEqual(attrs, sot.attributes) - self.assertEqual(set(), sot._dirty) - - def test_getitem(self): - key = 
"key" - value = "value" - attrs = {key: value} - - sot = resource2._ComponentManager(attributes=attrs) - self.assertEqual(value, sot.__getitem__(key)) - - def test_setitem_new(self): - key = "key" - value = "value" - - sot = resource2._ComponentManager() - sot.__setitem__(key, value) - - self.assertIn(key, sot.attributes) - self.assertIn(key, sot.dirty) - - def test_setitem_unchanged(self): - key = "key" - value = "value" - attrs = {key: value} - - sot = resource2._ComponentManager(attributes=attrs, synchronized=True) - # This shouldn't end up in the dirty list since we're just re-setting. - sot.__setitem__(key, value) - - self.assertEqual(value, sot.attributes[key]) - self.assertNotIn(key, sot.dirty) - - def test_delitem(self): - key = "key" - value = "value" - attrs = {key: value} - - sot = resource2._ComponentManager(attributes=attrs, synchronized=True) - sot.__delitem__(key) - - self.assertIsNone(sot.dirty[key]) - - def test_iter(self): - attrs = {"key": "value"} - sot = resource2._ComponentManager(attributes=attrs) - self.assertItemsEqual(iter(attrs), sot.__iter__()) - - def test_len(self): - attrs = {"key": "value"} - sot = resource2._ComponentManager(attributes=attrs) - self.assertEqual(len(attrs), sot.__len__()) - - def test_dirty(self): - key = "key" - key2 = "key2" - value = "value" - attrs = {key: value} - sot = resource2._ComponentManager(attributes=attrs, synchronized=False) - self.assertEqual({key: value}, sot.dirty) - - sot.__setitem__(key2, value) - self.assertEqual({key: value, key2: value}, sot.dirty) - - def test_clean(self): - key = "key" - value = "value" - attrs = {key: value} - sot = resource2._ComponentManager(attributes=attrs, synchronized=False) - self.assertEqual(attrs, sot.dirty) - - sot.clean() - - self.assertEqual(dict(), sot.dirty) - - -class Test_Request(base.TestCase): - - def test_create(self): - uri = 1 - body = 2 - headers = 3 - - sot = resource2._Request(uri, body, headers) - - self.assertEqual(uri, sot.uri) - 
self.assertEqual(body, sot.body) - self.assertEqual(headers, sot.headers) - - -class TestQueryParameters(base.TestCase): - - def test_create(self): - location = "location" - mapping = {"first_name": "first-name"} - - sot = resource2.QueryParameters(location, **mapping) - - self.assertEqual({"location": "location", - "first_name": "first-name", - "limit": "limit", - "marker": "marker"}, - sot._mapping) - - def test_transpose_unmapped(self): - location = "location" - mapping = {"first_name": "first-name"} - - sot = resource2.QueryParameters(location, **mapping) - result = sot._transpose({"location": "Brooklyn", - "first_name": "Brian", - "last_name": "Curtin"}) - - # last_name isn't mapped and shouldn't be included - self.assertEqual({"location": "Brooklyn", "first-name": "Brian"}, - result) - - def test_transpose_not_in_query(self): - location = "location" - mapping = {"first_name": "first-name"} - - sot = resource2.QueryParameters(location, **mapping) - result = sot._transpose({"location": "Brooklyn"}) - - # first_name not being in the query shouldn't affect results - self.assertEqual({"location": "Brooklyn"}, - result) - - -class TestResource(base.TestCase): - - def test_initialize_basic(self): - body = {"body": 1} - header = {"header": 2, "Location": "somewhere"} - uri = {"uri": 3} - everything = dict(itertools.chain(body.items(), header.items(), - uri.items())) - - mock_collect = mock.Mock() - mock_collect.return_value = body, header, uri - - with mock.patch.object(resource2.Resource, - "_collect_attrs", mock_collect): - sot = resource2.Resource(_synchronized=False, **everything) - mock_collect.assert_called_once_with(everything) - self.assertEqual("somewhere", sot.location) - - self.assertIsInstance(sot._body, resource2._ComponentManager) - self.assertEqual(body, sot._body.dirty) - self.assertIsInstance(sot._header, resource2._ComponentManager) - self.assertEqual(header, sot._header.dirty) - self.assertIsInstance(sot._uri, resource2._ComponentManager) - 
self.assertEqual(uri, sot._uri.dirty) - - self.assertFalse(sot.allow_create) - self.assertFalse(sot.allow_get) - self.assertFalse(sot.allow_update) - self.assertFalse(sot.allow_delete) - self.assertFalse(sot.allow_list) - self.assertFalse(sot.allow_head) - self.assertFalse(sot.patch_update) - self.assertFalse(sot.put_create) - - def test_repr(self): - a = {"a": 1} - b = {"b": 2} - c = {"c": 3} - - class Test(resource2.Resource): - def __init__(self): - self._body = mock.Mock() - self._body.attributes.items = mock.Mock( - return_value=a.items()) - - self._header = mock.Mock() - self._header.attributes.items = mock.Mock( - return_value=b.items()) - - self._uri = mock.Mock() - self._uri.attributes.items = mock.Mock( - return_value=c.items()) - - the_repr = repr(Test()) - - # Don't test the arguments all together since the dictionary order - # they're rendered in can't be depended on, nor does it matter. - self.assertIn("openstack.tests.unit.test_resource2.Test", the_repr) - self.assertIn("a=1", the_repr) - self.assertIn("b=2", the_repr) - self.assertIn("c=3", the_repr) - - def test_equality(self): - class Example(resource2.Resource): - x = resource2.Body("x") - y = resource2.Header("y") - z = resource2.URI("z") - - e1 = Example(x=1, y=2, z=3) - e2 = Example(x=1, y=2, z=3) - e3 = Example(x=0, y=0, z=0) - - self.assertEqual(e1, e2) - self.assertNotEqual(e1, e3) - - def test__update(self): - sot = resource2.Resource() - - body = "body" - header = "header" - uri = "uri" - - sot._collect_attrs = mock.Mock(return_value=(body, header, uri)) - sot._body.update = mock.Mock() - sot._header.update = mock.Mock() - sot._uri.update = mock.Mock() - - args = {"arg": 1} - sot._update(**args) - - sot._collect_attrs.assert_called_once_with(args) - sot._body.update.assert_called_once_with(body) - sot._header.update.assert_called_once_with(header) - sot._uri.update.assert_called_once_with(uri) - - def test__collect_attrs(self): - sot = resource2.Resource() - - expected_attrs = ["body", 
"header", "uri"] - - sot._consume_attrs = mock.Mock() - sot._consume_attrs.side_effect = expected_attrs - - # It'll get passed an empty dict at the least. - actual_attrs = sot._collect_attrs(dict()) - - self.assertItemsEqual(expected_attrs, actual_attrs) - - def test__consume_attrs(self): - serverside_key1 = "someKey1" - clientside_key1 = "some_key1" - serverside_key2 = "someKey2" - clientside_key2 = "some_key2" - value1 = "value1" - value2 = "value2" - mapping = {clientside_key1: serverside_key1, - clientside_key2: serverside_key2} - - other_key = "otherKey" - other_value = "other" - attrs = {clientside_key1: value1, - serverside_key2: value2, - other_key: other_value} - - sot = resource2.Resource() - - result = sot._consume_attrs(mapping, attrs) - - # Make sure that the expected key was consumed and we're only - # left with the other stuff. - self.assertDictEqual({other_key: other_value}, attrs) - - # Make sure that after we've popped our relevant client-side - # key off that we are returning it keyed off of its server-side - # name. - self.assertDictEqual({serverside_key1: value1, - serverside_key2: value2}, result) - - def test__mapping_defaults(self): - # Check that even on an empty class, we get the expected - # built-in attributes. - - self.assertIn("location", resource2.Resource._header_mapping()) - self.assertIn("name", resource2.Resource._body_mapping()) - self.assertIn("id", resource2.Resource._body_mapping()) - - def test__mapping_overrides(self): - # Iterating through the MRO used to wipe out overrides of mappings - # found in base classes. 
- new_name = "MyName" - new_id = "MyID" - - class Test(resource2.Resource): - name = resource2.Body(new_name) - id = resource2.Body(new_id) - - mapping = Test._body_mapping() - - self.assertEqual(new_name, mapping["name"]) - self.assertEqual(new_id, mapping["id"]) - - def test__body_mapping(self): - class Test(resource2.Resource): - x = resource2.Body("x") - y = resource2.Body("y") - z = resource2.Body("z") - - self.assertIn("x", Test._body_mapping()) - self.assertIn("y", Test._body_mapping()) - self.assertIn("z", Test._body_mapping()) - - def test__header_mapping(self): - class Test(resource2.Resource): - x = resource2.Header("x") - y = resource2.Header("y") - z = resource2.Header("z") - - self.assertIn("x", Test._header_mapping()) - self.assertIn("y", Test._header_mapping()) - self.assertIn("z", Test._header_mapping()) - - def test__uri_mapping(self): - class Test(resource2.Resource): - x = resource2.URI("x") - y = resource2.URI("y") - z = resource2.URI("z") - - self.assertIn("x", Test._uri_mapping()) - self.assertIn("y", Test._uri_mapping()) - self.assertIn("z", Test._uri_mapping()) - - def test__getattribute__id_in_body(self): - id = "lol" - sot = resource2.Resource(id=id) - - result = getattr(sot, "id") - self.assertEqual(result, id) - - def test__getattribute__id_with_alternate(self): - id = "lol" - - class Test(resource2.Resource): - blah = resource2.Body("blah", alternate_id=True) - - sot = Test(blah=id) - - result = getattr(sot, "id") - self.assertEqual(result, id) - - def test__getattribute__id_without_alternate(self): - class Test(resource2.Resource): - id = None - - sot = Test() - self.assertIsNone(sot.id) - - def test__alternate_id_None(self): - self.assertEqual("", resource2.Resource._alternate_id()) - - def test__alternate_id(self): - class Test(resource2.Resource): - alt = resource2.Body("the_alt", alternate_id=True) - - self.assertTrue("the_alt", Test._alternate_id()) - - value1 = "lol" - sot = Test(alt=value1) - self.assertEqual(sot.alt, value1) - 
self.assertEqual(sot.id, value1) - - value2 = "rofl" - sot = Test(the_alt=value2) - self.assertEqual(sot.alt, value2) - self.assertEqual(sot.id, value2) - - def test__get_id_instance(self): - class Test(resource2.Resource): - id = resource2.Body("id") - - value = "id" - sot = Test(id=value) - - self.assertEqual(value, sot._get_id(sot)) - - def test__get_id_instance_alternate(self): - class Test(resource2.Resource): - attr = resource2.Body("attr", alternate_id=True) - - value = "id" - sot = Test(attr=value) - - self.assertEqual(value, sot._get_id(sot)) - - def test__get_id_value(self): - value = "id" - self.assertEqual(value, resource2.Resource._get_id(value)) - - def test_to_dict(self): - - class Test(resource2.Resource): - foo = resource2.Header('foo') - bar = resource2.Body('bar') - - res = Test(id='FAKE_ID') - - expected = { - 'id': 'FAKE_ID', - 'name': None, - 'location': None, - 'foo': None, - 'bar': None - } - self.assertEqual(expected, res.to_dict()) - - def test_to_dict_no_body(self): - - class Test(resource2.Resource): - foo = resource2.Header('foo') - bar = resource2.Body('bar') - - res = Test(id='FAKE_ID') - - expected = { - 'location': None, - 'foo': None, - } - self.assertEqual(expected, res.to_dict(body=False)) - - def test_to_dict_no_header(self): - - class Test(resource2.Resource): - foo = resource2.Header('foo') - bar = resource2.Body('bar') - - res = Test(id='FAKE_ID') - - expected = { - 'id': 'FAKE_ID', - 'name': None, - 'bar': None - } - self.assertEqual(expected, res.to_dict(headers=False)) - - def test_to_dict_ignore_none(self): - - class Test(resource2.Resource): - foo = resource2.Header('foo') - bar = resource2.Body('bar') - - res = Test(id='FAKE_ID', bar='BAR') - - expected = { - 'id': 'FAKE_ID', - 'bar': 'BAR', - } - self.assertEqual(expected, res.to_dict(ignore_none=True)) - - def test_to_dict_with_mro(self): - - class Parent(resource2.Resource): - foo = resource2.Header('foo') - bar = resource2.Body('bar') - - class Child(Parent): - 
foo_new = resource2.Header('foo_baz_server') - bar_new = resource2.Body('bar_baz_server') - - res = Child(id='FAKE_ID') - - expected = { - 'foo': None, - 'bar': None, - 'foo_new': None, - 'bar_new': None, - 'id': 'FAKE_ID', - 'location': None, - 'name': None - } - self.assertEqual(expected, res.to_dict()) - - def test_to_dict_value_error(self): - - class Test(resource2.Resource): - foo = resource2.Header('foo') - bar = resource2.Body('bar') - - res = Test(id='FAKE_ID') - - err = self.assertRaises(ValueError, - res.to_dict, body=False, headers=False) - self.assertEqual('At least one of `body` or `headers` must be True', - six.text_type(err)) - - def test_to_dict_with_mro_no_override(self): - - class Parent(resource2.Resource): - header = resource2.Header('HEADER') - body = resource2.Body('BODY') - - class Child(Parent): - # The following two properties are not supposed to be overridden - # by the parent class property values. - header = resource2.Header('ANOTHER_HEADER') - body = resource2.Body('ANOTHER_BODY') - - res = Child(id='FAKE_ID', body='BODY_VALUE', header='HEADER_VALUE') - - expected = { - 'body': 'BODY_VALUE', - 'header': 'HEADER_VALUE', - 'id': 'FAKE_ID', - 'location': None, - 'name': None - } - self.assertEqual(expected, res.to_dict()) - - def test_new(self): - class Test(resource2.Resource): - attr = resource2.Body("attr") - - value = "value" - sot = Test.new(attr=value) - - self.assertIn("attr", sot._body.dirty) - self.assertEqual(value, sot.attr) - - def test_existing(self): - class Test(resource2.Resource): - attr = resource2.Body("attr") - - value = "value" - sot = Test.existing(attr=value) - - self.assertNotIn("attr", sot._body.dirty) - self.assertEqual(value, sot.attr) - - def test__prepare_request_with_id(self): - class Test(resource2.Resource): - base_path = "/something" - body_attr = resource2.Body("x") - header_attr = resource2.Header("y") - - the_id = "id" - body_value = "body" - header_value = "header" - sot = Test(id=the_id, 
body_attr=body_value, header_attr=header_value, - _synchronized=False) - - result = sot._prepare_request(requires_id=True) - - self.assertEqual("something/id", result.uri) - self.assertEqual({"x": body_value, "id": the_id}, result.body) - self.assertEqual({"y": header_value}, result.headers) - - def test__prepare_request_missing_id(self): - sot = resource2.Resource(id=None) - - self.assertRaises(exceptions.InvalidRequest, - sot._prepare_request, requires_id=True) - - def test__prepare_request_with_key(self): - key = "key" - - class Test(resource2.Resource): - base_path = "/something" - resource_key = key - body_attr = resource2.Body("x") - header_attr = resource2.Header("y") - - body_value = "body" - header_value = "header" - sot = Test(body_attr=body_value, header_attr=header_value, - _synchronized=False) - - result = sot._prepare_request(requires_id=False, prepend_key=True) - - self.assertEqual("/something", result.uri) - self.assertEqual({key: {"x": body_value}}, result.body) - self.assertEqual({"y": header_value}, result.headers) - - def test__filter_component(self): - client_name = "client_name" - server_name = "serverName" - value = "value" - # Include something in the mapping that we don't receive - # so the branch that looks at existence in the compoment is checked. - mapping = {client_name: server_name, "other": "blah"} - component = {server_name: value, "something": "else"} - - sot = resource2.Resource() - result = sot._filter_component(component, mapping) - - # The something:else mapping should not make it into here. 
- self.assertEqual({server_name: value}, result) - - def test__translate_response_no_body(self): - class Test(resource2.Resource): - attr = resource2.Header("attr") - - response = mock.Mock() - response.headers = dict() - - sot = Test() - sot._filter_component = mock.Mock(return_value={"attr": "value"}) - - sot._translate_response(response, has_body=False) - - self.assertEqual(dict(), sot._header.dirty) - self.assertEqual("value", sot.attr) - - def test__translate_response_with_body_no_resource_key(self): - class Test(resource2.Resource): - attr = resource2.Body("attr") - - body = {"attr": "value"} - response = mock.Mock() - response.headers = dict() - response.json.return_value = body - - sot = Test() - sot._filter_component = mock.Mock(side_effect=[body, dict()]) - - sot._translate_response(response, has_body=True) - - self.assertEqual("value", sot.attr) - self.assertEqual(dict(), sot._body.dirty) - self.assertEqual(dict(), sot._header.dirty) - - def test__translate_response_with_body_with_resource_key(self): - key = "key" - - class Test(resource2.Resource): - resource_key = key - attr = resource2.Body("attr") - - body = {"attr": "value"} - response = mock.Mock() - response.headers = dict() - response.json.return_value = {key: body} - - sot = Test() - sot._filter_component = mock.Mock(side_effect=[body, dict()]) - - sot._translate_response(response, has_body=True) - - self.assertEqual("value", sot.attr) - self.assertEqual(dict(), sot._body.dirty) - self.assertEqual(dict(), sot._header.dirty) - - def test_cant_do_anything(self): - class Test(resource2.Resource): - allow_create = False - allow_get = False - allow_update = False - allow_delete = False - allow_head = False - allow_list = False - - sot = Test() - - # The first argument to all of these operations is the session, - # but we raise before we get to it so just pass anything in. 
- self.assertRaises(exceptions.MethodNotSupported, sot.create, "") - self.assertRaises(exceptions.MethodNotSupported, sot.get, "") - self.assertRaises(exceptions.MethodNotSupported, sot.delete, "") - self.assertRaises(exceptions.MethodNotSupported, sot.head, "") - - # list is a generator so you need to begin consuming - # it in order to exercise the failure. - the_list = sot.list("") - self.assertRaises(exceptions.MethodNotSupported, next, the_list) - - # Update checks the dirty list first before even trying to see - # if the call can be made, so fake a dirty list. - sot._body = mock.Mock() - sot._body.dirty = mock.Mock(return_value={"x": "y"}) - self.assertRaises(exceptions.MethodNotSupported, sot.update, "") - - -class TestResourceActions(base.TestCase): - - def setUp(self): - super(TestResourceActions, self).setUp() - - self.service_name = "service" - self.base_path = "base_path" - - class Test(resource2.Resource): - service = self.service_name - base_path = self.base_path - allow_create = True - allow_get = True - allow_head = True - allow_update = True - allow_delete = True - allow_list = True - - self.test_class = Test - - self.request = mock.Mock(spec=resource2._Request) - self.request.uri = "uri" - self.request.body = "body" - self.request.headers = "headers" - - self.response = mock.Mock() - - self.sot = Test(id="id") - self.sot._prepare_request = mock.Mock(return_value=self.request) - self.sot._translate_response = mock.Mock() - - self.session = mock.Mock(spec=session.Session) - self.session.create = mock.Mock(return_value=self.response) - self.session.get = mock.Mock(return_value=self.response) - self.session.put = mock.Mock(return_value=self.response) - self.session.patch = mock.Mock(return_value=self.response) - self.session.post = mock.Mock(return_value=self.response) - self.session.delete = mock.Mock(return_value=self.response) - self.session.head = mock.Mock(return_value=self.response) - - def _test_create(self, cls, requires_id=False, 
prepend_key=False): - id = "id" if requires_id else None - sot = cls(id=id) - sot._prepare_request = mock.Mock(return_value=self.request) - sot._translate_response = mock.Mock() - - result = sot.create(self.session, prepend_key=prepend_key) - - sot._prepare_request.assert_called_once_with( - requires_id=requires_id, prepend_key=prepend_key) - if requires_id: - self.session.put.assert_called_once_with( - self.request.uri, - endpoint_filter=self.service_name, - json=self.request.body, headers=self.request.headers) - else: - self.session.post.assert_called_once_with( - self.request.uri, - endpoint_filter=self.service_name, - json=self.request.body, headers=self.request.headers) - - sot._translate_response.assert_called_once_with(self.response) - self.assertEqual(result, sot) - - def test_put_create(self): - class Test(resource2.Resource): - service = self.service_name - base_path = self.base_path - allow_create = True - put_create = True - - self._test_create(Test, requires_id=True, prepend_key=True) - - def test_post_create(self): - class Test(resource2.Resource): - service = self.service_name - base_path = self.base_path - allow_create = True - put_create = False - - self._test_create(Test, requires_id=False, prepend_key=True) - - def test_get(self): - result = self.sot.get(self.session) - - self.sot._prepare_request.assert_called_once_with(requires_id=True) - self.session.get.assert_called_once_with( - self.request.uri, endpoint_filter=self.service_name) - - self.sot._translate_response.assert_called_once_with(self.response) - self.assertEqual(result, self.sot) - - def test_get_not_requires_id(self): - result = self.sot.get(self.session, False) - - self.sot._prepare_request.assert_called_once_with(requires_id=False) - self.session.get.assert_called_once_with( - self.request.uri, endpoint_filter=self.service_name) - - self.sot._translate_response.assert_called_once_with(self.response) - self.assertEqual(result, self.sot) - - def test_head(self): - result = 
self.sot.head(self.session) - - self.sot._prepare_request.assert_called_once_with() - self.session.head.assert_called_once_with( - self.request.uri, - endpoint_filter=self.service_name, - headers={"Accept": ""}) - - self.sot._translate_response.assert_called_once_with(self.response) - self.assertEqual(result, self.sot) - - def _test_update(self, patch_update=False, prepend_key=True, - has_body=True): - self.sot.patch_update = patch_update - - # Need to make sot look dirty so we can attempt an update - self.sot._body = mock.Mock() - self.sot._body.dirty = mock.Mock(return_value={"x": "y"}) - - self.sot.update(self.session, prepend_key=prepend_key, - has_body=has_body) - - self.sot._prepare_request.assert_called_once_with( - prepend_key=prepend_key) - - if patch_update: - self.session.patch.assert_called_once_with( - self.request.uri, - endpoint_filter=self.service_name, - json=self.request.body, headers=self.request.headers) - else: - self.session.put.assert_called_once_with( - self.request.uri, - endpoint_filter=self.service_name, - json=self.request.body, headers=self.request.headers) - - self.sot._translate_response.assert_called_once_with( - self.response, has_body=has_body) - - def test_update_put(self): - self._test_update(patch_update=False, prepend_key=True, has_body=True) - - def test_update_patch(self): - self._test_update(patch_update=True, prepend_key=False, has_body=False) - - def test_update_not_dirty(self): - self.sot._body = mock.Mock() - self.sot._body.dirty = dict() - self.sot._header = mock.Mock() - self.sot._header.dirty = dict() - - self.sot.update(self.session) - - self.session.put.assert_not_called() - - def test_delete(self): - result = self.sot.delete(self.session) - - self.sot._prepare_request.assert_called_once_with() - self.session.delete.assert_called_once_with( - self.request.uri, - endpoint_filter=self.service_name, - headers={"Accept": ""}) - - self.sot._translate_response.assert_called_once_with( - self.response, has_body=False) - 
self.assertEqual(result, self.sot) - - # NOTE: As list returns a generator, testing it requires consuming - # the generator. Wrap calls to self.sot.list in a `list` - # and then test the results as a list of responses. - def test_list_empty_response(self): - mock_response = mock.Mock() - mock_response.json.return_value = [] - - self.session.get.return_value = mock_response - - result = list(self.sot.list(self.session)) - - self.session.get.assert_called_once_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={}) - - self.assertEqual([], result) - - def test_list_one_page_response_paginated(self): - id_value = 1 - mock_response = mock.Mock() - mock_response.json.side_effect = [[{"id": id_value}], - []] - - self.session.get.return_value = mock_response - - # Ensure that we break out of the loop on a paginated call - # that still only results in one page of data. - results = list(self.sot.list(self.session, paginated=True)) - - self.assertEqual(1, len(results)) - - # Look at the `params` argument to each of the get calls that - # were made. 
- self.session.get.call_args_list[0][1]["params"] = {} - self.session.get.call_args_list[1][1]["params"] = {"marker": id_value} - self.assertEqual(id_value, results[0].id) - self.assertIsInstance(results[0], self.test_class) - - def test_list_one_page_response_not_paginated(self): - id_value = 1 - mock_response = mock.Mock() - mock_response.json.return_value = [{"id": id_value}] - - self.session.get.return_value = mock_response - - results = list(self.sot.list(self.session, paginated=False)) - - self.session.get.assert_called_once_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={}) - - self.assertEqual(1, len(results)) - self.assertEqual(id_value, results[0].id) - self.assertIsInstance(results[0], self.test_class) - - def test_list_one_page_response_resources_key(self): - key = "resources" - - class Test(self.test_class): - resources_key = key - - id_value = 1 - mock_response = mock.Mock() - mock_response.json.return_value = {key: [{"id": id_value}]} - - self.session.get.return_value = mock_response - - sot = Test() - - results = list(sot.list(self.session)) - - self.session.get.assert_called_once_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={}) - - self.assertEqual(1, len(results)) - self.assertEqual(id_value, results[0].id) - self.assertIsInstance(results[0], self.test_class) - - def test_list_multi_page_response_not_paginated(self): - ids = [1, 2] - mock_response = mock.Mock() - mock_response.json.side_effect = [[{"id": ids[0]}], - [{"id": ids[1]}]] - - self.session.get.return_value = mock_response - - results = list(self.sot.list(self.session, paginated=False)) - - self.assertEqual(1, len(results)) - self.assertEqual(ids[0], results[0].id) - self.assertIsInstance(results[0], self.test_class) - - def test_list_query_params(self): - id = 1 - qp = "query param!" - qp_name = "query-param" - uri_param = "uri param!" 
- - mock_response = mock.Mock() - mock_response.json.side_effect = [[{"id": id}], - []] - - self.session.get.return_value = mock_response - - class Test(self.test_class): - _query_mapping = resource2.QueryParameters(query_param=qp_name) - base_path = "/%(something)s/blah" - something = resource2.URI("something") - - results = list(Test.list(self.session, paginated=True, - query_param=qp, something=uri_param)) - - self.assertEqual(1, len(results)) - - # Look at the `params` argument to each of the get calls that - # were made. - self.session.get.call_args_list[0][1]["params"] = {qp_name: qp} - - self.assertEqual(self.session.get.call_args_list[0][0][0], - Test.base_path % {"something": uri_param}) - - def test_list_multi_page_response_paginated(self): - # This tests our ability to stop making calls once - # we've received all of the data. However, this tests - # the case that we always receive full pages of data - # and then the signal that there is no more data - an empty list. - # In this case, we need to make one extra request beyond - # the end of data to ensure we've received it all. 
- ids = [1, 2] - resp1 = mock.Mock() - resp1.json.return_value = [{"id": ids[0]}] - resp2 = mock.Mock() - resp2.json.return_value = [{"id": ids[1]}] - resp3 = mock.Mock() - resp3.json.return_value = [] - - self.session.get.side_effect = [resp1, resp2, resp3] - - results = self.sot.list(self.session, paginated=True) - - result0 = next(results) - self.assertEqual(result0.id, ids[0]) - self.session.get.assert_called_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={}) - - result1 = next(results) - self.assertEqual(result1.id, ids[1]) - self.session.get.assert_called_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={"limit": 1, "marker": 1}) - - self.assertRaises(StopIteration, next, results) - self.session.get.assert_called_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={"limit": 1, "marker": 2}) - - def test_list_multi_page_early_termination(self): - # This tests our ability to be somewhat smart when evaluating - # the contents of the responses. When we receive a full page - # of data, we can be smart about terminating our responses - # once we see that we've received a page with less data than - # expected, saving one request. 
- ids = [1, 2, 3] - resp1 = mock.Mock() - resp1.json.return_value = [{"id": ids[0]}, {"id": ids[1]}] - resp2 = mock.Mock() - resp2.json.return_value = [{"id": ids[2]}] - - self.session.get.side_effect = [resp1, resp2] - - results = self.sot.list(self.session, paginated=True) - - # Get the first page's two items - result0 = next(results) - self.assertEqual(result0.id, ids[0]) - result1 = next(results) - self.assertEqual(result1.id, ids[1]) - self.session.get.assert_called_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={}) - - # Second page only has one item - result2 = next(results) - self.assertEqual(result2.id, ids[2]) - self.session.get.assert_called_with( - self.base_path, - endpoint_filter=self.service_name, - headers={"Accept": "application/json"}, - params={"limit": 2, "marker": 2}) - - # Ensure we're done after those three items - self.assertRaises(StopIteration, next, results) - - # Ensure we only made two calls to get this done - self.assertEqual(2, len(self.session.get.call_args_list)) - - -class TestResourceFind(base.TestCase): - - def setUp(self): - super(TestResourceFind, self).setUp() - - self.result = 1 - - class Base(resource2.Resource): - - @classmethod - def existing(cls, **kwargs): - raise exceptions.NotFoundException - - @classmethod - def list(cls, session): - return None - - class OneResult(Base): - - @classmethod - def _get_one_match(cls, *args): - return self.result - - class NoResults(Base): - - @classmethod - def _get_one_match(cls, *args): - return None - - self.no_results = NoResults - self.one_result = OneResult - - def test_find_short_circuit(self): - value = 1 - - class Test(resource2.Resource): - - @classmethod - def existing(cls, **kwargs): - mock_match = mock.Mock() - mock_match.get.return_value = value - return mock_match - - result = Test.find("session", "name") - - self.assertEqual(result, value) - - def test_no_match_raise(self): - 
self.assertRaises(exceptions.ResourceNotFound, self.no_results.find, - "session", "name", ignore_missing=False) - - def test_no_match_return(self): - self.assertIsNone( - self.no_results.find("session", "name", ignore_missing=True)) - - def test_find_result(self): - self.assertEqual(self.result, self.one_result.find("session", "name")) - - def test_match_empty_results(self): - self.assertIsNone(resource2.Resource._get_one_match("name", [])) - - def test_no_match_by_name(self): - the_name = "Brian" - - match = mock.Mock(spec=resource2.Resource) - match.name = the_name - - result = resource2.Resource._get_one_match("Richard", [match]) - - self.assertIsNone(result, match) - - def test_single_match_by_name(self): - the_name = "Brian" - - match = mock.Mock(spec=resource2.Resource) - match.name = the_name - - result = resource2.Resource._get_one_match(the_name, [match]) - - self.assertIs(result, match) - - def test_single_match_by_id(self): - the_id = "Brian" - - match = mock.Mock(spec=resource2.Resource) - match.id = the_id - - result = resource2.Resource._get_one_match(the_id, [match]) - - self.assertIs(result, match) - - def test_single_match_by_alternate_id(self): - the_id = "Richard" - - class Test(resource2.Resource): - other_id = resource2.Body("other_id", alternate_id=True) - - match = Test(other_id=the_id) - result = Test._get_one_match(the_id, [match]) - - self.assertIs(result, match) - - def test_multiple_matches(self): - the_id = "Brian" - - match = mock.Mock(spec=resource2.Resource) - match.id = the_id - - self.assertRaises( - exceptions.DuplicateResource, - resource2.Resource._get_one_match, the_id, [match, match]) - - -class TestWaitForStatus(base.TestCase): - - def test_immediate_status(self): - status = "loling" - resource = mock.Mock() - resource.status = status - - result = resource2.wait_for_status("session", resource, status, - "failures", "interval", "wait") - - self.assertEqual(result, resource) - - @mock.patch("time.sleep", return_value=None) - 
def test_status_match(self, mock_sleep): - status = "loling" - resource = mock.Mock() - - # other gets past the first check, two anothers gets through - # the sleep loop, and the third matches - statuses = ["other", "another", "another", status] - type(resource).status = mock.PropertyMock(side_effect=statuses) - - result = resource2.wait_for_status("session", resource, status, - None, 1, 5) - - self.assertEqual(result, resource) - - @mock.patch("time.sleep", return_value=None) - def test_status_fails(self, mock_sleep): - status = "loling" - failure = "crying" - resource = mock.Mock() - - # other gets past the first check, the first failure doesn't - # match the expected, the third matches the failure, - # the fourth is used in creating the exception message - statuses = ["other", failure, failure, failure] - type(resource).status = mock.PropertyMock(side_effect=statuses) - - self.assertRaises(exceptions.ResourceFailure, - resource2.wait_for_status, - "session", resource, status, [failure], 1, 5) - - @mock.patch("time.sleep", return_value=None) - def test_timeout(self, mock_sleep): - status = "loling" - resource = mock.Mock() - - # The first "other" gets past the first check, and then three - # pairs of "other" statuses run through the sleep counter loop, - # after which time should be up. This is because we have a - # one second interval and three second waiting period. 
- statuses = ["other"] * 7 - type(resource).status = mock.PropertyMock(side_effect=statuses) - - self.assertRaises(exceptions.ResourceTimeout, - resource2.wait_for_status, - "session", resource, status, None, 1, 3) - - def test_no_sleep(self): - resource = mock.Mock() - statuses = ["other"] - type(resource).status = mock.PropertyMock(side_effect=statuses) - - self.assertRaises(exceptions.ResourceTimeout, - resource2.wait_for_status, - "session", resource, "status", None, 0, -1) - - -class TestWaitForDelete(base.TestCase): - - @mock.patch("time.sleep", return_value=None) - def test_success(self, mock_sleep): - resource = mock.Mock() - resource.get.side_effect = [None, None, exceptions.NotFoundException] - - result = resource2.wait_for_delete("session", resource, 1, 3) - - self.assertEqual(result, resource) - - @mock.patch("time.sleep", return_value=None) - def test_timeout(self, mock_sleep): - resource = mock.Mock() - resource.get.side_effect = [None, None, None] - - self.assertRaises(exceptions.ResourceTimeout, - resource2.wait_for_delete, - "session", resource, 1, 3) diff --git a/openstack/tests/unit/test_service_filter.py b/openstack/tests/unit/test_service_filter.py deleted file mode 100644 index 7d0129917e..0000000000 --- a/openstack/tests/unit/test_service_filter.py +++ /dev/null @@ -1,41 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import testtools - -from openstack.identity import identity_service -from openstack import service_filter - - -class TestValidVersion(testtools.TestCase): - def test_constructor(self): - sot = service_filter.ValidVersion('v1.0', 'v1') - self.assertEqual('v1.0', sot.module) - self.assertEqual('v1', sot.path) - - -class TestServiceFilter(testtools.TestCase): - def test_init(self): - sot = service_filter.ServiceFilter( - 'ServiceType', region='REGION1', service_name='ServiceName', - version='1', api_version='1.23', requires_project_id=True) - self.assertEqual('servicetype', sot.service_type) - self.assertEqual('REGION1', sot.region) - self.assertEqual('ServiceName', sot.service_name) - self.assertEqual('1', sot.version) - self.assertEqual('1.23', sot.api_version) - self.assertTrue(sot.requires_project_id) - - def test_get_module(self): - sot = identity_service.IdentityService() - self.assertEqual('openstack.identity.v3', sot.get_module()) - self.assertEqual('identity', sot.get_service_module()) diff --git a/openstack/tests/unit/test_session.py b/openstack/tests/unit/test_session.py deleted file mode 100644 index 89e8f452df..0000000000 --- a/openstack/tests/unit/test_session.py +++ /dev/null @@ -1,307 +0,0 @@ -# Licensed under the Apache License, Version 2.0 (the "License"); you may -# not use this file except in compliance with the License. You may obtain -# a copy of the License at -# -# http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software -# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT -# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the -# License for the specific language governing permissions and limitations -# under the License. 
- -import mock -import testtools - -from keystoneauth1 import exceptions as _exceptions - -from openstack import exceptions -from openstack import profile -from openstack import session -from openstack import utils - - -class TestSession(testtools.TestCase): - - def test_init_user_agent_none(self): - sot = session.Session(None) - self.assertTrue(sot.user_agent.startswith("openstacksdk")) - - def test_init_user_agent_set(self): - sot = session.Session(None, user_agent="testing/123") - self.assertTrue(sot.user_agent.startswith("testing/123 openstacksdk")) - - def test_init_with_single_api_request(self): - prof = profile.Profile() - prof.set_api_version('clustering', '1.2') - - sot = session.Session(prof) - - # The assertion acutally tests the property assigned in parent class - self.assertEqual({'openstack-api-version': 'clustering 1.2'}, - sot.additional_headers) - - def test_init_with_multi_api_requests(self): - prof = profile.Profile() - prof.set_api_version('clustering', '1.2') - prof.set_api_version('compute', '2.15') - - sot = session.Session(prof) - - versions = sot.additional_headers['openstack-api-version'] - requests = [req.strip() for req in versions.split(',')] - self.assertIn('clustering 1.2', requests) - self.assertIn('compute 2.15', requests) - - def test_init_with_no_api_requests(self): - prof = profile.Profile() - - sot = session.Session(prof) - - self.assertEqual({}, sot.additional_headers) - - def test_map_exceptions_not_found_exception(self): - ksa_exc = _exceptions.HttpError(message="test", http_status=404) - func = mock.Mock(side_effect=ksa_exc) - - os_exc = self.assertRaises( - exceptions.NotFoundException, session.map_exceptions(func)) - self.assertIsInstance(os_exc, exceptions.NotFoundException) - self.assertEqual(ksa_exc.message, os_exc.message) - self.assertEqual(ksa_exc.http_status, os_exc.http_status) - self.assertEqual(ksa_exc, os_exc.cause) - - def test_map_exceptions_http_exception(self): - ksa_exc = 
_exceptions.HttpError(message="test", http_status=400) - func = mock.Mock(side_effect=ksa_exc) - - os_exc = self.assertRaises( - exceptions.HttpException, session.map_exceptions(func)) - self.assertIsInstance(os_exc, exceptions.HttpException) - self.assertEqual(ksa_exc.message, os_exc.message) - self.assertEqual(ksa_exc.http_status, os_exc.http_status) - self.assertEqual(ksa_exc, os_exc.cause) - - def test_map_exceptions_sdk_exception_1(self): - ksa_exc = _exceptions.ClientException() - func = mock.Mock(side_effect=ksa_exc) - - os_exc = self.assertRaises( - exceptions.SDKException, session.map_exceptions(func)) - self.assertIsInstance(os_exc, exceptions.SDKException) - self.assertEqual(ksa_exc, os_exc.cause) - - def test_map_exceptions_sdk_exception_2(self): - ksa_exc = _exceptions.VersionNotAvailable() - func = mock.Mock(side_effect=ksa_exc) - - os_exc = self.assertRaises( - exceptions.SDKException, session.map_exceptions(func)) - self.assertIsInstance(os_exc, exceptions.SDKException) - self.assertEqual(ksa_exc, os_exc.cause) - - def test__parse_versions_response_exception(self): - uri = "http://www.openstack.org" - level = "DEBUG" - sot = session.Session(None) - sot.get = mock.Mock(side_effect=exceptions.NotFoundException) - - with self.assertLogs(logger=session.__name__, level=level) as log: - self.assertIsNone(sot._parse_versions_response(uri)) - - self.assertEqual(len(log.output), 1, - "Too many warnings were logged") - self.assertEqual( - log.output[0], - "%s:%s:Looking for versions at %s" % (level, session.__name__, - uri)) - - def test__parse_versions_response_no_json(self): - sot = session.Session(None) - retval = mock.Mock() - retval.json = mock.Mock(side_effect=ValueError) - sot.get = mock.Mock(return_value=retval) - - self.assertIsNone(sot._parse_versions_response("test")) - - def test__parse_versions_response_no_versions(self): - sot = session.Session(None) - retval = mock.Mock() - retval.json = mock.Mock(return_value={"no_versions_here": "blarga"}) - 
sot.get = mock.Mock(return_value=retval) - - self.assertIsNone(sot._parse_versions_response("test")) - - def test__parse_versions_response_with_versions(self): - uri = "http://openstack.org" - versions = [1, 2, 3] - - sot = session.Session(None) - retval = mock.Mock() - retval.json = mock.Mock(return_value={"versions": versions}) - sot.get = mock.Mock(return_value=retval) - - expected = session.Session._Endpoint(uri, versions) - self.assertEqual(expected, sot._parse_versions_response(uri)) - - def test__parse_versions_response_with_nested_versions(self): - uri = "http://openstack.org" - versions = [1, 2, 3] - - sot = session.Session(None) - retval = mock.Mock() - retval.json = mock.Mock(return_value={"versions": - {"values": versions}}) - sot.get = mock.Mock(return_value=retval) - - expected = session.Session._Endpoint(uri, versions) - self.assertEqual(expected, sot._parse_versions_response(uri)) - - def test__get_endpoint_versions_at_subdomain(self): - # This test covers a common case of services deployed under - # subdomains. Additionally, it covers the case of a service - # deployed at the root, which will be the first request made - # for versions. - sc_uri = "https://service.cloud.com/v1/" - versions_uri = "https://service.cloud.com" - - sot = session.Session(None) - sot.get_project_id = mock.Mock(return_value="project_id") - - responses = [session.Session._Endpoint(versions_uri, "versions")] - sot._parse_versions_response = mock.Mock(side_effect=responses) - - result = sot._get_endpoint_versions("type", sc_uri) - - sot._parse_versions_response.assert_called_once_with(versions_uri) - self.assertEqual(result, responses[0]) - self.assertFalse(result.needs_project_id) - - def test__get_endpoint_versions_at_path(self): - # This test covers a common case of services deployed under - # a path. Additionally, it covers the case of a service - # deployed at a path deeper than the root, which will mean - # more than one request will need to be made. 
- sc_uri = "https://cloud.com/api/service/v2/project_id" - versions_uri = "https://cloud.com/api/service" - - sot = session.Session(None) - sot.get_project_id = mock.Mock(return_value="project_id") - - responses = [None, None, - session.Session._Endpoint(versions_uri, "versions")] - sot._parse_versions_response = mock.Mock(side_effect=responses) - - result = sot._get_endpoint_versions("type", sc_uri) - - sot._parse_versions_response.assert_has_calls( - [mock.call("https://cloud.com"), - mock.call("https://cloud.com/api"), - mock.call(versions_uri)]) - self.assertEqual(result, responses[2]) - self.assertTrue(result.needs_project_id) - - def test__get_endpoint_versions_at_port(self): - # This test covers a common case of services deployed under - # a port. - sc_uri = "https://cloud.com:1234/v3" - versions_uri = "https://cloud.com:1234" - - sot = session.Session(None) - sot.get_project_id = mock.Mock(return_value="project_id") - - responses = [session.Session._Endpoint(versions_uri, "versions")] - sot._parse_versions_response = mock.Mock(side_effect=responses) - - result = sot._get_endpoint_versions("type", sc_uri) - - sot._parse_versions_response.assert_called_once_with(versions_uri) - self.assertEqual(result, responses[0]) - self.assertFalse(result.needs_project_id) - - def test__parse_version(self): - sot = session.Session(None) - - self.assertEqual(sot._parse_version("2"), (2, -1)) - self.assertEqual(sot._parse_version("v2"), (2, -1)) - self.assertEqual(sot._parse_version("v2.1"), (2, 1)) - self.assertRaises(ValueError, sot._parse_version, "lol") - - def test__get_version_match_none(self): - sot = session.Session(None) - - endpoint = session.Session._Endpoint("root", []) - self.assertRaises( - exceptions.EndpointNotFound, - sot._get_version_match, endpoint, None, "service") - - def test__get_version_match_fuzzy(self): - match = "http://devstack/v2.1" - root_endpoint = "http://devstack" - versions = [{"id": "v2.0", - "links": [{"href": "http://devstack/v2/", - 
"rel": "self"}]}, - {"id": "v2.1", - "links": [{"href": match, - "rel": "self"}]}] - - sot = session.Session(None) - - endpoint = session.Session._Endpoint(root_endpoint, versions) - # Look for a v2 match, which we internally denote as a minor - # version of -1 so we can find the highest matching minor. - rv = sot._get_version_match(endpoint, session.Version(2, -1), - "service") - self.assertEqual(rv, match) - - def test__get_version_match_exact(self): - match = "http://devstack/v2" - root_endpoint = "http://devstack" - versions = [{"id": "v2.0", - "links": [{"href": match, - "rel": "self"}]}, - {"id": "v2.1", - "links": [{"href": "http://devstack/v2.1/", - "rel": "self"}]}] - - sot = session.Session(None) - endpoint = session.Session._Endpoint(root_endpoint, versions) - rv = sot._get_version_match(endpoint, session.Version(2, 0), - "service") - self.assertEqual(rv, match) - - def test__get_version_match_fragment(self): - root = "http://cloud.net" - match = "/v2" - versions = [{"id": "v2.0", "links": [{"href": match, "rel": "self"}]}] - - sot = session.Session(None) - endpoint = session.Session._Endpoint(root, versions) - rv = sot._get_version_match(endpoint, session.Version(2, 0), "service") - self.assertEqual(rv, root + match) - - def test__get_version_match_project_id(self): - match = "http://devstack/v2" - root_endpoint = "http://devstack" - project_id = "asdf123" - versions = [{"id": "v2.0", "links": [{"href": match, "rel": "self"}]}] - - sot = session.Session(None) - sot.get_project_id = mock.Mock(return_value=project_id) - endpoint = session.Session._Endpoint(root_endpoint, versions, - project_id=project_id, - needs_project_id=True) - rv = sot._get_version_match(endpoint, session.Version(2, 0), - "service") - match_endpoint = utils.urljoin(match, project_id) - self.assertEqual(rv, match_endpoint) - - def test_get_endpoint_cached(self): - sot = session.Session(None) - service_type = "compute" - interface = "public" - endpoint = "the world wide web" - - 
sot.endpoint_cache[(service_type, interface)] = endpoint - rv = sot.get_endpoint(service_type=service_type, interface=interface) - self.assertEqual(rv, endpoint) diff --git a/openstack/tests/unit/test_stats.py b/openstack/tests/unit/test_stats.py new file mode 100644 index 0000000000..999d2602a5 --- /dev/null +++ b/openstack/tests/unit/test_stats.py @@ -0,0 +1,387 @@ +# Copyright 2013 Hewlett-Packard Development Company, L.P. +# Copyright 2014 OpenStack Foundation +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import itertools +import os +import pprint +import select +import socket +import threading +import time + +import fixtures +from keystoneauth1 import exceptions +import prometheus_client +from requests import exceptions as rexceptions +import testtools.content + +from openstack.tests.unit import base + + +class StatsdFixture(fixtures.Fixture): + def _setUp(self): + self.running = True + self.thread = threading.Thread(target=self.run) + self.thread.daemon = True + self.sock = socket.socket(socket.AF_INET, socket.SOCK_DGRAM) + self.sock.bind(('', 0)) + self.port = self.sock.getsockname()[1] + self.wake_read, self.wake_write = os.pipe() + self.stats = [] + self.thread.start() + self.addCleanup(self._cleanup) + + def run(self): + while self.running: + poll = select.poll() + poll.register(self.sock, select.POLLIN) + poll.register(self.wake_read, select.POLLIN) + ret = poll.poll() + for fd, event in ret: + if fd == self.sock.fileno(): + data = self.sock.recvfrom(1024) + if not data: + return + self.stats.append(data[0]) + if fd == self.wake_read: + return + + def _cleanup(self): + self.running = False + os.write(self.wake_write, b'1\n') + self.thread.join() + + +class TestStats(base.TestCase): + def setUp(self): + self.statsd = StatsdFixture() + self.useFixture(self.statsd) + # note, use 127.0.0.1 rather than localhost to avoid getting ipv6 + # see: https://github.com/jsocol/pystatsd/issues/61 + self.useFixture( + fixtures.EnvironmentVariable('STATSD_HOST', '127.0.0.1') + ) + self.useFixture( + fixtures.EnvironmentVariable('STATSD_PORT', str(self.statsd.port)) + ) + + self.add_info_on_exception('statsd_content', self.statsd.stats) + # Set up the above things before the super setup so that we have the + # environment variables set when the Connection is created. 
+ super().setUp() + + self._registry = prometheus_client.CollectorRegistry() + self.cloud.config._collector_registry = self._registry + self.addOnException(self._add_prometheus_samples) + + def _add_prometheus_samples(self, exc_info): + samples = [] + for metric in self._registry.collect(): + for s in metric.samples: + samples.append(s) + self.addDetail( + 'prometheus_samples', + testtools.content.text_content(pprint.pformat(samples)), + ) + + def assert_reported_stat(self, key, value=None, kind=None): + """Check statsd output + + Check statsd return values. A ``value`` should specify a + ``kind``, however a ``kind`` may be specified without a + ``value`` for a generic match. Leave both empty to just check + for key presence. + + :arg str key: The statsd key + :arg str value: The expected value of the metric ``key`` + :arg str kind: The expected type of the metric ``key`` For example + + - ``c`` counter + - ``g`` gauge + - ``ms`` timing + - ``s`` set + + Note that for ``ms`` type, you are expressing a maximum value, + not an exact value. This is to avoid flakey tests. + """ + + self.assertIsNotNone(self.statsd) + + if value: + self.assertNotEqual(kind, None) + + start = time.time() + while time.time() < (start + 1): + # Note our fake statsd just queues up results in a queue. + # We just keep going through them until we find one that + # matches, or fail out. If statsd pipelines are used, + # large single packets are sent with stats separated by + # newlines; thus we first flatten the stats out into + # single entries. 
+ stats = itertools.chain.from_iterable( + [s.decode('utf-8').split('\n') for s in self.statsd.stats] + ) + for stat in stats: + k, v = stat.split(':') + if key == k: + if kind is None: + # key with no qualifiers is found + return True + + s_value, s_kind = v.split('|') + + # if no kind match, look for other keys + if kind != s_kind: + continue + + if value: + # special-case value|ms because statsd can turn + # timing results into float of indeterminate + # length, hence foiling string matching. + if kind == 'ms': + if float(value) >= float(s_value): + return True + elif value == s_value: + return True + # otherwise keep looking for other matches + continue + + # this key matches + return True + time.sleep(0.1) + + raise Exception(f"Key {key} not found in reported stats") + + def assert_prometheus_stat(self, name, value, labels=None): + sample_value = self._registry.get_sample_value(name, labels) + self.assertEqual(sample_value, value) + + def test_list_projects(self): + mock_uri = self.get_mock_url( + service_type='identity', resource='projects', base_url_append='v3' + ) + + self.register_uris( + [ + dict( + method='GET', + uri=mock_uri, + status_code=200, + json={'projects': []}, + ) + ] + ) + + self.cloud.list_projects() + self.assert_calls() + + self.assert_reported_stat( + 'openstack.api.identity.GET.projects.200', value='1', kind='c' + ) + self.assert_prometheus_stat( + 'openstack_http_requests_total', + 1, + dict( + service_type='identity', + endpoint=mock_uri, + method='GET', + status_code='200', + ), + ) + + def test_projects(self): + mock_uri = self.get_mock_url( + service_type='identity', resource='projects', base_url_append='v3' + ) + + self.register_uris( + [ + dict( + method='GET', + uri=mock_uri, + status_code=200, + json={'projects': []}, + ) + ] + ) + + list(self.cloud.identity.projects()) + self.assert_calls() + + self.assert_reported_stat( + 'openstack.api.identity.GET.projects.200', value='1', kind='c' + ) + self.assert_prometheus_stat( + 
'openstack_http_requests_total', + 1, + dict( + service_type='identity', + endpoint=mock_uri, + method='GET', + status_code='200', + ), + ) + + def test_servers(self): + mock_uri = 'https://compute.example.com/v2.1/servers/detail' + + self.register_uris( + [ + self.get_nova_discovery_mock_dict(), + dict( + method='GET', + uri=mock_uri, + status_code=200, + json={'servers': []}, + ), + ] + ) + + list(self.cloud.compute.servers()) + self.assert_calls() + + self.assert_reported_stat( + 'openstack.api.compute.GET.servers_detail.200', value='1', kind='c' + ) + self.assert_reported_stat( + 'openstack.api.compute.GET.servers_detail.200', + value='5', + kind='ms', + ) + self.assert_prometheus_stat( + 'openstack_http_requests_total', + 1, + dict( + service_type='compute', + endpoint=mock_uri, + method='GET', + status_code='200', + ), + ) + + def test_servers_no_detail(self): + mock_uri = 'https://compute.example.com/v2.1/servers' + + self.register_uris( + [ + dict( + method='GET', + uri=mock_uri, + status_code=200, + json={'servers': []}, + ) + ] + ) + + self.cloud.compute.get('/servers') + self.assert_calls() + + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.200', value='1', kind='c' + ) + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.200', value='5', kind='ms' + ) + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.attempted', value='1', kind='c' + ) + self.assert_prometheus_stat( + 'openstack_http_requests_total', + 1, + dict( + service_type='compute', + endpoint=mock_uri, + method='GET', + status_code='200', + ), + ) + + def test_servers_error(self): + mock_uri = 'https://compute.example.com/v2.1/servers' + + self.register_uris( + [dict(method='GET', uri=mock_uri, status_code=500, json={})] + ) + + self.cloud.compute.get('/servers') + self.assert_calls() + + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.500', value='1', kind='c' + ) + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.500', 
value='5', kind='ms' + ) + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.attempted', value='1', kind='c' + ) + self.assert_prometheus_stat( + 'openstack_http_requests_total', + 1, + dict( + service_type='compute', + endpoint=mock_uri, + method='GET', + status_code='500', + ), + ) + + def test_timeout(self): + mock_uri = 'https://compute.example.com/v2.1/servers' + + self.register_uris( + [dict(method='GET', uri=mock_uri, exc=rexceptions.ConnectTimeout)] + ) + + try: + self.cloud.compute.get('/servers') + except exceptions.ConnectTimeout: + pass + + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.failed', value='1', kind='c' + ) + self.assert_reported_stat( + 'openstack.api.compute.GET.servers.attempted', value='1', kind='c' + ) + + +class TestNoStats(base.TestCase): + def setUp(self): + super().setUp() + self.statsd = StatsdFixture() + self.useFixture(self.statsd) + + def test_no_stats(self): + mock_uri = self.get_mock_url( + service_type='identity', resource='projects', base_url_append='v3' + ) + + self.register_uris( + [ + dict( + method='GET', + uri=mock_uri, + status_code=200, + json={'projects': []}, + ) + ] + ) + + self.cloud.identity._statsd_client = None + list(self.cloud.identity.projects()) + self.assert_calls() + self.assertEqual([], self.statsd.stats) diff --git a/openstack/tests/unit/test_utils.py b/openstack/tests/unit/test_utils.py index f0ea12d604..b9ecb69c20 100644 --- a/openstack/tests/unit/test_utils.py +++ b/openstack/tests/unit/test_utils.py @@ -10,71 +10,101 @@ # License for the specific language governing permissions and limitations # under the License. 
-import mock +import concurrent.futures +import logging import sys +from unittest import mock + +import fixtures +import os_service_types import testtools +import openstack +from openstack import exceptions +from openstack.tests.unit import base from openstack import utils -class Test_enable_logging(testtools.TestCase): - - def _console_tests(self, fake_logging, level, debug, stream): - the_logger = mock.Mock() - fake_logging.getLogger.return_value = the_logger +class Test_enable_logging(base.TestCase): + def setUp(self): + super().setUp() + self.openstack_logger = mock.Mock() + self.openstack_logger.handlers = [] + self.ksa_logger_root = mock.Mock() + self.ksa_logger_root.handlers = [] + self.ksa_logger_1 = mock.Mock() + self.ksa_logger_1.handlers = [] + self.ksa_logger_2 = mock.Mock() + self.ksa_logger_2.handlers = [] + self.ksa_logger_3 = mock.Mock() + self.ksa_logger_3.handlers = [] + self.urllib3_logger = mock.Mock() + self.urllib3_logger.handlers = [] + self.stevedore_logger = mock.Mock() + self.stevedore_logger.handlers = [] + self.fake_get_logger = mock.Mock() + self.fake_get_logger.side_effect = [ + self.openstack_logger, + self.ksa_logger_root, + self.urllib3_logger, + self.stevedore_logger, + self.ksa_logger_1, + self.ksa_logger_2, + self.ksa_logger_3, + ] + self.useFixture( + fixtures.MonkeyPatch('logging.getLogger', self.fake_get_logger) + ) - utils.enable_logging(debug=debug, stream=stream) + def _console_tests(self, level, debug, stream): + openstack.enable_logging(debug=debug, stream=stream) - self.assertEqual(the_logger.addHandler.call_count, 2) - the_logger.setLevel.assert_called_with(level) + self.assertEqual(self.openstack_logger.addHandler.call_count, 1) + self.openstack_logger.setLevel.assert_called_with(level) - def _file_tests(self, fake_logging, level, debug): - the_logger = mock.Mock() - fake_logging.getLogger.return_value = the_logger + def _file_tests(self, level, debug): + file_handler = mock.Mock() + self.useFixture( + 
fixtures.MonkeyPatch('logging.FileHandler', file_handler) + ) fake_path = "fake/path.log" - utils.enable_logging(debug=debug, path=fake_path) + openstack.enable_logging(debug=debug, path=fake_path) - fake_logging.FileHandler.assert_called_with(fake_path) - self.assertEqual(the_logger.addHandler.call_count, 2) - the_logger.setLevel.assert_called_with(level) + file_handler.assert_called_with(fake_path) + self.assertEqual(self.openstack_logger.addHandler.call_count, 1) + self.openstack_logger.setLevel.assert_called_with(level) def test_none(self): - self.assertRaises( - ValueError, utils.enable_logging, - debug=True, path=None, stream=None) + openstack.enable_logging(debug=True) + self.fake_get_logger.assert_has_calls([]) + self.openstack_logger.setLevel.assert_called_with(logging.DEBUG) + self.assertEqual(self.openstack_logger.addHandler.call_count, 1) + self.assertIsInstance( + self.openstack_logger.addHandler.call_args_list[0][0][0], + logging.StreamHandler, + ) - @mock.patch("openstack.utils.logging") - def test_debug_console_stderr(self, fake_logging): - self._console_tests(fake_logging, - fake_logging.DEBUG, True, sys.stderr) + def test_debug_console_stderr(self): + self._console_tests(logging.DEBUG, True, sys.stderr) - @mock.patch("openstack.utils.logging") - def test_warning_console_stderr(self, fake_logging): - self._console_tests(fake_logging, - fake_logging.WARNING, False, sys.stderr) + def test_warning_console_stderr(self): + self._console_tests(logging.INFO, False, sys.stderr) - @mock.patch("openstack.utils.logging") - def test_debug_console_stdout(self, fake_logging): - self._console_tests(fake_logging, - fake_logging.DEBUG, True, sys.stdout) + def test_debug_console_stdout(self): + self._console_tests(logging.DEBUG, True, sys.stdout) - @mock.patch("openstack.utils.logging") - def test_warning_console_stdout(self, fake_logging): - self._console_tests(fake_logging, - fake_logging.WARNING, False, sys.stdout) + def test_warning_console_stdout(self): + 
self._console_tests(logging.INFO, False, sys.stdout) - @mock.patch("openstack.utils.logging") - def test_debug_file(self, fake_logging): - self._file_tests(fake_logging, fake_logging.DEBUG, True) + def test_debug_file(self): + self._file_tests(logging.DEBUG, True) - @mock.patch("openstack.utils.logging") - def test_warning_file(self, fake_logging): - self._file_tests(fake_logging, fake_logging.WARNING, False) + def test_warning_file(self): + self._file_tests(logging.INFO, False) -class Test_urljoin(testtools.TestCase): - +class Test_urljoin(base.TestCase): def test_strings(self): root = "http://www.example.com" leaves = "foo", "bar" @@ -88,3 +118,185 @@ def test_with_none(self): result = utils.urljoin(root, *leaves) self.assertEqual(result, "http://www.example.com/foo/") + + def test_unicode_strings(self): + root = "http://www.example.com" + leaves = "ascii", "extra_chars-™" + + try: + result = utils.urljoin(root, *leaves) + except Exception: + self.fail("urljoin failed on unicode strings") + + self.assertEqual(result, "http://www.example.com/ascii/extra_chars-™") + + +class TestSupportsMicroversion(base.TestCase): + def setUp(self): + super().setUp() + self.adapter = mock.Mock(spec=['get_endpoint_data']) + self.endpoint_data = mock.Mock( + spec=['min_microversion', 'max_microversion'], + min_microversion='1.1', + max_microversion='1.99', + ) + self.adapter.get_endpoint_data.return_value = self.endpoint_data + + def test_requested_supported_no_default(self): + self.adapter.default_microversion = None + self.assertTrue(utils.supports_microversion(self.adapter, '1.2')) + + def test_requested_not_supported_no_default(self): + self.adapter.default_microversion = None + self.assertFalse(utils.supports_microversion(self.adapter, '2.2')) + + def test_requested_not_supported_no_default_exception(self): + self.adapter.default_microversion = None + self.assertRaises( + exceptions.SDKException, + utils.supports_microversion, + self.adapter, + '2.2', + True, + ) + + def 
test_requested_supported_higher_default(self): + self.adapter.default_microversion = '1.8' + self.assertTrue(utils.supports_microversion(self.adapter, '1.6')) + + def test_requested_supported_equal_default(self): + self.adapter.default_microversion = '1.8' + self.assertTrue(utils.supports_microversion(self.adapter, '1.8')) + + def test_requested_supported_lower_default(self): + self.adapter.default_microversion = '1.2' + self.assertFalse(utils.supports_microversion(self.adapter, '1.8')) + + def test_requested_supported_lower_default_exception(self): + self.adapter.default_microversion = '1.2' + self.assertRaises( + exceptions.SDKException, + utils.supports_microversion, + self.adapter, + '1.8', + True, + ) + + @mock.patch('openstack.utils.supports_microversion') + def test_require_microversion(self, sm_mock): + utils.require_microversion(self.adapter, '1.2') + sm_mock.assert_called_with(self.adapter, '1.2', raise_exception=True) + + +class TestMaximumSupportedMicroversion(base.TestCase): + def setUp(self): + super().setUp() + self.adapter = mock.Mock(spec=['get_endpoint_data']) + self.endpoint_data = mock.Mock( + spec=['min_microversion', 'max_microversion'], + min_microversion=None, + max_microversion='1.99', + ) + self.adapter.get_endpoint_data.return_value = self.endpoint_data + + def test_with_none(self): + self.assertIsNone( + utils.maximum_supported_microversion(self.adapter, None) + ) + + def test_with_value(self): + self.assertEqual( + '1.42', utils.maximum_supported_microversion(self.adapter, '1.42') + ) + + def test_value_more_than_max(self): + self.assertEqual( + '1.99', utils.maximum_supported_microversion(self.adapter, '1.104') + ) + + def test_value_less_than_min(self): + self.endpoint_data.min_microversion = '1.42' + self.assertIsNone( + utils.maximum_supported_microversion(self.adapter, '1.2') + ) + + +class TestOsServiceTypesVersion(base.TestCase): + def test_ost_version(self): + ost_version = '2024-05-08T19:22:13.804707' + self.assertEqual( + 
ost_version, + os_service_types.ServiceTypes().version, + "This project must be pinned to the latest version of " + "os-service-types. Please bump requirements.txt accordingly.", + ) + + +class TestTinyDAG(base.TestCase): + test_graph = { + 'a': ['b', 'd', 'f'], + 'b': ['c', 'd'], + 'c': ['d'], + 'd': ['e'], + 'e': [], + 'f': ['e'], + 'g': ['e'], + } + + @classmethod + def _create_tinydag(cls, data): + sot = utils.TinyDAG() + for k, v in data.items(): + sot.add_node(k) + for dep in v: + sot.add_edge(k, dep) + return sot + + def _verify_order(self, test_graph, test_list): + for k, v in test_graph.items(): + for dep in v: + self.assertTrue(test_list.index(k) < test_list.index(dep)) + + def test_topological_sort(self): + sot = self._create_tinydag(self.test_graph) + sorted_list = sot.topological_sort() + self._verify_order(sot.graph, sorted_list) + self.assertEqual(len(self.test_graph.keys()), len(sorted_list)) + + def test_walk(self): + sot = self._create_tinydag(self.test_graph) + sorted_list = [] + for node in sot.walk(): + sorted_list.append(node) + sot.node_done(node) + self._verify_order(sot.graph, sorted_list) + self.assertEqual(len(self.test_graph.keys()), len(sorted_list)) + + def test_walk_parallel(self): + sot = self._create_tinydag(self.test_graph) + sorted_list = [] + with concurrent.futures.ThreadPoolExecutor(max_workers=15) as executor: + for node in sot.walk(timeout=1): + executor.submit(test_walker_fn, sot, node, sorted_list) + self._verify_order(sot.graph, sorted_list) + self.assertEqual(len(self.test_graph.keys()), len(sorted_list)) + + def test_walk_raise(self): + sot = self._create_tinydag(self.test_graph) + bad_node = 'f' + with testtools.ExpectedException(exceptions.SDKException): + for node in sot.walk(timeout=1): + if node != bad_node: + sot.node_done(node) + + def test_add_node_after_edge(self): + sot = utils.TinyDAG() + sot.add_node('a') + sot.add_edge('a', 'b') + sot.add_node('a') + self.assertEqual(sot._graph['a'], set('b')) + + +def 
test_walker_fn(graph, node, lst): + lst.append(node) + graph.node_done(node) diff --git a/openstack/tests/unit/workflow/__init__.py b/openstack/tests/unit/workflow/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/workflow/test_cron_trigger.py b/openstack/tests/unit/workflow/test_cron_trigger.py new file mode 100644 index 0000000000..002f5b8307 --- /dev/null +++ b/openstack/tests/unit/workflow/test_cron_trigger.py @@ -0,0 +1,88 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.unit import base +from openstack.workflow.v2 import cron_trigger + + +FAKE_INPUT = { + 'cluster_id': '8c74607c-5a74-4490-9414-a3475b1926c2', + 'node_id': 'fba2cc5d-706f-4631-9577-3956048d13a2', + 'flavor_id': '1', +} + +FAKE_PARAMS = {} + +FAKE = { + 'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597', + 'pattern': '0 * * * *', + 'remaining_executions': 14, + 'first_execution_time': '1970-01-01T01:00:00.000000', + 'next_execution_time': '1970-01-01T02:00:00.000000', + 'workflow_name': 'cluster-coldmigration', + 'workflow_id': '1995cf40-c22d-4968-b6e8-558942830642', + 'workflow_input': FAKE_INPUT, + 'workflow_params': FAKE_PARAMS, +} + + +class TestCronTrigger(base.TestCase): + def test_basic(self): + sot = cron_trigger.CronTrigger() + self.assertEqual('cron_trigger', sot.resource_key) + self.assertEqual('cron_triggers', sot.resources_key) + self.assertEqual('/cron_triggers', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_delete) + + self.assertDictEqual( + { + 'marker': 'marker', + 'limit': 'limit', + 'sort_keys': 'sort_keys', + 'sort_dirs': 'sort_dirs', + 'fields': 'fields', + 'name': 'name', + 'workflow_name': 'workflow_name', + 'workflow_id': 'workflow_id', + 'workflow_input': 'workflow_input', + 'workflow_params': 'workflow_params', + 'scope': 'scope', + 'pattern': 'pattern', + 'remaining_executions': 'remaining_executions', + 'project_id': 'project_id', + 'first_execution_time': 'first_execution_time', + 'next_execution_time': 'next_execution_time', + 'created_at': 'created_at', + 'updated_at': 'updated_at', + 'all_projects': 'all_projects', + }, + sot._query_mapping._mapping, + ) + + def test_make_it(self): + sot = cron_trigger.CronTrigger(**FAKE) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['pattern'], sot.pattern) + self.assertEqual( + FAKE['remaining_executions'], sot.remaining_executions + ) + self.assertEqual( + 
FAKE['first_execution_time'], sot.first_execution_time + ) + self.assertEqual(FAKE['next_execution_time'], sot.next_execution_time) + self.assertEqual(FAKE['workflow_name'], sot.workflow_name) + self.assertEqual(FAKE['workflow_id'], sot.workflow_id) + self.assertEqual(FAKE['workflow_input'], sot.workflow_input) + self.assertEqual(FAKE['workflow_params'], sot.workflow_params) diff --git a/openstack/tests/unit/workflow/test_execution.py b/openstack/tests/unit/workflow/test_execution.py new file mode 100644 index 0000000000..ef17b266b4 --- /dev/null +++ b/openstack/tests/unit/workflow/test_execution.py @@ -0,0 +1,48 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.unit import base +from openstack.workflow.v2 import execution + + +FAKE_INPUT = { + 'cluster_id': '8c74607c-5a74-4490-9414-a3475b1926c2', + 'node_id': 'fba2cc5d-706f-4631-9577-3956048d13a2', + 'flavor_id': '1', +} + +FAKE = { + 'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597', + 'workflow_name': 'cluster-coldmigration', + 'input': FAKE_INPUT, +} + + +class TestExecution(base.TestCase): + def setUp(self): + super().setUp() + + def test_basic(self): + sot = execution.Execution() + self.assertEqual('execution', sot.resource_key) + self.assertEqual('executions', sot.resources_key) + self.assertEqual('/executions', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_delete) + + def test_instantiate(self): + sot = execution.Execution(**FAKE) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['workflow_name'], sot.workflow_name) + self.assertEqual(FAKE['input'], sot.input) diff --git a/openstack/tests/unit/workflow/test_version.py b/openstack/tests/unit/workflow/test_version.py new file mode 100644 index 0000000000..c822c59c22 --- /dev/null +++ b/openstack/tests/unit/workflow/test_version.py @@ -0,0 +1,41 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.unit import base +from openstack.workflow import version + + +IDENTIFIER = 'IDENTIFIER' +EXAMPLE = { + 'id': IDENTIFIER, + 'links': '2', + 'status': '3', +} + + +class TestVersion(base.TestCase): + def test_basic(self): + sot = version.Version() + self.assertEqual('version', sot.resource_key) + self.assertEqual('versions', sot.resources_key) + self.assertEqual('/', sot.base_path) + self.assertFalse(sot.allow_create) + self.assertFalse(sot.allow_fetch) + self.assertFalse(sot.allow_commit) + self.assertFalse(sot.allow_delete) + self.assertTrue(sot.allow_list) + + def test_make_it(self): + sot = version.Version(**EXAMPLE) + self.assertEqual(EXAMPLE['id'], sot.id) + self.assertEqual(EXAMPLE['links'], sot.links) + self.assertEqual(EXAMPLE['status'], sot.status) diff --git a/openstack/tests/unit/workflow/test_workflow.py b/openstack/tests/unit/workflow/test_workflow.py new file mode 100644 index 0000000000..c2a8bc33a9 --- /dev/null +++ b/openstack/tests/unit/workflow/test_workflow.py @@ -0,0 +1,43 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.unit import base +from openstack.workflow.v2 import workflow + + +FAKE = { + 'scope': 'private', + 'id': 'ffaed25e-46f5-4089-8e20-b3b4722fd597', + 'definition': 'workflow_def', +} + + +class TestWorkflow(base.TestCase): + def setUp(self): + super().setUp() + + def test_basic(self): + sot = workflow.Workflow() + self.assertEqual('workflow', sot.resource_key) + self.assertEqual('workflows', sot.resources_key) + self.assertEqual('/workflows', sot.base_path) + self.assertTrue(sot.allow_fetch) + self.assertTrue(sot.allow_list) + self.assertTrue(sot.allow_create) + self.assertTrue(sot.allow_commit) + self.assertTrue(sot.allow_delete) + + def test_instantiate(self): + sot = workflow.Workflow(**FAKE) + self.assertEqual(FAKE['id'], sot.id) + self.assertEqual(FAKE['scope'], sot.scope) + self.assertEqual(FAKE['definition'], sot.definition) diff --git a/openstack/tests/unit/workflow/v2/__init__.py b/openstack/tests/unit/workflow/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/tests/unit/workflow/v2/test_proxy.py b/openstack/tests/unit/workflow/v2/test_proxy.py new file mode 100644 index 0000000000..39d7976521 --- /dev/null +++ b/openstack/tests/unit/workflow/v2/test_proxy.py @@ -0,0 +1,87 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +from openstack.tests.unit import test_proxy_base +from openstack.workflow.v2 import _proxy +from openstack.workflow.v2 import cron_trigger +from openstack.workflow.v2 import execution +from openstack.workflow.v2 import workflow + + +class TestWorkflowProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_workflows(self): + self.verify_list(self.proxy.workflows, workflow.Workflow) + + def test_executions(self): + self.verify_list(self.proxy.executions, execution.Execution) + + def test_workflow_get(self): + self.verify_get(self.proxy.get_workflow, workflow.Workflow) + + def test_execution_get(self): + self.verify_get(self.proxy.get_execution, execution.Execution) + + def test_workflow_create(self): + self.verify_create(self.proxy.create_workflow, workflow.Workflow) + + def test_workflow_update(self): + self.verify_update(self.proxy.update_workflow, workflow.Workflow) + + def test_execution_create(self): + self.verify_create(self.proxy.create_execution, execution.Execution) + + def test_workflow_delete(self): + self.verify_delete(self.proxy.delete_workflow, workflow.Workflow, True) + + def test_execution_delete(self): + self.verify_delete( + self.proxy.delete_execution, execution.Execution, True + ) + + def test_workflow_find(self): + self.verify_find(self.proxy.find_workflow, workflow.Workflow) + + def test_execution_find(self): + self.verify_find(self.proxy.find_execution, execution.Execution) + + +class TestCronTriggerProxy(test_proxy_base.TestProxyBase): + def setUp(self): + super().setUp() + self.proxy = _proxy.Proxy(self.session) + + def test_cron_triggers(self): + self.verify_list(self.proxy.cron_triggers, cron_trigger.CronTrigger) + + def test_cron_trigger_get(self): + self.verify_get(self.proxy.get_cron_trigger, cron_trigger.CronTrigger) + + def test_cron_trigger_create(self): + self.verify_create( + self.proxy.create_cron_trigger, cron_trigger.CronTrigger + ) + + def 
test_cron_trigger_delete(self): + self.verify_delete( + self.proxy.delete_cron_trigger, cron_trigger.CronTrigger, True + ) + + def test_cron_trigger_find(self): + self.verify_find( + self.proxy.find_cron_trigger, + cron_trigger.CronTrigger, + expected_kwargs={'all_projects': False}, + ) diff --git a/openstack/types.py b/openstack/types.py new file mode 100644 index 0000000000..d84685e88b --- /dev/null +++ b/openstack/types.py @@ -0,0 +1,23 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import typing as ty + + +# Workaround Python's lack of an undefined sentinel +# https://python-patterns.guide/python/sentinel-object/ +class Unset: + def __bool__(self) -> ty.Literal[False]: + return False + + +UNSET: Unset = Unset() diff --git a/openstack/utils.py b/openstack/utils.py index 56eedee37e..9750b3722d 100644 --- a/openstack/utils.py +++ b/openstack/utils.py @@ -10,57 +10,34 @@ # License for the specific language governing permissions and limitations # under the License. -import logging - - -def enable_logging(debug=False, path=None, stream=None): - """Enable logging to a file at path and/or a console stream. - - This function is available for debugging purposes. If you wish to - log this package's message in your application, the standard library - ``logging`` package will receive these messages in any handlers you - create. 
- - :param bool debug: Set this to ``True`` to receive debug messages, - which includes HTTP requests and responses, - or ``False`` for warning messages. - :param str path: If a *path* is specified, logging output will - written to that file in addition to sys.stderr. - The path is passed to logging.FileHandler, - which will append messages the file (and create - it if needed). - :param stream: One of ``None `` or ``sys.stdout`` or ``sys.stderr``. - If it is ``None``, nothing is logged to a stream. - If it isn't ``None``, console output is logged - to this stream. - - :rtype: None - """ - if path is None and stream is None: - raise ValueError("path and/or stream must be set") +import collections.abc +import hashlib +import io +import queue +import string +import threading +import time +import typing as ty - logger = logging.getLogger('openstack') - ksalog = logging.getLogger('keystoneauth') - formatter = logging.Formatter( - '%(asctime)s %(levelname)s: %(name)s %(message)s') +import keystoneauth1 +from keystoneauth1 import adapter as ks_adapter +from keystoneauth1 import discover - if stream is not None: - console = logging.StreamHandler(stream) - console.setFormatter(formatter) - logger.addHandler(console) - ksalog.addHandler(console) +from openstack import _log +from openstack import exceptions - if path is not None: - file_handler = logging.FileHandler(path) - file_handler.setFormatter(formatter) - logger.addHandler(file_handler) - ksalog.addHandler(file_handler) +_ProxyT = ty.TypeVar('_ProxyT') - logger.setLevel(logging.DEBUG if debug else logging.WARNING) - ksalog.setLevel(logging.DEBUG if debug else logging.WARNING) +if ty.TYPE_CHECKING: + from openstack.block_storage.v2 import _proxy as _block_storage_v2 + from openstack.block_storage.v3 import _proxy as _block_storage_v3 + from openstack.identity.v2 import _proxy as _identity_v2 + from openstack.identity.v3 import _proxy as _identity_v3 + from openstack.image.v1 import _proxy as _image_v1 + from 
openstack.image.v2 import _proxy as _image_v2 -def urljoin(*args): +def urljoin(*args: str | None) -> str: """A custom version of urljoin that simply joins strings into a path. The real urljoin takes into account web semantics like when joining a url @@ -68,3 +45,708 @@ def urljoin(*args): link. We generally won't care about that in client. """ return '/'.join(str(a or '').strip('/') for a in args) + + +def iterate_timeout( + timeout: int | None, + message: str, + wait: int | float | None = 2, +) -> ty.Generator[int, None, None]: + """Iterate and raise an exception on timeout. + + This is a generator that will continually yield and sleep for + wait seconds, and if the timeout is reached, will raise an exception + with . + + :param timeout: Maximum number of seconds to wait for transition. Set to + ``None`` to wait forever. + :param message: The message to use for the exception if the timeout is + reached. + :param wait: Number of seconds to wait between checks. Set to ``None`` + to use the default interval. + + :returns: None + :raises: :class:`~openstack.exceptions.ResourceTimeout` transition + :raises: :class:`~openstack.exceptions.SDKException` if ``wait`` is not a + valid float, integer or None. + """ + log = _log.setup_logging('openstack.iterate_timeout') + + try: + # None as a wait winds up flowing well in the per-resource cache + # flow. We could spread this logic around to all of the calling + # points, but just having this treat None as "I don't have a value" + # seems friendlier + if wait is None: + wait = 2 + elif wait == 0: + # wait should be < timeout, unless timeout is None + wait = 0.1 if timeout is None else min(0.1, timeout) + wait = float(wait) + except ValueError: + raise exceptions.SDKException( + f"Wait value must be an int or float value. 
{wait!r} given instead" + ) + + start = time.time() + count = 0 + while (timeout is None) or (time.time() < start + timeout): + count += 1 + yield count + log.debug('Waiting %s seconds', wait) + time.sleep(wait) + raise exceptions.ResourceTimeout(message) + + +class _AccessSaver: + __slots__ = ('keys',) + + def __init__(self) -> None: + self.keys: list[str] = [] + + def __getitem__(self, key: str) -> None: + self.keys.append(key) + + +def get_string_format_keys( + fmt_string: str, old_style: bool = True +) -> list[str]: + """Gets a list of required keys from a format string + + Required mostly for parsing base_path urls for required keys, which + use the old style string formatting. + """ + if old_style: + a = _AccessSaver() + fmt_string % a + + return a.keys + else: + keys = [] + for t in string.Formatter().parse(fmt_string): + if t[1] is not None: + keys.append(t[1]) + return keys + + +def supports_version( + adapter: ks_adapter.Adapter, + version: str, + raise_exception: bool = False, +) -> bool: + """Determine if the given adapter supports the given version. + + Checks the version asserted by the service and ensures this matches the + provided version. ``version`` can be a major version or a major-minor + version + + :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. + :param version: String containing the desired version. + :param raise_exception: Raise exception when requested version + is not supported by the server. + :returns: ``True`` if the service supports the version, else ``False``. + :raises: :class:`~openstack.exceptions.SDKException` when + ``raise_exception`` is ``True`` and requested version is not supported. 
+ """ + + def _supports_version() -> bool: + required = discover.normalize_version_number(version) + major_version = adapter.get_api_major_version() + + if not major_version: + return False + + if not discover.version_match(required, major_version): + return False + + return True + + supported = _supports_version() + + if not supported and raise_exception: + raise exceptions.SDKException( + f'Required version {version} is not supported by the server' + ) + + return supported + + +@ty.overload +def ensure_service_version( + proxy: '_identity_v2.Proxy | _identity_v3.Proxy', + version: ty.Literal['2'], +) -> '_identity_v2.Proxy': ... + + +@ty.overload +def ensure_service_version( + proxy: '_identity_v2.Proxy | _identity_v3.Proxy', + version: ty.Literal['3'], +) -> '_identity_v3.Proxy': ... + + +@ty.overload +def ensure_service_version( + proxy: '_block_storage_v2.Proxy | _block_storage_v3.Proxy', + version: ty.Literal['2'], +) -> '_block_storage_v2.Proxy': ... + + +@ty.overload +def ensure_service_version( + proxy: '_block_storage_v2.Proxy | _block_storage_v3.Proxy', + version: ty.Literal['3'], +) -> '_block_storage_v3.Proxy': ... + + +@ty.overload +def ensure_service_version( + proxy: '_image_v1.Proxy | _image_v2.Proxy', + version: ty.Literal['1'], +) -> '_image_v1.Proxy': ... + + +@ty.overload +def ensure_service_version( + proxy: '_image_v1.Proxy | _image_v2.Proxy', + version: ty.Literal['2'], +) -> '_image_v2.Proxy': ... + + +@ty.overload +def ensure_service_version(proxy: _ProxyT, version: str) -> _ProxyT: ... + + +def ensure_service_version(proxy: ty.Any, version: str) -> ty.Any: + """Ensure the provided proxy is for a given version. + + This is intended for type narrowing. + + :param proxy: A versioned service proxy. + :param version: The required API version string. + :returns: The proxy, typed as the specific version requested. + :raises: :class:`~openstack.exceptions.SDKException` if the proxy is not + the requested version. 
+ """ + if proxy.api_version != version: + raise exceptions.SDKException( + f"Service requires API version {version!r} but the configured " + f"version is {proxy.api_version!r}" + ) + return proxy + + +def supports_microversion( + adapter: ks_adapter.Adapter, + microversion: str | int | float | ty.Iterable[str | int | float], + raise_exception: bool = False, +) -> bool: + """Determine if the given adapter supports the given microversion. + + Checks the min and max microversion asserted by the service and ensures + ``min <= microversion <= max``. If set, the current default microversion is + taken into consideration to ensure ``microversion <= default``. + + :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. + :param microversion: String containing the desired microversion. + :param raise_exception: Raise exception when requested microversion + is not supported by the server or is higher than the current default + microversion. + :returns: True if the service supports the microversion, else False. + :raises: :class:`~openstack.exceptions.SDKException` when + ``raise_exception`` is ``True`` and requested microversion is not + supported. 
+ """ + endpoint_data = adapter.get_endpoint_data() + if endpoint_data is None: + if raise_exception: + raise exceptions.SDKException('Could not retrieve endpoint data') + return False + + if ( + endpoint_data.min_microversion + and endpoint_data.max_microversion + and discover.version_between( + endpoint_data.min_microversion, + endpoint_data.max_microversion, + microversion, + ) + ): + if adapter.default_microversion is not None: + # If default_microversion is set - evaluate + # whether it match the expectation + candidate = discover.normalize_version_number( + adapter.default_microversion + ) + required = discover.normalize_version_number(microversion) + supports = discover.version_match(required, candidate) + if raise_exception and not supports: + raise exceptions.SDKException( + f'Required microversion {microversion} is higher than ' + f'currently selected {adapter.default_microversion}' + ) + return supports + + return True + + if raise_exception: + raise exceptions.SDKException( + f'Required microversion {microversion} is not supported ' + f'by the server side' + ) + + return False + + +def require_microversion(adapter: ks_adapter.Adapter, required: str) -> None: + """Require microversion. + + :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. + :param str microversion: String containing the desired microversion. + :raises: :class:`~openstack.exceptions.SDKException` when requested + microversion is not supported + """ + supports_microversion(adapter, required, raise_exception=True) + + +def pick_microversion( + session: ks_adapter.Adapter, required: str +) -> str | None: + """Get a new microversion if it is higher than session's default. + + :param session: The session to use for making this request. + :param required: Minimum version that is required for an action. + :return: ``required`` as a string if the ``session``'s default is too low, + otherwise the ``session``'s default. Returns ``None`` if both + are ``None``. 
+ :raises: TypeError if ``required`` is invalid. + :raises: :class:`~openstack.exceptions.SDKException` if requested + microversion is not supported. + """ + required_normalized = None + if required is not None: + required_normalized = discover.normalize_version_number(required) + + if session.default_microversion is not None: + default = discover.normalize_version_number( + session.default_microversion + ) + + if required_normalized is None: + required_normalized = default + else: + required_normalized = ( + default + if discover.version_match(required_normalized, default) + else required_normalized + ) + + if required_normalized is None: + return None + + if not supports_microversion(session, required_normalized): + raise exceptions.SDKException( + 'Requested microversion is not supported by the server side ' + 'or the default microversion is too low' + ) + return discover.version_to_string(required_normalized) + + +def maximum_supported_microversion( + adapter: ks_adapter.Adapter, + client_maximum: str | None, +) -> str | None: + """Determine the maximum microversion supported by both client and server. + + :param adapter: :class:`~keystoneauth1.adapter.Adapter` instance. + :param client_maximum: Maximum microversion supported by the client. + If ``None``, ``None`` is returned. + + :returns: the maximum supported microversion as string or ``None``. + """ + if client_maximum is None: + return None + + # NOTE(dtantsur): if we cannot determine supported microversions, fall back + # to the default one. 
+ try: + endpoint_data = adapter.get_endpoint_data() + except keystoneauth1.exceptions.discovery.DiscoveryFailure: + endpoint_data = None + + if endpoint_data is None: + log = _log.setup_logging('openstack') + log.warning( + 'Cannot determine endpoint data for service %s', + adapter.service_type or adapter.service_name, + ) + return None + + if not endpoint_data.max_microversion: + return None + + client_max = discover.normalize_version_number(client_maximum) + server_max = discover.normalize_version_number( + endpoint_data.max_microversion + ) + + if endpoint_data.min_microversion: + server_min = discover.normalize_version_number( + endpoint_data.min_microversion + ) + if client_max < server_min: + # NOTE(dtantsur): we may want to raise in this case, but this keeps + # the current behavior intact. + return None + + result = min(client_max, server_max) + return discover.version_to_string(result) + + +def _hashes_up_to_date( + md5: str | None, + sha256: str | None, + md5_key: str, + sha256_key: str, +) -> bool: + """Compare md5 and sha256 hashes for being up to date + + md5 and sha256 are the current values. + md5_key and sha256_key are the previous values. 
+ """ + up_to_date = False + if md5 and md5_key == md5: + up_to_date = True + if sha256 and sha256_key == sha256: + up_to_date = True + if md5 and md5_key != md5: + up_to_date = False + if sha256 and sha256_key != sha256: + up_to_date = False + return up_to_date + + +def _calculate_data_hashes( + data: io.BufferedReader | bytes, +) -> tuple[str, str]: + _md5 = hashlib.md5(usedforsecurity=False) + _sha256 = hashlib.sha256() + + if isinstance(data, io.BufferedIOBase): + for chunk in iter(lambda: data.read(8192), b''): + _md5.update(chunk) + _sha256.update(chunk) + elif isinstance(data, bytes): + _md5.update(data) + _sha256.update(data) + else: + raise TypeError( + 'unsupported type for data; expected IO stream or bytes; got ' + '{type(data)}' + ) + + return _md5.hexdigest(), _sha256.hexdigest() + + +def _get_file_hashes(filename: str) -> tuple[str, str]: + _md5, _sha256 = (None, None) + with open(filename, 'rb') as file_obj: + _md5, _sha256 = _calculate_data_hashes(file_obj) + + return _md5, _sha256 + + +class TinyDAG: + """Tiny DAG + + Bases on the Kahn's algorithm, and enables parallel visiting of the nodes + (parallel execution of the workflow items). + """ + + def __init__(self) -> None: + self._reset() + self._lock = threading.Lock() + + def _reset(self) -> None: + self._graph: dict[str, set[str]] = {} + self._wait_timeout = 120 + + @property + def graph(self) -> dict[str, set[str]]: + """Get graph as adjacency dict""" + return self._graph + + def add_node(self, node: str) -> None: + self._graph.setdefault(node, set()) + + def add_edge(self, u: str, v: str) -> None: + self._graph[u].add(v) + + def walk(self, timeout: int | None = None) -> 'TinyDAG': + """Start the walking from the beginning.""" + if timeout: + self._wait_timeout = timeout + return self + + def __iter__(self) -> 'TinyDAG': + self._start_traverse() + return self + + def __next__(self) -> str: + # Start waiting if it is expected to get something + # (counting down from graph length to 0). 
+ if self._it_cnt > 0: + self._it_cnt -= 1 + try: + res = self._queue.get(block=True, timeout=self._wait_timeout) + return res + + except queue.Empty: + raise exceptions.SDKException( + 'Timeout waiting for cleanup task to complete' + ) + else: + raise StopIteration + + def node_done(self, node: str) -> None: + """Mark node as "processed" and put following items into the queue""" + self._done.add(node) + + for v in self._graph[node]: + self._run_in_degree[v] -= 1 + if self._run_in_degree[v] == 0: + self._queue.put(v) + + def _start_traverse(self) -> None: + """Initialize graph traversing""" + self._run_in_degree = self._get_in_degree() + self._queue: queue.Queue[str] = queue.Queue() + self._done: set[str] = set() + self._it_cnt = len(self._graph) + + for k, v in self._run_in_degree.items(): + if v == 0: + self._queue.put(k) + + def _get_in_degree(self) -> dict[str, int]: + """Calculate the in_degree (count incoming) for nodes""" + _in_degree: dict[str, int] = {u: 0 for u in self._graph.keys()} + for u in self._graph: + for v in self._graph[u]: + _in_degree[v] += 1 + + return _in_degree + + def topological_sort(self) -> list[str]: + """Return the graph nodes in the topological order""" + result = [] + for node in self: + result.append(node) + self.node_done(node) + + return result + + def size(self) -> int: + return len(self._graph.keys()) + + def is_complete(self) -> bool: + return len(self._done) == self.size() + + +# Importing Munch is a relatively expensive operation (0.3s) while we do not +# really even need much of it. Before we can rework all places where we rely on +# it we can have a reduced version. 
+class Munch(dict[str, ty.Any]): + """A slightly stripped version of munch.Munch class""" + + def __init__(self, *args: ty.Any, **kwargs: ty.Any): + self.update(*args, **kwargs) + + # only called if k not found in normal places + def __getattr__(self, k: str) -> ty.Any: + """Gets key if it exists, otherwise throws AttributeError.""" + try: + return object.__getattribute__(self, k) + except AttributeError: + try: + return self[k] + except KeyError: + raise AttributeError(k) + + def __setattr__(self, k: str, v: ty.Any) -> None: + """Sets attribute k if it exists, otherwise sets key k. A KeyError + raised by set-item (only likely if you subclass Munch) will + propagate as an AttributeError instead. + """ + try: + # Throws exception if not in prototype chain + object.__getattribute__(self, k) + except AttributeError: + try: + self[k] = v + except Exception: + raise AttributeError(k) + else: + object.__setattr__(self, k, v) + + def __delattr__(self, k: str) -> None: + """Deletes attribute k if it exists, otherwise deletes key k. + + A KeyError raised by deleting the key - such as when the key is missing + - will propagate as an AttributeError instead. + """ + try: + # Throws exception if not in prototype chain + object.__getattribute__(self, k) + except AttributeError: + try: + del self[k] + except KeyError: + raise AttributeError(k) + else: + object.__delattr__(self, k) + + def toDict(self) -> dict[str, ty.Any]: + """Recursively converts a munch back into a dictionary.""" + return unmunchify(self) + + @property + def __dict__(self) -> dict[str, ty.Any]: # type: ignore[override] + return self.toDict() + + def __repr__(self) -> str: + """Invertible* string-form of a Munch.""" + return f'{self.__class__.__name__}({dict.__repr__(self)})' + + def __dir__(self) -> list[str]: + return list(self.keys()) + + def __getstate__(self) -> dict[str, ty.Any]: + """Implement a serializable interface used for pickling. + See https://docs.python.org/3.6/library/pickle.html. 
+ """ + return {k: v for k, v in self.items()} + + def __setstate__(self, state: dict[str, ty.Any]) -> None: + """Implement a serializable interface used for pickling. + See https://docs.python.org/3.6/library/pickle.html. + """ + self.clear() + self.update(state) + + # TODO(stephenfin): This needs to be stricter in the types that it will + # accept. By limiting it to the primitive types (or subclasses of same) we + # should cover everything we (sdk) care about and will be able to type the + # results. + @classmethod + def fromDict(cls, d: dict[str, ty.Any]) -> 'Munch': + """Recursively transforms a dictionary into a Munch via copy.""" + # Munchify x, using `seen` to track object cycles + seen: dict[int, ty.Any] = dict() + + def munchify_cycles(obj: ty.Any) -> ty.Any: + try: + return seen[id(obj)] + except KeyError: + pass + + seen[id(obj)] = partial = pre_munchify(obj) + return post_munchify(partial, obj) + + def pre_munchify(obj: ty.Any) -> ty.Any: + if isinstance(obj, collections.abc.Mapping): + return cls({}) + elif isinstance(obj, list): + return type(obj)() + elif isinstance(obj, tuple): + type_factory = getattr(obj, "_make", type(obj)) + return type_factory(munchify_cycles(item) for item in obj) + else: + return obj + + def post_munchify(partial: ty.Any, obj: ty.Any) -> ty.Any: + if isinstance(obj, collections.abc.Mapping): + partial.update( + (k, munchify_cycles(obj[k])) for k in obj.keys() + ) + elif isinstance(obj, list): + partial.extend(munchify_cycles(item) for item in obj) + elif isinstance(obj, tuple): + for item_partial, item in zip(partial, obj): + post_munchify(item_partial, item) + + return partial + + return ty.cast('Munch', munchify_cycles(d)) + + def copy(self) -> 'Munch': + return self.fromDict(self) + + def update(self, *args: ty.Any, **kwargs: ty.Any) -> None: + """ + Override built-in method to call custom __setitem__ method that may + be defined in subclasses. 
+ """ + for k, v in dict(*args, **kwargs).items(): + self[k] = v + + def get(self, k: str, d: ty.Any = None) -> ty.Any: + """ + D.get(k[,d]) -> D[k] if k in D, else d. d defaults to None. + """ + if k not in self: + return d + return self[k] + + def setdefault(self, k: str, d: ty.Any = None) -> ty.Any: + """ + D.setdefault(k[,d]) -> D.get(k,d), also set D[k]=d if k not in D + """ + if k not in self: + self[k] = d + return self[k] + + +def munchify(x: dict[str, ty.Any], factory: type[Munch] = Munch) -> Munch: + """Recursively transforms a dictionary into a Munch via copy.""" + return Munch.fromDict(x) + + +def unmunchify(x: Munch) -> dict[str, ty.Any]: + """Recursively converts a Munch into a dictionary.""" + + # Munchify x, using `seen` to track object cycles + seen: dict[int, ty.Any] = dict() + + def unmunchify_cycles(obj: ty.Any) -> ty.Any: + try: + return seen[id(obj)] + except KeyError: + pass + + seen[id(obj)] = partial = pre_unmunchify(obj) + return post_unmunchify(partial, obj) + + def pre_unmunchify(obj: ty.Any) -> ty.Any: + if isinstance(obj, collections.abc.Mapping): + return dict() + elif isinstance(obj, list): + return type(obj)() + elif isinstance(obj, tuple): + type_factory = getattr(obj, "_make", type(obj)) + return type_factory(unmunchify_cycles(item) for item in obj) + else: + return obj + + def post_unmunchify(partial: ty.Any, obj: ty.Any) -> ty.Any: + if isinstance(obj, collections.abc.Mapping): + partial.update((k, unmunchify_cycles(obj[k])) for k in obj.keys()) + elif isinstance(obj, list): + partial.extend(unmunchify_cycles(v) for v in obj) + elif isinstance(obj, tuple): + for value_partial, value in zip(partial, obj): + post_unmunchify(value_partial, value) + + return partial + + return ty.cast(dict[str, ty.Any], unmunchify_cycles(x)) diff --git a/openstack/warnings.py b/openstack/warnings.py new file mode 100644 index 0000000000..885bd4e527 --- /dev/null +++ b/openstack/warnings.py @@ -0,0 +1,74 @@ +# Licensed under the Apache License, 
Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +# API deprecation warnings +# +# These are for service-related deprecations, such as the removal of an API or +# API field due to a microversion. + + +class OpenStackDeprecationWarning(DeprecationWarning): + """Base class for warnings about deprecated features in openstacksdk.""" + + +class RemovedResourceWarning(OpenStackDeprecationWarning): + """Indicates that a resource has been removed in newer API versions and + should not be used. + """ + + +class RemovedFieldWarning(OpenStackDeprecationWarning): + """Indicates that a field has been removed in newer API versions and should + not be used. + """ + + +class LegacyAPIWarning(OpenStackDeprecationWarning): + """Indicates an API that is in 'legacy' status, a long term deprecation.""" + + +# Package deprecation warnings +# +# These are for SDK-specific deprecations, such as removed functions or +# function parameters. + + +class _RemovedInSDKWarning(PendingDeprecationWarning): + """Indicates an argument that is deprecated for removal. + + This is a base class and should not be used directly. + """ + + +class RemovedInSDK50Warning(_RemovedInSDKWarning): + """Indicates an argument that is deprecated for removal in SDK 5.0.""" + + +class RemovedInSDK60Warning(_RemovedInSDKWarning): + """Indicates an argument that is deprecated for removal in SDK 6.0.""" + + +# General warnings +# +# These are usually related to misconfigurations. 
+ + +class OpenStackWarning(Warning): + """Base class for general warnings in openstacksdk.""" + + +class ConfigurationWarning(OpenStackWarning): + """Indicates an issue with configuration.""" + + +class UnsupportedServiceVersion(OpenStackWarning): + """Indicates a major version that SDK doesn't understand.""" diff --git a/openstack/workflow/__init__.py b/openstack/workflow/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/workflow/v2/__init__.py b/openstack/workflow/v2/__init__.py new file mode 100644 index 0000000000..e69de29bb2 diff --git a/openstack/workflow/v2/_proxy.py b/openstack/workflow/v2/_proxy.py new file mode 100644 index 0000000000..43670c8e9c --- /dev/null +++ b/openstack/workflow/v2/_proxy.py @@ -0,0 +1,361 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ +import typing as ty + +from openstack import proxy +from openstack import resource +from openstack.workflow.v2 import cron_trigger as _cron_trigger +from openstack.workflow.v2 import execution as _execution +from openstack.workflow.v2 import workflow as _workflow + + +class Proxy(proxy.Proxy): + api_version: ty.ClassVar[ty.Literal['2']] = '2' + + _resource_registry = { + "execution": _execution.Execution, + "workflow": _workflow.Workflow, + } + + def create_workflow(self, **attrs): + """Create a new workflow from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.workflow.v2.workflow.Workflow`, + comprised of the properties on the Workflow class. + + :returns: The results of workflow creation + :rtype: :class:`~openstack.workflow.v2.workflow.Workflow` + """ + return self._create(_workflow.Workflow, **attrs) + + def update_workflow(self, workflow, **attrs): + """Update workflow from attributes + + :param workflow: The value can be either the name of a workflow or a + :class:`~openstack.workflow.v2.workflow.Workflow` + instance. + :param dict attrs: Keyword arguments which will be used to update + a :class:`~openstack.workflow.v2.workflow.Workflow`, + comprised of the properties on the Workflow class. + + :returns: The results of workflow update + :rtype: :class:`~openstack.workflow.v2.workflow.Workflow` + """ + return self._update(_workflow.Workflow, workflow, **attrs) + + def get_workflow(self, *attrs): + """Get a workflow + + :param workflow: The value can be the name of a workflow or + :class:`~openstack.workflow.v2.workflow.Workflow` instance. + + :returns: One :class:`~openstack.workflow.v2.workflow.Workflow` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + workflow matching the name could be found. 
+ """ + return self._get(_workflow.Workflow, *attrs) + + def workflows(self, **query): + """Retrieve a generator of workflows + + :param kwargs query: Optional query parameters to be sent to + restrict the workflows to be returned. Available parameters + include: + + * limit: Requests at most the specified number of items be + returned from the query. + * marker: Specifies the ID of the last-seen workflow. Use the + limit parameter to make an initial limited request and use + the ID of the last-seen workflow from the response as the + marker parameter value in a subsequent limited request. + + :returns: A generator of workflow instances. + """ + return self._list(_workflow.Workflow, **query) + + def delete_workflow(self, value, ignore_missing=True): + """Delete a workflow + + :param value: The value can be either the name of a workflow or a + :class:`~openstack.workflow.v2.workflow.Workflow` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will + be raised when the workflow does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent workflow. + + :returns: ``None`` + """ + return self._delete( + _workflow.Workflow, value, ignore_missing=ignore_missing + ) + + def find_workflow(self, name_or_id, ignore_missing=True): + """Find a single workflow + + :param name_or_id: The name or ID of an workflow. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. 
+        :returns: One :class:`~openstack.workflow.v2.workflow.Workflow` or
+            None
+        """
+        return self._find(
+            _workflow.Workflow, name_or_id, ignore_missing=ignore_missing
+        )
+
+    def create_execution(self, **attrs):
+        """Create a new execution from attributes
+
+        :param workflow_name: The name of target workflow to execute.
+        :param dict attrs: Keyword arguments which will be used to create
+            a :class:`~openstack.workflow.v2.execution.Execution`,
+            comprised of the properties on the Execution class.
+
+        :returns: The results of execution creation
+        :rtype: :class:`~openstack.workflow.v2.execution.Execution`
+        """
+        return self._create(_execution.Execution, **attrs)
+
+    def get_execution(self, *attrs):
+        """Get an execution
+
+        :param workflow_name: The name of target workflow to execute.
+        :param execution: The value can be either the ID of an execution or a
+            :class:`~openstack.workflow.v2.execution.Execution` instance.
+
+        :returns: One :class:`~openstack.workflow.v2.execution.Execution`
+        :raises: :class:`~openstack.exceptions.NotFoundException` when no
+            execution matching the criteria could be found.
+        """
+        return self._get(_execution.Execution, *attrs)
+
+    def executions(self, **query):
+        """Retrieve a generator of executions
+
+        :param kwargs query: Optional query parameters to be sent to
+            restrict the executions to be returned. Available parameters
+            include:
+
+            * limit: Requests at most the specified number of items be
+              returned from the query.
+            * marker: Specifies the ID of the last-seen execution. Use the
+              limit parameter to make an initial limited request and use
+              the ID of the last-seen execution from the response as the
+              marker parameter value in a subsequent limited request.
+
+        :returns: A generator of execution instances.
+ """ + return self._list(_execution.Execution, **query) + + def delete_execution(self, value, ignore_missing=True): + """Delete an execution + + :param value: The value can be either the name of a execution or a + :class:`~openstack.workflow.v2.execute.Execution` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the execution does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent execution. + + :returns: ``None`` + """ + return self._delete( + _execution.Execution, value, ignore_missing=ignore_missing + ) + + def find_execution(self, name_or_id, ignore_missing=True): + """Find a single execution + + :param name_or_id: The name or ID of an execution. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the resource does not exist. + When set to ``True``, None will be returned when + attempting to find a nonexistent resource. + :returns: One :class:`~openstack.compute.v2.execution.Execution` or + None + """ + return self._find( + _execution.Execution, name_or_id, ignore_missing=ignore_missing + ) + + def create_cron_trigger(self, **attrs): + """Create a new cron trigger from attributes + + :param dict attrs: Keyword arguments which will be used to create + a :class:`~openstack.workflow.v2.cron_trigger.CronTrigger`, + comprised of the properties on the CronTrigger class. + + :returns: The results of cron trigger creation + :rtype: :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` + """ + return self._create(_cron_trigger.CronTrigger, **attrs) + + def get_cron_trigger(self, cron_trigger): + """Get a cron trigger + + :param cron_trigger: The value can be the name of a cron_trigger or + :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` instance. 
+ + :returns: One :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` + :raises: :class:`~openstack.exceptions.NotFoundException` when no + cron triggers matching the criteria could be found. + """ + return self._get(_cron_trigger.CronTrigger, cron_trigger) + + def cron_triggers(self, *, all_projects=False, **query): + """Retrieve a generator of cron triggers + + :param bool all_projects: When set to ``True``, list cron triggers from + all projects. Admin-only by default. + :param kwargs query: Optional query parameters to be sent to + restrict the cron triggers to be returned. Available parameters + include: + + * limit: Requests at most the specified number of items be + returned from the query. + * marker: Specifies the ID of the last-seen cron trigger. Use the + limit parameter to make an initial limited request and use + the ID of the last-seen cron trigger from the response as the + marker parameter value in a subsequent limited request. + + :returns: A generator of CronTrigger instances. + """ + if all_projects: + query['all_projects'] = True + return self._list(_cron_trigger.CronTrigger, **query) + + def delete_cron_trigger(self, value, ignore_missing=True): + """Delete a cron trigger + + :param value: The value can be either the name of a cron trigger or a + :class:`~openstack.workflow.v2.cron_trigger.CronTrigger` + instance. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be + raised when the cron trigger does not exist. + When set to ``True``, no exception will be set when + attempting to delete a nonexistent cron trigger. 
+ + :returns: ``None`` + """ + return self._delete( + _cron_trigger.CronTrigger, value, ignore_missing=ignore_missing + ) + + # TODO(stephenfin): Drop 'query' parameter or apply it consistently + def find_cron_trigger( + self, + name_or_id, + ignore_missing=True, + *, + all_projects=False, + **query, + ): + """Find a single cron trigger + + :param name_or_id: The name or ID of a cron trigger. + :param bool ignore_missing: When set to ``False`` + :class:`~openstack.exceptions.NotFoundException` will be raised + when the resource does not exist. When set to ``True``, None will + be returned when attempting to find a nonexistent resource. + :param bool all_projects: When set to ``True``, search for cron + triggers by name across all projects. Note that this will likely + result in a higher chance of duplicates. + :param kwargs query: Optional query parameters to be sent to limit + the cron triggers being returned. + + :returns: One :class:`~openstack.compute.v2.cron_trigger.CronTrigger` + or None + :raises: :class:`~openstack.exceptions.NotFoundException` when no + resource can be found. + :raises: :class:`~openstack.exceptions.DuplicateResource` when multiple + resources are found. + """ + return self._find( + _cron_trigger.CronTrigger, + name_or_id, + ignore_missing=ignore_missing, + all_projects=all_projects, + **query, + ) + + # ========== Utilities ========== + + def wait_for_status( + self, + res: resource.ResourceT, + status: str, + failures: list[str] | None = None, + interval: int | float | None = 2, + wait: int | None = None, + attribute: str = 'status', + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for the resource to be in a particular status. + + :param session: The session to use for making this request. + :param resource: The resource to wait on to reach the status. The + resource must have a status attribute specified via ``attribute``. + :param status: Desired status of the resource. 
+ :param failures: Statuses that would indicate the transition + failed such as 'ERROR'. Defaults to ['ERROR']. + :param interval: Number of seconds to wait between checks. + :param wait: Maximum number of seconds to wait for transition. + Set to ``None`` to wait forever. + :param attribute: Name of the resource attribute that contains the + status. + :param callback: A callback function. This will be called with a single + value, progress. This is API specific but is generally a percentage + value from 0-100. + + :return: The updated resource. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if the + transition to status failed to occur in ``wait`` seconds. + :raises: :class:`~openstack.exceptions.ResourceFailure` if the resource + transitioned to one of the states in ``failures``. + :raises: :class:`~AttributeError` if the resource does not have a + ``status`` attribute + """ + return resource.wait_for_status( + self, res, status, failures, interval, wait, attribute, callback + ) + + def wait_for_delete( + self, + res: resource.ResourceT, + interval: int = 2, + wait: int = 120, + callback: ty.Callable[[int], None] | None = None, + ) -> resource.ResourceT: + """Wait for a resource to be deleted. + + :param res: The resource to wait on to be deleted. + :param interval: Number of seconds to wait before to consecutive + checks. + :param wait: Maximum number of seconds to wait before the change. + :param callback: A callback function. This will be called with a single + value, progress, which is a percentage value from 0-100. + + :returns: The resource is returned on success. + :raises: :class:`~openstack.exceptions.ResourceTimeout` if transition + to delete failed to occur in the specified seconds. 
+        """
+        return resource.wait_for_delete(self, res, interval, wait, callback)
diff --git a/openstack/workflow/v2/cron_trigger.py b/openstack/workflow/v2/cron_trigger.py
new file mode 100644
index 0000000000..139672d2c2
--- /dev/null
+++ b/openstack/workflow/v2/cron_trigger.py
@@ -0,0 +1,81 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from openstack import resource
+
+
+class CronTrigger(resource.Resource):
+    resource_key = 'cron_trigger'
+    resources_key = 'cron_triggers'
+    base_path = '/cron_triggers'
+
+    # capabilities
+    allow_create = True
+    allow_list = True
+    allow_fetch = True
+    allow_delete = True
+
+    _query_mapping = resource.QueryParameters(
+        'marker',
+        'limit',
+        'sort_keys',
+        'sort_dirs',
+        'fields',
+        'name',
+        'workflow_name',
+        'workflow_id',
+        'workflow_input',
+        'workflow_params',
+        'scope',
+        'pattern',
+        'remaining_executions',
+        'project_id',
+        'first_execution_time',
+        'next_execution_time',
+        'created_at',
+        'updated_at',
+        'all_projects',
+    )
+
+    #: The name of this Cron Trigger
+    name = resource.Body("name")
+    #: The pattern for this Cron Trigger
+    pattern = resource.Body("pattern")
+    #: Count of remaining executions
+    remaining_executions = resource.Body("remaining_executions")
+    #: Time of the first execution
+    first_execution_time = resource.Body("first_execution_time")
+    #: Time of the next execution
+    next_execution_time = resource.Body("next_execution_time")
+    #: Workflow name
+    workflow_name = resource.Body("workflow_name")
+    #: Workflow ID
+    workflow_id = resource.Body("workflow_id")
+    #: The inputs for Workflow
+    workflow_input = resource.Body("workflow_input")
+    #: Workflow params
+    workflow_params = resource.Body("workflow_params")
+    #: The ID of the associated project
+    project_id = resource.Body("project_id")
+    #: The time at which the cron trigger was created
+    created_at = resource.Body("created_at")
+    #: The time at which the cron trigger was last updated
+    updated_at = resource.Body("updated_at")
+
+    def create(
+        self,
+        session,
+        prepend_key=False,
+        *args,
+        **kwargs,
+    ):
+        return super().create(session, prepend_key, *args, **kwargs)
diff --git a/openstack/workflow/v2/execution.py b/openstack/workflow/v2/execution.py
new file mode 100644
index 0000000000..2eb2730e8b
--- /dev/null
+++ b/openstack/workflow/v2/execution.py
@@ -0,0 +1,78 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+ +from openstack import resource + + +class Execution(resource.Resource): + resource_key = 'execution' + resources_key = 'executions' + base_path = '/executions' + + # capabilities + allow_create = True + allow_list = True + allow_fetch = True + allow_delete = True + + _query_mapping = resource.QueryParameters( + 'marker', + 'limit', + 'sort_keys', + 'sort_dirs', + 'fields', + 'params', + 'include_output', + ) + + #: The name of the workflow + workflow_name = resource.Body("workflow_name") + #: The ID of the workflow + workflow_id = resource.Body("workflow_id") + #: A description of the workflow execution + description = resource.Body("description") + #: A reference to the parent task execution + task_execution_id = resource.Body("task_execution_id") + #: Status can be one of: IDLE, RUNNING, SUCCESS, ERROR, or PAUSED + status = resource.Body("state") + #: An optional information string about the status + status_info = resource.Body("state_info") + #: A JSON structure containing workflow input values + # TODO(briancurtin): type=dict + input = resource.Body("input") + #: An optional JSON structure containing workflow type specific parameters + params = resource.Body("params") + #: The output of the workflow + output = resource.Body("output") + #: The time at which the Execution was created + created_at = resource.Body("created_at") + #: The time at which the Execution was updated + updated_at = resource.Body("updated_at") + + def create( + self, + session, + prepend_key=True, + base_path=None, + **kwargs, + ): + request = self._prepare_request( + requires_id=False, prepend_key=prepend_key, base_path=base_path + ) + + request_body = request.body["execution"] + response = session.post( + request.url, json=request_body, headers=request.headers + ) + + self._translate_response(response, has_body=True) + return self diff --git a/openstack/workflow/v2/workflow.py b/openstack/workflow/v2/workflow.py new file mode 100644 index 0000000000..8b6df87b0d --- /dev/null +++ 
b/openstack/workflow/v2/workflow.py
@@ -0,0 +1,97 @@
+# Licensed under the Apache License, Version 2.0 (the "License"); you may
+# not use this file except in compliance with the License. You may obtain
+# a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT
+# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the
+# License for the specific language governing permissions and limitations
+# under the License.
+
+from openstack import resource
+
+
+class Workflow(resource.Resource):
+    resource_key = 'workflow'
+    resources_key = 'workflows'
+    base_path = '/workflows'
+
+    # capabilities
+    allow_create = True
+    allow_commit = True
+    allow_list = True
+    allow_fetch = True
+    allow_delete = True
+
+    _query_mapping = resource.QueryParameters(
+        'marker', 'limit', 'sort_keys', 'sort_dirs', 'fields'
+    )
+
+    #: The name of this Workflow
+    name = resource.Body("name")
+    #: The inputs for this Workflow
+    input = resource.Body("input")
+    #: A Workflow definition using the Mistral v2 DSL
+    definition = resource.Body("definition")
+    #: A list of values associated with a workflow that users can use
+    #: to group workflows by some criteria
+    # TODO(briancurtin): type=list
+    tags = resource.Body("tags")
+    #: Can be either "private" or "public"
+    scope = resource.Body("scope")
+    #: The ID of the associated project
+    project_id = resource.Body("project_id")
+    #: The time at which the workflow was created
+    created_at = resource.Body("created_at")
+    #: The time at which the workflow was last updated
+    updated_at = resource.Body("updated_at")
+
+    def _request_kwargs(self, prepend_key=True, base_path=None):
+        request = self._prepare_request(
+            requires_id=False, prepend_key=prepend_key, base_path=base_path
+        )
+
+        headers = {"Content-Type": 'text/plain'}
+        kwargs = {
+            "data": self.definition,
+        }
+
+
scope = f"?scope={self.scope}" + uri = request.url + scope + + request.headers.update(headers) + return dict(url=uri, json=None, headers=request.headers, **kwargs) + + def create( + self, + session, + prepend_key=True, + base_path=None, + **kwargs, + ): + kwargs = self._request_kwargs( + prepend_key=prepend_key, base_path=base_path + ) + response = session.post(**kwargs) + self._translate_response(response, has_body=False) + return self + + def commit( + self, + session, + prepend_key=True, + has_body=True, + retry_on_conflict=None, + base_path=None, + *, + microversion=None, + **kwargs, + ): + kwargs = self._request_kwargs( + prepend_key=prepend_key, base_path=base_path + ) + response = session.put(**kwargs) + self._translate_response(response, has_body=False) + return self diff --git a/openstack/workflow/version.py b/openstack/workflow/version.py new file mode 100644 index 0000000000..692230a198 --- /dev/null +++ b/openstack/workflow/version.py @@ -0,0 +1,27 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. 
+ + +from openstack import resource + + +class Version(resource.Resource): + resource_key = 'version' + resources_key = 'versions' + base_path = '/' + + # capabilities + allow_list = True + + # Properties + links = resource.Body('links') + status = resource.Body('status') diff --git a/openstack/workflow/workflow_service.py b/openstack/workflow/workflow_service.py new file mode 100644 index 0000000000..98ece44021 --- /dev/null +++ b/openstack/workflow/workflow_service.py @@ -0,0 +1,24 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. See the +# License for the specific language governing permissions and limitations +# under the License. + +from openstack import service_description +from openstack.workflow.v2 import _proxy + + +class WorkflowService( + service_description.ServiceDescription[_proxy.Proxy], +): + """The workflow service.""" + + supported_versions = { + '2': _proxy.Proxy, + } diff --git a/playbooks/acceptance/post.yaml b/playbooks/acceptance/post.yaml new file mode 100644 index 0000000000..32d0f80ade --- /dev/null +++ b/playbooks/acceptance/post.yaml @@ -0,0 +1,42 @@ +--- +# This could be running on localhost only, but then the devstack job would need +# to perform API call on the worker node. To keep the code a bit less crazy +# rather address all hosts and perform certain steps on the localhost (zuul +# executor). 
+- hosts: all + tasks: + # TODO: + # - clean the resources, which might have been created + + # Token is saved on the zuul executor node + - name: Check token file + delegate_to: localhost + ansible.builtin.stat: + path: "{{ zuul.executor.work_root }}/.{{ zuul.build }}" + register: token_file + + # no_log is important since content WILL appear in logs + - name: Read the token from file + delegate_to: localhost + no_log: true + ansible.builtin.slurp: + src: "{{ token_file.stat.path }}" + register: token_data + when: "token_file.stat.exists" + + - name: Delete data file + delegate_to: localhost + command: "shred {{ token_file.stat.path }}" + when: "token_file.stat.exists" + + # no_log is important since content WILL appear in logs + - name: Revoke token + no_log: true + ansible.builtin.uri: + url: "{{ openstack_credentials.auth.auth_url | default(auth_url) }}/v3/auth/tokens" + method: "DELETE" + headers: + X-Auth-Token: "{{ token_data['content'] | b64decode }}" + X-Subject-Token: "{{ token_data['content'] | b64decode }}" + status_code: 204 + when: "token_file.stat.exists and 'content' in token_data" diff --git a/playbooks/acceptance/pre.yaml b/playbooks/acceptance/pre.yaml new file mode 100644 index 0000000000..091c9a32e1 --- /dev/null +++ b/playbooks/acceptance/pre.yaml @@ -0,0 +1,69 @@ +--- +- hosts: all + tasks: + - name: Get temporary token for the cloud + # no_log is important since content WILL appear in logs + no_log: true + ansible.builtin.uri: + url: "{{ openstack_credentials.auth.auth_url | default(auth_url) }}/v3/auth/tokens" + method: "POST" + body_format: "json" + body: + auth: + identity: + methods: ["password"] + password: + user: + name: "{{ openstack_credentials.auth.username | default(omit) }}" + id: "{{ openstack_credentials.auth.user_id | default(omit) }}" + password: "{{ openstack_credentials.auth.password }}" + domain: + name: "{{ openstack_credentials.auth.user_domain_name | default(omit) }}" + id: "{{ openstack_credentials.auth.user_domain_id | 
default(omit) }}" + scope: + project: + name: "{{ openstack_credentials.auth.project_name | default(omit) }}" + id: "{{ openstack_credentials.auth.project_id | default(omit) }}" + domain: + name: "{{ openstack_credentials.auth.project_domain_name | default(omit) }}" + id: "{{ openstack_credentials.auth.project_domain_id | default(omit) }}" + return_content: true + status_code: 201 + register: os_auth + + - name: Verify token + # nolog is important since content WILL appear in logs + no_log: true + ansible.builtin.uri: + url: "{{ openstack_credentials.auth.auth_url | default(auth_url) }}/v3/auth/tokens" + method: "GET" + headers: + X-Auth-Token: "{{ os_auth.x_subject_token }}" + X-Subject-Token: "{{ os_auth.x_subject_token }}" + + - name: Include deploy-clouds-config role + include_role: + name: deploy-clouds-config + vars: + cloud_config: + clouds: + acceptance: + profile: "{{ openstack_credentials.profile | default('') }}" + auth_type: "token" + auth: + auth_url: "{{ openstack_credentials.auth.auth_url | default(auth_url) }}" + project_name: "{{ openstack_credentials.auth.project_name | default('') }}" + project_domain_id: "{{ openstack_credentials.auth.project_domain_id | default('') }}" + project_domain_name: "{{ openstack_credentials.auth.project_domain_name | default('') }}" + token: "{{ os_auth.x_subject_token }}" + region_name: "{{ openstack_credentials.region_name | default('') }}" + verify: "{{ openstack_credentials.verify | default(true) }}" + + # Intruders might want to corrupt clouds.yaml to avoid revoking token in the post phase + # To prevent this we save token on the executor for later use. 
+ - name: Save the token + delegate_to: localhost + copy: + dest: "{{ zuul.executor.work_root }}/.{{ zuul.build }}" + content: "{{ os_auth.x_subject_token }}" + mode: "0640" diff --git a/playbooks/acceptance/run-with-devstack.yaml b/playbooks/acceptance/run-with-devstack.yaml new file mode 100644 index 0000000000..26dcdf68ee --- /dev/null +++ b/playbooks/acceptance/run-with-devstack.yaml @@ -0,0 +1,19 @@ +--- +# Need to actually start devstack first +- hosts: all + roles: + - run-devstack + +- name: Get the token + ansible.builtin.import_playbook: pre.yaml + +# Run the rest +- hosts: all + roles: + - role: bindep + bindep_profile: test + bindep_dir: "{{ zuul_work_dir }}" + - test-setup + - ensure-tox + - get-devstack-os-environment + - tox diff --git a/playbooks/devstack/legacy-git.yaml b/playbooks/devstack/legacy-git.yaml new file mode 100644 index 0000000000..96ba6d5506 --- /dev/null +++ b/playbooks/devstack/legacy-git.yaml @@ -0,0 +1,11 @@ +- hosts: all + tasks: + + - name: Set openstacksdk libraries to master branch before functional tests + command: git checkout master + args: + chdir: "src/opendev.org/{{ item }}" + with_items: + - openstack/shade + - openstack/keystoneauth + - openstack/os-client-config diff --git a/playbooks/devstack/post.yaml b/playbooks/devstack/post.yaml new file mode 100644 index 0000000000..c2ebc50749 --- /dev/null +++ b/playbooks/devstack/post.yaml @@ -0,0 +1,9 @@ +- hosts: all + tasks: + - include_role: + name: fetch-tox-output + - include_role: + name: fetch-subunit-output + when: fetch_subunit|default(true)|bool + - include_role: + name: process-stackviz diff --git a/post_test_hook.sh b/post_test_hook.sh index b40b0d98ab..13421d4239 100755 --- a/post_test_hook.sh +++ b/post_test_hook.sh @@ -8,15 +8,17 @@ DIR=$(cd $(dirname "$0") && pwd) echo "Running SDK functional test suite" sudo -H -u stack -i <=6.1.1"] +build-backend = "pbr.build" + +[project] +name = "openstacksdk" +description = "An SDK for building applications to work with 
OpenStack" +authors = [ + {name = "OpenStack", email = "openstack-discuss@lists.openstack.org"}, +] +readme = {file = "README.rst", content-type = "text/x-rst"} +license = {text = "Apache-2.0"} +dynamic = ["version", "dependencies"] +requires-python = ">=3.10" +classifiers = [ + "Environment :: OpenStack", + "Intended Audience :: Information Technology", + "Intended Audience :: System Administrators", + "License :: OSI Approved :: Apache Software License", + "Operating System :: POSIX :: Linux", + "Programming Language :: Python", + "Programming Language :: Python :: 3", + "Programming Language :: Python :: 3.10", + "Programming Language :: Python :: 3.11", + "Programming Language :: Python :: 3.12", + "Programming Language :: Python :: 3.13", +] + +[project.urls] +Homepage = "https://docs.openstack.org/openstacksdk" +Repository = "https://opendev.org/openstack/openstacksdk/" + +[project.scripts] +# TODO(mordred) Move this to an OSC command at some point +openstack-inventory = "openstack.cloud.cmd.inventory:main" + +[tool.setuptools] +packages = [ + "openstack" +] + +[tool.mypy] +python_version = "3.10" +show_column_numbers = true +show_error_context = true +follow_imports = "normal" +check_untyped_defs = true +warn_unused_ignores = true +# many of the following are false while we incrementally add typing +warn_return_any = false +warn_unused_configs = true +warn_redundant_casts = true +strict_equality = true +disallow_untyped_decorators = false +disallow_any_generics = false +disallow_subclassing_any = false +disallow_untyped_calls = false +disallow_incomplete_defs = true +disallow_untyped_defs = false +no_implicit_reexport = true +extra_checks = true +disable_error_code = ["import-untyped"] +exclude = "(?x)(doc | examples | releasenotes)" + +[[tool.mypy.overrides]] +module = [ + "openstack._log", + "openstack.common", + "openstack.common.*", + "openstack.config", + "openstack.config.*", + "openstack.connection", + "openstack.exceptions", + "openstack.fields", + 
 "openstack.format", + "openstack.proxy", + "openstack.utils", + "openstack.version", + "openstack.warnings", +] +warn_return_any = true +disallow_untyped_decorators = true +disallow_any_generics = true +disallow_subclassing_any = true +disallow_untyped_calls = true +disallow_untyped_defs = true +no_implicit_reexport = true + +[[tool.mypy.overrides]] +module = ["openstack.tests.unit.*"] +ignore_errors = true + +[tool.ruff] +line-length = 79 + +[tool.ruff.format] +quote-style = "preserve" +docstring-code-format = true + +[tool.ruff.lint] +select = ["E4", "E5", "E7", "E9", "F", "RUF", "S", "UP", "W"] +ignore = [ + # there are a lot of these to fix + "RUF012", + # we only use asserts for type narrowing + "S101", +] +external = ["H"] + +[tool.ruff.lint.per-file-ignores] +"openstack/tests/*" = ["S"] +"openstack/_services_mixin.py" = ["E501"] +"examples/*" = ["S"] diff --git a/releasenotes/notes/Add-trusted-vif-to-the-port-e306789f92e181b2.yaml b/releasenotes/notes/Add-trusted-vif-to-the-port-e306789f92e181b2.yaml new file mode 100644 index 0000000000..e4cfe953f2 --- /dev/null +++ b/releasenotes/notes/Add-trusted-vif-to-the-port-e306789f92e181b2.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add ``trusted`` attribute to ``port`` resource. Users can use this + attribute to set a port to be trusted, which will then be populated into + the ``binding:profile`` dictionary. diff --git a/releasenotes/notes/add-accelerator-attributes-support-492cae3594272818.yaml b/releasenotes/notes/add-accelerator-attributes-support-492cae3594272818.yaml new file mode 100644 index 0000000000..c0f9ec50da --- /dev/null +++ b/releasenotes/notes/add-accelerator-attributes-support-492cae3594272818.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Support for the attributes API of the Accelerator service (Cyborg) has + been added. 
diff --git a/releasenotes/notes/add-aggregates-fc563e237755112e.yaml b/releasenotes/notes/add-aggregates-fc563e237755112e.yaml new file mode 100644 index 0000000000..81733146bb --- /dev/null +++ b/releasenotes/notes/add-aggregates-fc563e237755112e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Basic CRUD functionality was added on Host Aggregates. Actions are not + implemented yet (adding/removing hosts from Host Aggregates). diff --git a/releasenotes/notes/add-application-credentials-abab9106dea10c11.yaml b/releasenotes/notes/add-application-credentials-abab9106dea10c11.yaml new file mode 100644 index 0000000000..9f48183038 --- /dev/null +++ b/releasenotes/notes/add-application-credentials-abab9106dea10c11.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added CRUD support for `application credentials + `_. diff --git a/releasenotes/notes/add-az-to-loadbalancer-da9bf1baaedc89a4.yaml b/releasenotes/notes/add-az-to-loadbalancer-da9bf1baaedc89a4.yaml new file mode 100644 index 0000000000..321d4a0f1e --- /dev/null +++ b/releasenotes/notes/add-az-to-loadbalancer-da9bf1baaedc89a4.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds Octavia (load_balancer) support for the availability zone and + availability zone profile APIs. diff --git a/releasenotes/notes/add-baremetal-port-vendor-category-a544098f87558c8c.yaml b/releasenotes/notes/add-baremetal-port-vendor-category-a544098f87558c8c.yaml new file mode 100644 index 0000000000..0b08304a79 --- /dev/null +++ b/releasenotes/notes/add-baremetal-port-vendor-category-a544098f87558c8c.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add the ``vendor`` field and ``category`` field on the baremetal ``Port`` + object as added in Ironic API 1.100 and 1.101 respectively. 
diff --git a/releasenotes/notes/add-block-storage-group-snapshots-954cc869227317c3.yaml b/releasenotes/notes/add-block-storage-group-snapshots-954cc869227317c3.yaml new file mode 100644 index 0000000000..eb4e0f266d --- /dev/null +++ b/releasenotes/notes/add-block-storage-group-snapshots-954cc869227317c3.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for group snapshots to the block storage service. diff --git a/releasenotes/notes/add-block-storage-group-type-group-specs-d07047167224ec83.yaml b/releasenotes/notes/add-block-storage-group-type-group-specs-d07047167224ec83.yaml new file mode 100644 index 0000000000..50ae352a07 --- /dev/null +++ b/releasenotes/notes/add-block-storage-group-type-group-specs-d07047167224ec83.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for creating, updating and deleting group type group specs for + the block storage service. diff --git a/releasenotes/notes/add-block-storage-groups-bf5f1af714c9e505.yaml b/releasenotes/notes/add-block-storage-groups-bf5f1af714c9e505.yaml new file mode 100644 index 0000000000..2f24f1812a --- /dev/null +++ b/releasenotes/notes/add-block-storage-groups-bf5f1af714c9e505.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for groups to the block storage service. diff --git a/releasenotes/notes/add-block-storage-service-support-ce03092ce2d7e7b9.yaml b/releasenotes/notes/add-block-storage-service-support-ce03092ce2d7e7b9.yaml new file mode 100644 index 0000000000..95fadb5805 --- /dev/null +++ b/releasenotes/notes/add-block-storage-service-support-ce03092ce2d7e7b9.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for block storage services. 
diff --git a/releasenotes/notes/add-block-storage-summary-support-dd00d424c4e6a3b1.yaml b/releasenotes/notes/add-block-storage-summary-support-dd00d424c4e6a3b1.yaml new file mode 100644 index 0000000000..4620d2e3b7 --- /dev/null +++ b/releasenotes/notes/add-block-storage-summary-support-dd00d424c4e6a3b1.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for block storage summary. + diff --git a/releasenotes/notes/add-bulk-create-resources-12192ec9d76c7716.yaml b/releasenotes/notes/add-bulk-create-resources-12192ec9d76c7716.yaml new file mode 100644 index 0000000000..896e17bdbf --- /dev/null +++ b/releasenotes/notes/add-bulk-create-resources-12192ec9d76c7716.yaml @@ -0,0 +1,5 @@ +--- +features: + - Enabling Resource class for being able to create objects in bulk way. Add + first objects using that feature - Port, which now exposes a proxy method + `create_ports` for creating multiple port objects at once. diff --git a/releasenotes/notes/add-cipher-list-support-to-octavia-b6b2b0053ca6b184.yaml b/releasenotes/notes/add-cipher-list-support-to-octavia-b6b2b0053ca6b184.yaml new file mode 100644 index 0000000000..e29879d5d4 --- /dev/null +++ b/releasenotes/notes/add-cipher-list-support-to-octavia-b6b2b0053ca6b184.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added the ``tls_ciphers`` properties to listener.py + and pool.py for storing strings of tls ciphers in + OpenSSL cipher string format. diff --git a/releasenotes/notes/add-compute-flavor-ops-12149e58299c413e.yaml b/releasenotes/notes/add-compute-flavor-ops-12149e58299c413e.yaml new file mode 100644 index 0000000000..45c2d6b9d1 --- /dev/null +++ b/releasenotes/notes/add-compute-flavor-ops-12149e58299c413e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add additional compute flavor operations (flavor_add_tenant_access, + flavor_remove_tenant_access, get_flavor_access, extra_specs fetching/updating). 
diff --git a/releasenotes/notes/add-current-user-id-49b6463e6bcc3b31.yaml b/releasenotes/notes/add-current-user-id-49b6463e6bcc3b31.yaml new file mode 100644 index 0000000000..fd9a1bece7 --- /dev/null +++ b/releasenotes/notes/add-current-user-id-49b6463e6bcc3b31.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added a new property, 'current_user_id' which contains + the id of the currently authenticated user from the token. diff --git a/releasenotes/notes/add-cyborg-support-b9afca69f709c048.yaml b/releasenotes/notes/add-cyborg-support-b9afca69f709c048.yaml new file mode 100644 index 0000000000..9688a5e24c --- /dev/null +++ b/releasenotes/notes/add-cyborg-support-b9afca69f709c048.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for Cyborg(accelerator) diff --git a/releasenotes/notes/add-default-type-support-aaa1e54b8bd16d86.yaml b/releasenotes/notes/add-default-type-support-aaa1e54b8bd16d86.yaml new file mode 100644 index 0000000000..b2ea6adde3 --- /dev/null +++ b/releasenotes/notes/add-default-type-support-aaa1e54b8bd16d86.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Added support for the following operations: + + * Set default volume type + * Get default volume type + * List default volume type + * Unset default volume type diff --git a/releasenotes/notes/add-dns-606cc018e01d40fa.yaml b/releasenotes/notes/add-dns-606cc018e01d40fa.yaml new file mode 100644 index 0000000000..dcaab35dcf --- /dev/null +++ b/releasenotes/notes/add-dns-606cc018e01d40fa.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds support for `dns + `_ service. 
diff --git a/releasenotes/notes/add-dns-domain-support-for-port-3fa4568330dda07e.yaml b/releasenotes/notes/add-dns-domain-support-for-port-3fa4568330dda07e.yaml new file mode 100644 index 0000000000..7c24608b47 --- /dev/null +++ b/releasenotes/notes/add-dns-domain-support-for-port-3fa4568330dda07e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + ``dns_domain`` attribute support has been added to the network + port resource diff --git a/releasenotes/notes/add-dns-quota-49ae659a88eeeab9.yaml b/releasenotes/notes/add-dns-quota-49ae659a88eeeab9.yaml new file mode 100644 index 0000000000..c2be9828a6 --- /dev/null +++ b/releasenotes/notes/add-dns-quota-49ae659a88eeeab9.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add quota support for designate(DNS) API. diff --git a/releasenotes/notes/add-dns-resource-list-by-project-8b5479a045ef7373.yaml b/releasenotes/notes/add-dns-resource-list-by-project-8b5479a045ef7373.yaml new file mode 100644 index 0000000000..f79ed22c02 --- /dev/null +++ b/releasenotes/notes/add-dns-resource-list-by-project-8b5479a045ef7373.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add functionality to list DNS resources for a certain project only, + or for all projects, using the new `project_id` and `all_projects` + parameters. diff --git a/releasenotes/notes/add-dns-service-status-bf1e1cfd811e59a0.yaml b/releasenotes/notes/add-dns-service-status-bf1e1cfd811e59a0.yaml new file mode 100644 index 0000000000..b10c55a6d5 --- /dev/null +++ b/releasenotes/notes/add-dns-service-status-bf1e1cfd811e59a0.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add getting the status of one or all services in Designate (DNS) system. diff --git a/releasenotes/notes/add-dns-tld-d3cfac70f76637e3.yaml b/releasenotes/notes/add-dns-tld-d3cfac70f76637e3.yaml new file mode 100644 index 0000000000..d33e47f443 --- /dev/null +++ b/releasenotes/notes/add-dns-tld-d3cfac70f76637e3.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds support for `dns tld + `_ service. 
diff --git a/releasenotes/notes/add-dns-zone-share-api-374e71cac504917f.yaml b/releasenotes/notes/add-dns-zone-share-api-374e71cac504917f.yaml new file mode 100644 index 0000000000..1541c17445 --- /dev/null +++ b/releasenotes/notes/add-dns-zone-share-api-374e71cac504917f.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add Designate (DNS) support for zone shares. diff --git a/releasenotes/notes/add-extend-volume-completion-support-712217dafff8ce28.yaml b/releasenotes/notes/add-extend-volume-completion-support-712217dafff8ce28.yaml new file mode 100644 index 0000000000..665b92da18 --- /dev/null +++ b/releasenotes/notes/add-extend-volume-completion-support-712217dafff8ce28.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for the extend volume completion action. diff --git a/releasenotes/notes/add-fakes-generator-72c53d34c995fcb2.yaml b/releasenotes/notes/add-fakes-generator-72c53d34c995fcb2.yaml new file mode 100644 index 0000000000..06fa9c0398 --- /dev/null +++ b/releasenotes/notes/add-fakes-generator-72c53d34c995fcb2.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add fake resource generator to ease unit testing in packages that depend on + openstacksdk. diff --git a/releasenotes/notes/add-find-backup-find-snapshot-v2-756a05ccd150db82.yaml b/releasenotes/notes/add-find-backup-find-snapshot-v2-756a05ccd150db82.yaml new file mode 100644 index 0000000000..68789d3b82 --- /dev/null +++ b/releasenotes/notes/add-find-backup-find-snapshot-v2-756a05ccd150db82.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The ``find_snapshot`` and ``find_backup`` methods have been added to the + v2 block storage proxy API. These were previously only available for the v3 + proxy API. 
diff --git a/releasenotes/notes/add-fip-portforwarding-methods-cffc14a6283cedfb.yaml b/releasenotes/notes/add-fip-portforwarding-methods-cffc14a6283cedfb.yaml new file mode 100644 index 0000000000..274e86a0e9 --- /dev/null +++ b/releasenotes/notes/add-fip-portforwarding-methods-cffc14a6283cedfb.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add floating IP Port Forwarding related methods. diff --git a/releasenotes/notes/add-identity-domain-configuration-2e8bcaa20736b379.yaml b/releasenotes/notes/add-identity-domain-configuration-2e8bcaa20736b379.yaml new file mode 100644 index 0000000000..6929c8be6f --- /dev/null +++ b/releasenotes/notes/add-identity-domain-configuration-2e8bcaa20736b379.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for creating, updating and deleting domain configurations for + the identity service. diff --git a/releasenotes/notes/add-identity-group-users-proxy-method-e37f8983b2406819.yaml b/releasenotes/notes/add-identity-group-users-proxy-method-e37f8983b2406819.yaml new file mode 100644 index 0000000000..88e67dfa67 --- /dev/null +++ b/releasenotes/notes/add-identity-group-users-proxy-method-e37f8983b2406819.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add possibility to list users in the group. diff --git a/releasenotes/notes/add-identity-role-options-property-5d99d3fd909f01eb.yaml b/releasenotes/notes/add-identity-role-options-property-5d99d3fd909f01eb.yaml new file mode 100644 index 0000000000..8b7e7eaf9f --- /dev/null +++ b/releasenotes/notes/add-identity-role-options-property-5d99d3fd909f01eb.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for `options` property for roles in identity. 
diff --git a/releasenotes/notes/add-identity-service-provider-support-8c97cbb157883626.yaml b/releasenotes/notes/add-identity-service-provider-support-8c97cbb157883626.yaml new file mode 100644 index 0000000000..1b50dfe326 --- /dev/null +++ b/releasenotes/notes/add-identity-service-provider-support-8c97cbb157883626.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for service providers to the identity service. diff --git a/releasenotes/notes/add-image-attributes-05b820a85cd09806.yaml b/releasenotes/notes/add-image-attributes-05b820a85cd09806.yaml new file mode 100644 index 0000000000..6507b9892f --- /dev/null +++ b/releasenotes/notes/add-image-attributes-05b820a85cd09806.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add image attributes is_hidden, hash_algo, hash_value diff --git a/releasenotes/notes/add-image-cache-support-3f8c13550a84d749.yaml b/releasenotes/notes/add-image-cache-support-3f8c13550a84d749.yaml new file mode 100644 index 0000000000..36dc0fb833 --- /dev/null +++ b/releasenotes/notes/add-image-cache-support-3f8c13550a84d749.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for glance Cache API. diff --git a/releasenotes/notes/add-image-cache-support-78477e1686c52e56.yaml b/releasenotes/notes/add-image-cache-support-78477e1686c52e56.yaml new file mode 100644 index 0000000000..36dc0fb833 --- /dev/null +++ b/releasenotes/notes/add-image-cache-support-78477e1686c52e56.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for glance Cache API. diff --git a/releasenotes/notes/add-image-metadef-namespace-support-b93557afdcf4272c.yaml b/releasenotes/notes/add-image-metadef-namespace-support-b93557afdcf4272c.yaml new file mode 100644 index 0000000000..01514fff0c --- /dev/null +++ b/releasenotes/notes/add-image-metadef-namespace-support-b93557afdcf4272c.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support to query metadef namespaces from glance. 
diff --git a/releasenotes/notes/add-image-metadef-property-fb87e5a7090e73ac.yaml b/releasenotes/notes/add-image-metadef-property-fb87e5a7090e73ac.yaml new file mode 100644 index 0000000000..042d4c71a1 --- /dev/null +++ b/releasenotes/notes/add-image-metadef-property-fb87e5a7090e73ac.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for the ``MetadefProperty`` Image resource. diff --git a/releasenotes/notes/add-image-metadef-schema-b463825481bdf954.yaml b/releasenotes/notes/add-image-metadef-schema-b463825481bdf954.yaml new file mode 100644 index 0000000000..f029759321 --- /dev/null +++ b/releasenotes/notes/add-image-metadef-schema-b463825481bdf954.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for metadata definition schema resource in image service. \ No newline at end of file diff --git a/releasenotes/notes/add-image-metadef-tags-c980ec5e6502d76c.yaml b/releasenotes/notes/add-image-metadef-tags-c980ec5e6502d76c.yaml new file mode 100644 index 0000000000..11ac842322 --- /dev/null +++ b/releasenotes/notes/add-image-metadef-tags-c980ec5e6502d76c.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for Image Metadef Tags to create, remove + create-multiple, update tags. diff --git a/releasenotes/notes/add-image-schema-9c07c2789490718a.yaml b/releasenotes/notes/add-image-schema-9c07c2789490718a.yaml new file mode 100644 index 0000000000..3217f998da --- /dev/null +++ b/releasenotes/notes/add-image-schema-9c07c2789490718a.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for schema resource in image service. 
diff --git a/releasenotes/notes/add-image-service-info-90d6063b5ba0735d.yaml b/releasenotes/notes/add-image-service-info-90d6063b5ba0735d.yaml new file mode 100644 index 0000000000..ea84d0d096 --- /dev/null +++ b/releasenotes/notes/add-image-service-info-90d6063b5ba0735d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add image service info discovery (import constraints and supported stores) diff --git a/releasenotes/notes/add-image-stage-1dbc3844a042fd26.yaml b/releasenotes/notes/add-image-stage-1dbc3844a042fd26.yaml new file mode 100644 index 0000000000..bde7506dfc --- /dev/null +++ b/releasenotes/notes/add-image-stage-1dbc3844a042fd26.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for staging image data. diff --git a/releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml b/releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml new file mode 100644 index 0000000000..2d157a3c8a --- /dev/null +++ b/releasenotes/notes/add-jmespath-support-f47b7a503dbbfda1.yaml @@ -0,0 +1,4 @@ +--- +features: + - All get and search functions can now take a jmespath expression in their + filters parameter. diff --git a/releasenotes/notes/add-key-manager-project-quotas-281845cccdc52ad2.yaml b/releasenotes/notes/add-key-manager-project-quotas-281845cccdc52ad2.yaml new file mode 100644 index 0000000000..75ada9b4ef --- /dev/null +++ b/releasenotes/notes/add-key-manager-project-quotas-281845cccdc52ad2.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for key manager project quota API diff --git a/releasenotes/notes/add-limit-to-shared-file-2b443c2a00c75e6e.yaml b/releasenotes/notes/add-limit-to-shared-file-2b443c2a00c75e6e.yaml new file mode 100644 index 0000000000..58ff44f097 --- /dev/null +++ b/releasenotes/notes/add-limit-to-shared-file-2b443c2a00c75e6e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to list absolute resource limits on the shared + file system service. 
diff --git a/releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml b/releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml new file mode 100644 index 0000000000..12f289f8ba --- /dev/null +++ b/releasenotes/notes/add-list_flavor_access-e038253e953e6586.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add a list_flavor_access method to list all + the projects/tenants allowed to access a given flavor. diff --git a/releasenotes/notes/add-load-balancer-flavor-api-d2598e30347a19fc.yaml b/releasenotes/notes/add-load-balancer-flavor-api-d2598e30347a19fc.yaml new file mode 100644 index 0000000000..462acc1353 --- /dev/null +++ b/releasenotes/notes/add-load-balancer-flavor-api-d2598e30347a19fc.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds Octavia (load_balancer) support for the flavor APIs. diff --git a/releasenotes/notes/add-load-balancer-flavor-profile-api-e5a15157563eb75f.yaml b/releasenotes/notes/add-load-balancer-flavor-profile-api-e5a15157563eb75f.yaml new file mode 100644 index 0000000000..1d674cc51d --- /dev/null +++ b/releasenotes/notes/add-load-balancer-flavor-profile-api-e5a15157563eb75f.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds Octavia (load_balancer) support for the flavor profile APIs. diff --git a/releasenotes/notes/add-load-balancer-listener-alpn-protocols-ded816c78bf2080c.yaml b/releasenotes/notes/add-load-balancer-listener-alpn-protocols-ded816c78bf2080c.yaml new file mode 100644 index 0000000000..685c5695b5 --- /dev/null +++ b/releasenotes/notes/add-load-balancer-listener-alpn-protocols-ded816c78bf2080c.yaml @@ -0,0 +1,3 @@ +--- +features: + - Adds ALPN protocols support for the Octavia (load_balancer) listeners. 
diff --git a/releasenotes/notes/add-load-balancer-pool-alpn-protocols-77f0c7015f176369.yaml b/releasenotes/notes/add-load-balancer-pool-alpn-protocols-77f0c7015f176369.yaml new file mode 100644 index 0000000000..fd112b0bd2 --- /dev/null +++ b/releasenotes/notes/add-load-balancer-pool-alpn-protocols-77f0c7015f176369.yaml @@ -0,0 +1,3 @@ +--- +features: + - Adds ALPN protocols support for the Octavia (load_balancer) pools. diff --git a/releasenotes/notes/add-load-balancer-provider-api-08bcfb72ddf5b247.yaml b/releasenotes/notes/add-load-balancer-provider-api-08bcfb72ddf5b247.yaml new file mode 100644 index 0000000000..906e9026a9 --- /dev/null +++ b/releasenotes/notes/add-load-balancer-provider-api-08bcfb72ddf5b247.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds Octavia (load_balancer) support for the providers APIs. diff --git a/releasenotes/notes/add-magnum-cluster-support-843fe2709b8f4789.yaml b/releasenotes/notes/add-magnum-cluster-support-843fe2709b8f4789.yaml new file mode 100644 index 0000000000..28609a3355 --- /dev/null +++ b/releasenotes/notes/add-magnum-cluster-support-843fe2709b8f4789.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added magnum cluster CRUD support to cloud abstraction layer. diff --git a/releasenotes/notes/add-manage-volume-support-a4fd90e3ff2fa0d0.yaml b/releasenotes/notes/add-manage-volume-support-a4fd90e3ff2fa0d0.yaml new file mode 100644 index 0000000000..6d0a0aedfd --- /dev/null +++ b/releasenotes/notes/add-manage-volume-support-a4fd90e3ff2fa0d0.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for manage volume operation. diff --git a/releasenotes/notes/add-masakara-support-3f7df4436ac869cf.yaml b/releasenotes/notes/add-masakara-support-3f7df4436ac869cf.yaml new file mode 100644 index 0000000000..baa5fd37eb --- /dev/null +++ b/releasenotes/notes/add-masakara-support-3f7df4436ac869cf.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Ported in support for masakari/``instance_ha`` service from + `python-masakariclient`. 
diff --git a/releasenotes/notes/add-masakari-enabled-to-segment-0e83da869d2ab03f.yaml b/releasenotes/notes/add-masakari-enabled-to-segment-0e83da869d2ab03f.yaml new file mode 100644 index 0000000000..97d01f59b0 --- /dev/null +++ b/releasenotes/notes/add-masakari-enabled-to-segment-0e83da869d2ab03f.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for the ``enabled`` field of the ``Segment`` resource for + the instance HA service (Masakari). diff --git a/releasenotes/notes/add-masakari-vmoves-873ad67830c92254.yaml b/releasenotes/notes/add-masakari-vmoves-873ad67830c92254.yaml new file mode 100644 index 0000000000..ffe6d382c8 --- /dev/null +++ b/releasenotes/notes/add-masakari-vmoves-873ad67830c92254.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for the new ``VMove`` resource for the instance + HA service (Masakari). diff --git a/releasenotes/notes/add-metadef-object-5eec168baf039e80.yaml b/releasenotes/notes/add-metadef-object-5eec168baf039e80.yaml new file mode 100644 index 0000000000..a1e037f78e --- /dev/null +++ b/releasenotes/notes/add-metadef-object-5eec168baf039e80.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for Image Metadef objects. diff --git a/releasenotes/notes/add-migrations-946adf16674d4b2a.yaml b/releasenotes/notes/add-migrations-946adf16674d4b2a.yaml new file mode 100644 index 0000000000..f199f91f2b --- /dev/null +++ b/releasenotes/notes/add-migrations-946adf16674d4b2a.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for the Compute service's migrations API, allowing users to + list all in-progress migrations for all servers. diff --git a/releasenotes/notes/add-namespace-object-delete-all-6cea62cb038012df.yaml b/releasenotes/notes/add-namespace-object-delete-all-6cea62cb038012df.yaml new file mode 100644 index 0000000000..b8863d816d --- /dev/null +++ b/releasenotes/notes/add-namespace-object-delete-all-6cea62cb038012df.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for deleting all objects inside + a namespace. 
diff --git a/releasenotes/notes/add-new-field-progress-details-in-notification-resource-f7871acb6ffd46dc.yaml b/releasenotes/notes/add-new-field-progress-details-in-notification-resource-f7871acb6ffd46dc.yaml new file mode 100644 index 0000000000..f0470b45db --- /dev/null +++ b/releasenotes/notes/add-new-field-progress-details-in-notification-resource-f7871acb6ffd46dc.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + In microversion 1.1, Masakari returns ``recovery_workflow_details`` information + of the notification in ``GET /notifications/{notification_id}`` API. Added + ``recovery_workflow_details`` attribute to Notification class to read the + recovery_workflow_details of the notification. diff --git a/releasenotes/notes/add-node-boot-mode-5f49882fdd86f35b.yaml b/releasenotes/notes/add-node-boot-mode-5f49882fdd86f35b.yaml new file mode 100644 index 0000000000..b97ad34726 --- /dev/null +++ b/releasenotes/notes/add-node-boot-mode-5f49882fdd86f35b.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support to display node fields ``boot_mode`` and ``secure_boot`` + which are introduced in API 1.75. diff --git a/releasenotes/notes/add-node-boot-mode-set-5718a8d6511b4826.yaml b/releasenotes/notes/add-node-boot-mode-set-5718a8d6511b4826.yaml new file mode 100644 index 0000000000..23d6e439a7 --- /dev/null +++ b/releasenotes/notes/add-node-boot-mode-set-5718a8d6511b4826.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for changing node states ``boot_mode`` and ``secure_boot`` + in sync with functionality introduced in API 1.76. diff --git a/releasenotes/notes/add-node-firmware-list-support-fec2f96a3a578730.yaml b/releasenotes/notes/add-node-firmware-list-support-fec2f96a3a578730.yaml new file mode 100644 index 0000000000..1dad3c21c7 --- /dev/null +++ b/releasenotes/notes/add-node-firmware-list-support-fec2f96a3a578730.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds support for querying a bare-metal node's firmware as per functionality + introduced in API 1.86. 
diff --git a/releasenotes/notes/add-node-health-field-bd30892473f3f9f2.yaml b/releasenotes/notes/add-node-health-field-bd30892473f3f9f2.yaml new file mode 100644 index 0000000000..40e2c812cf --- /dev/null +++ b/releasenotes/notes/add-node-health-field-bd30892473f3f9f2.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for the node ``health`` field which exposes hardware health + status from the BMC via Redfish. Introduced in API microversion 1.109. diff --git a/releasenotes/notes/add-node-inventory-52f54e16777814e7.yaml b/releasenotes/notes/add-node-inventory-52f54e16777814e7.yaml new file mode 100644 index 0000000000..5e4b84128d --- /dev/null +++ b/releasenotes/notes/add-node-inventory-52f54e16777814e7.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds support for querying a node's hardware inventory as per functionality + introduced in API 1.81. \ No newline at end of file diff --git a/releasenotes/notes/add-node-vendor_passthru-29b384cadf795b48.yaml b/releasenotes/notes/add-node-vendor_passthru-29b384cadf795b48.yaml new file mode 100644 index 0000000000..682ee4943e --- /dev/null +++ b/releasenotes/notes/add-node-vendor_passthru-29b384cadf795b48.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add node vendor_passthru interface for Ironic API. diff --git a/releasenotes/notes/add-octavia-amphora-api-7f3586f6a4f31de4.yaml b/releasenotes/notes/add-octavia-amphora-api-7f3586f6a4f31de4.yaml new file mode 100644 index 0000000000..af9a5b4091 --- /dev/null +++ b/releasenotes/notes/add-octavia-amphora-api-7f3586f6a4f31de4.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds Octavia (load_balancer) support for the amphora APIs. 
diff --git a/releasenotes/notes/add-octavia-lb-failover-9a34c9577d78ad34.yaml b/releasenotes/notes/add-octavia-lb-failover-9a34c9577d78ad34.yaml new file mode 100644 index 0000000000..d87404ed9c --- /dev/null +++ b/releasenotes/notes/add-octavia-lb-failover-9a34c9577d78ad34.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added Octavia (load_balancer) load balancer failover. diff --git a/releasenotes/notes/add-octavia-lb-listener-stats-1538cc6e4f734353.yaml b/releasenotes/notes/add-octavia-lb-listener-stats-1538cc6e4f734353.yaml new file mode 100644 index 0000000000..8fb7f9f095 --- /dev/null +++ b/releasenotes/notes/add-octavia-lb-listener-stats-1538cc6e4f734353.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added load balancer and listener get statistics methods. diff --git a/releasenotes/notes/add-octavia-listener-hsts-fields-50c621b71e56dc13.yaml b/releasenotes/notes/add-octavia-listener-hsts-fields-50c621b71e56dc13.yaml new file mode 100644 index 0000000000..b97968a861 --- /dev/null +++ b/releasenotes/notes/add-octavia-listener-hsts-fields-50c621b71e56dc13.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added new fields to loadbalancer create/update listener API in order to + support new HTTP Strict Transport Security support. diff --git a/releasenotes/notes/add-octavia-tags-support-1c1cf94184e6ebb7.yaml b/releasenotes/notes/add-octavia-tags-support-1c1cf94184e6ebb7.yaml new file mode 100644 index 0000000000..5eb3c85e81 --- /dev/null +++ b/releasenotes/notes/add-octavia-tags-support-1c1cf94184e6ebb7.yaml @@ -0,0 +1,10 @@ +--- +features: + - Add tags support for the Octavia (load_balancer) objects. + - | + Added support for the Octavia (load_balancer) L7 Policy "redirect_prefix" + capability. +fixes: + - | + Fixed the Octavia (load_balancer) load balancer objects to have + "flavor_id" instead of the nonexistent "flavor" field. 
diff --git a/releasenotes/notes/add-placement-resource-class-e1c644d978b886bc.yaml b/releasenotes/notes/add-placement-resource-class-e1c644d978b886bc.yaml new file mode 100644 index 0000000000..c3631abde0 --- /dev/null +++ b/releasenotes/notes/add-placement-resource-class-e1c644d978b886bc.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for the ``ResourceClass`` Placement resource. diff --git a/releasenotes/notes/add-placement-resource-provider-aggregates-1310c0be6a4097d3.yaml b/releasenotes/notes/add-placement-resource-provider-aggregates-1310c0be6a4097d3.yaml new file mode 100644 index 0000000000..e8385edad1 --- /dev/null +++ b/releasenotes/notes/add-placement-resource-provider-aggregates-1310c0be6a4097d3.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for aggregates to the ``ResourceProvider`` Placement resource. diff --git a/releasenotes/notes/add-placement-resource-provider-inventory-8714cafefae74810.yaml b/releasenotes/notes/add-placement-resource-provider-inventory-8714cafefae74810.yaml new file mode 100644 index 0000000000..33cabac4e8 --- /dev/null +++ b/releasenotes/notes/add-placement-resource-provider-inventory-8714cafefae74810.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for the ``ResourceProviderInventory`` Placement resource. diff --git a/releasenotes/notes/add-placement-support-a2011eb1e900804d.yaml b/releasenotes/notes/add-placement-support-a2011eb1e900804d.yaml new file mode 100644 index 0000000000..dd9ee1d434 --- /dev/null +++ b/releasenotes/notes/add-placement-support-a2011eb1e900804d.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add initial support for Placement. 
Currently the following resources are + supported: + + - ``ResourceProvider`` diff --git a/releasenotes/notes/add-placement-trait-29957d2c03edbfb9.yaml b/releasenotes/notes/add-placement-trait-29957d2c03edbfb9.yaml new file mode 100644 index 0000000000..773d77fd15 --- /dev/null +++ b/releasenotes/notes/add-placement-trait-29957d2c03edbfb9.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for the ``Trait`` Placement resource. diff --git a/releasenotes/notes/add-port-hardware-offload-type-1232c5ae3f62d7df.yaml b/releasenotes/notes/add-port-hardware-offload-type-1232c5ae3f62d7df.yaml new file mode 100644 index 0000000000..b53e6765ad --- /dev/null +++ b/releasenotes/notes/add-port-hardware-offload-type-1232c5ae3f62d7df.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add ``hardware_offload_type`` attribute to ``port`` resource. Users + can set this attribute to a valid value defined in + ``neutron_lib.constants.VALID_HWOL_TYPES``, set "None" or leave it + undefined. diff --git a/releasenotes/notes/add-port-numa-affinity-policy-b42a85dbe26560d2.yaml b/releasenotes/notes/add-port-numa-affinity-policy-b42a85dbe26560d2.yaml new file mode 100644 index 0000000000..2696cb40e4 --- /dev/null +++ b/releasenotes/notes/add-port-numa-affinity-policy-b42a85dbe26560d2.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add ``numa_affinity_policy`` attribute to ``port`` resource. Users + can set this attribute to ``required``, ``deferred`` or ``legacy``. + This parameter is nullable. diff --git a/releasenotes/notes/add-port-trunk-details-ed2d98a36ce70c0f.yaml b/releasenotes/notes/add-port-trunk-details-ed2d98a36ce70c0f.yaml new file mode 100644 index 0000000000..ccfbffc757 --- /dev/null +++ b/releasenotes/notes/add-port-trunk-details-ed2d98a36ce70c0f.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add ``trunk_details`` attribute to ``port`` resource. This attribute is + a dictionary with the trunk ID and a list of subports. 
Each element in the + subports list is a dictionary with the subport ID, the segmentation type + and segmentation ID. diff --git a/releasenotes/notes/add-propagate_uplink_status-to-port-0152d476c65979e3.yaml b/releasenotes/notes/add-propagate_uplink_status-to-port-0152d476c65979e3.yaml new file mode 100644 index 0000000000..28bf68160c --- /dev/null +++ b/releasenotes/notes/add-propagate_uplink_status-to-port-0152d476c65979e3.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + Add ``propagate_uplink_status`` attribute to ``port`` resource. + Users can set this attribute to ``True`` or ``False``. + If it is set to ``True``, uplink status propagation is enabled. + Otherwise, it is disabled. + Neutron server needs to have the API extension + ``uplink-status-propagation`` in order to support this feature. + This feature can be used in SRIOV scenario, in which users + enable uplink status propagation of the SRIOV port + so that the link status of the VF will follow the PF. diff --git a/releasenotes/notes/add-quota-class-set-to-shared-file-systems-43da33e6a3ed65e3.yaml b/releasenotes/notes/add-quota-class-set-to-shared-file-systems-43da33e6a3ed65e3.yaml new file mode 100644 index 0000000000..e102feb3de --- /dev/null +++ b/releasenotes/notes/add-quota-class-set-to-shared-file-systems-43da33e6a3ed65e3.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added get and update to Quota Class Set to file system as a service. diff --git a/releasenotes/notes/add-server-clear-password-256e269223453bd7.yaml b/releasenotes/notes/add-server-clear-password-256e269223453bd7.yaml new file mode 100644 index 0000000000..70f07cabeb --- /dev/null +++ b/releasenotes/notes/add-server-clear-password-256e269223453bd7.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``Server.clear_password`` and equivalent ``clear_server_password`` + proxy method have been added. 
diff --git a/releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml b/releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml new file mode 100644 index 0000000000..a3e76872ea --- /dev/null +++ b/releasenotes/notes/add-server-console-078ed2696e5b04d9.yaml @@ -0,0 +1,6 @@ +--- +features: + - Added get_server_console method to fetch the console + log from a Server. On clouds that do not expose this + feature, a debug line will be logged and an empty + string will be returned. diff --git a/releasenotes/notes/add-server-migrations-6e31183196f14deb.yaml b/releasenotes/notes/add-server-migrations-6e31183196f14deb.yaml new file mode 100644 index 0000000000..8e451cedab --- /dev/null +++ b/releasenotes/notes/add-server-migrations-6e31183196f14deb.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add support for the Compute service's server migrations API, allowing users + to list all migrations for a server as well as force complete or abort + in-progress migrations. diff --git a/releasenotes/notes/add-server-tag-proxy-methods-c791a36d8d4d85f6.yaml b/releasenotes/notes/add-server-tag-proxy-methods-c791a36d8d4d85f6.yaml new file mode 100644 index 0000000000..f502d61023 --- /dev/null +++ b/releasenotes/notes/add-server-tag-proxy-methods-c791a36d8d4d85f6.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + The following new compute proxy methods have been added: + + - ``add_tag_to_server`` + - ``remove_tag_from_server`` + - ``remove_tags_from_server`` diff --git a/releasenotes/notes/add-service-0bcc16eb026eade3.yaml b/releasenotes/notes/add-service-0bcc16eb026eade3.yaml new file mode 100644 index 0000000000..e515dc7520 --- /dev/null +++ b/releasenotes/notes/add-service-0bcc16eb026eade3.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added a new method `openstack.connection.Connection.add_service` which + allows the registration of Proxy/Resource classes defined externally. 
diff --git a/releasenotes/notes/add-sg-rules-bulk-f36a3e2326d74867.yaml b/releasenotes/notes/add-sg-rules-bulk-f36a3e2326d74867.yaml new file mode 100644 index 0000000000..4776d33667 --- /dev/null +++ b/releasenotes/notes/add-sg-rules-bulk-f36a3e2326d74867.yaml @@ -0,0 +1,5 @@ +--- +features: + - Added bulk create security group rules. With new proxy method + `create_security_group_rules` now it's possible to create multiple rules + for certain security group. diff --git a/releasenotes/notes/add-share-access-rules-to-shared-file-362bee34f7331186.yaml b/releasenotes/notes/add-share-access-rules-to-shared-file-362bee34f7331186.yaml new file mode 100644 index 0000000000..005de8728b --- /dev/null +++ b/releasenotes/notes/add-share-access-rules-to-shared-file-362bee34f7331186.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to create, list, get and delete share access rules with the + shared file system service. diff --git a/releasenotes/notes/add-share-network-subnet-to-shared-file-b5de3ce6ca723209.yaml b/releasenotes/notes/add-share-network-subnet-to-shared-file-b5de3ce6ca723209.yaml new file mode 100644 index 0000000000..c61e18c6dd --- /dev/null +++ b/releasenotes/notes/add-share-network-subnet-to-shared-file-b5de3ce6ca723209.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to create, list, get, and delete share network + subnets on the shared file system service. diff --git a/releasenotes/notes/add-share-network-to-shared-file-c5c9a6b8ccf1d958.yaml b/releasenotes/notes/add-share-network-to-shared-file-c5c9a6b8ccf1d958.yaml new file mode 100644 index 0000000000..5177b99778 --- /dev/null +++ b/releasenotes/notes/add-share-network-to-shared-file-c5c9a6b8ccf1d958.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to create, update, list, get, and delete share + networks on the shared file system service. 
diff --git a/releasenotes/notes/add-share-query-mapping-fix-7f3c2d8e9a1b5c4f.yaml b/releasenotes/notes/add-share-query-mapping-fix-7f3c2d8e9a1b5c4f.yaml new file mode 100644 index 0000000000..2100e1d8cd --- /dev/null +++ b/releasenotes/notes/add-share-query-mapping-fix-7f3c2d8e9a1b5c4f.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + Fixed the ``Share`` resource in the shared file system service to properly + support query parameters when listing shares. Previously, query parameters + such as ``all_tenants`` were silently ignored because the ``_query_mapping`` + attribute was missing. This affected admin users trying to list shares + across all projects, as well as filtering by status, name, and other + attributes. diff --git a/releasenotes/notes/add-share-snapshot-instance-to-shared-file-4d935f12d67bf59d.yaml b/releasenotes/notes/add-share-snapshot-instance-to-shared-file-4d935f12d67bf59d.yaml new file mode 100644 index 0000000000..385bc44f95 --- /dev/null +++ b/releasenotes/notes/add-share-snapshot-instance-to-shared-file-4d935f12d67bf59d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to list and get share snapshot instances + on the shared file system service. diff --git a/releasenotes/notes/add-share-snapshot-to-shared-file-82ecedbdbed2e3c5.yaml b/releasenotes/notes/add-share-snapshot-to-shared-file-82ecedbdbed2e3c5.yaml new file mode 100644 index 0000000000..294fddca07 --- /dev/null +++ b/releasenotes/notes/add-share-snapshot-to-shared-file-82ecedbdbed2e3c5.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds support to create, update, list, get, and delete share + snapshots to shared file system service. 
diff --git a/releasenotes/notes/add-share_group-to-shared-file-8cee20d8aa2afbb7.yaml b/releasenotes/notes/add-share_group-to-shared-file-8cee20d8aa2afbb7.yaml new file mode 100644 index 0000000000..ae76ef0553 --- /dev/null +++ b/releasenotes/notes/add-share_group-to-shared-file-8cee20d8aa2afbb7.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to create, update, list, get, and delete share + groups on the shared file system service. diff --git a/releasenotes/notes/add-shared-file-syste-share_instance-fffaea2d3a77ba24.yaml b/releasenotes/notes/add-shared-file-syste-share_instance-fffaea2d3a77ba24.yaml new file mode 100644 index 0000000000..f14a7a4cf7 --- /dev/null +++ b/releasenotes/notes/add-shared-file-syste-share_instance-fffaea2d3a77ba24.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added support to list, get, reset status of, + and force delete share instances + (from shared file system service). diff --git a/releasenotes/notes/add-shared-file-system-locks-support-4859ca93f93a1056.yaml b/releasenotes/notes/add-shared-file-system-locks-support-4859ca93f93a1056.yaml new file mode 100644 index 0000000000..c6dca56013 --- /dev/null +++ b/releasenotes/notes/add-shared-file-system-locks-support-4859ca93f93a1056.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Added support to manipulate resource locks from the shared file system + service. + - | + Added support to restrict the visibility and deletion of the shared file + system share access rules. diff --git a/releasenotes/notes/add-shared-file-system-manage-unmanage-share-830e313f96e5fd2b.yaml b/releasenotes/notes/add-shared-file-system-manage-unmanage-share-830e313f96e5fd2b.yaml new file mode 100644 index 0000000000..d349d6c902 --- /dev/null +++ b/releasenotes/notes/add-shared-file-system-manage-unmanage-share-830e313f96e5fd2b.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to manage and unmanage shares + from the shared file system service. 
\ No newline at end of file diff --git a/releasenotes/notes/add-shared-file-system-share-group-snapshot-c5099e6c8accf077.yaml b/releasenotes/notes/add-shared-file-system-share-group-snapshot-c5099e6c8accf077.yaml new file mode 100644 index 0000000000..9c6b096865 --- /dev/null +++ b/releasenotes/notes/add-shared-file-system-share-group-snapshot-c5099e6c8accf077.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for list, show, update, delete, reset and create + Share Group Snapshots for Shared File Systems service. diff --git a/releasenotes/notes/add-shared-file-system-share-metadata-e0415bb71d8a0a48.yaml b/releasenotes/notes/add-shared-file-system-share-metadata-e0415bb71d8a0a48.yaml new file mode 100644 index 0000000000..6461ec7b7f --- /dev/null +++ b/releasenotes/notes/add-shared-file-system-share-metadata-e0415bb71d8a0a48.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added support to list, get, create, + update, and delete share metadata + from shared file system service. \ No newline at end of file diff --git a/releasenotes/notes/add-shared-file-system-share-resize-ddd650c2e32fed34.yaml b/releasenotes/notes/add-shared-file-system-share-resize-ddd650c2e32fed34.yaml new file mode 100644 index 0000000000..b1adea6265 --- /dev/null +++ b/releasenotes/notes/add-shared-file-system-share-resize-ddd650c2e32fed34.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for shrink/extend share actions. diff --git a/releasenotes/notes/add-shared-file-system-shares-2e1d44a1bb882d6d.yaml b/releasenotes/notes/add-shared-file-system-shares-2e1d44a1bb882d6d.yaml new file mode 100644 index 0000000000..6946d2313c --- /dev/null +++ b/releasenotes/notes/add-shared-file-system-shares-2e1d44a1bb882d6d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added revert share to snapshot to shared + file system service. 
diff --git a/releasenotes/notes/add-shared-file-system-shares-e9f356a318045607.yaml b/releasenotes/notes/add-shared-file-system-shares-e9f356a318045607.yaml new file mode 100644 index 0000000000..e7a1bcb01d --- /dev/null +++ b/releasenotes/notes/add-shared-file-system-shares-e9f356a318045607.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to create, update, list, get, and delete shares + (from shared file system service). diff --git a/releasenotes/notes/add-shared-file-systems-83a3767429fd5e8c.yaml b/releasenotes/notes/add-shared-file-systems-83a3767429fd5e8c.yaml new file mode 100644 index 0000000000..9f770db32f --- /dev/null +++ b/releasenotes/notes/add-shared-file-systems-83a3767429fd5e8c.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Support for the OpenStack Shared File System API (manila) has been + introduced. + - | + Added support to list Shared File System Service API Versions + and Availability Zones. diff --git a/releasenotes/notes/add-shared-file-systems-export-location-a27c1741880c384b.yaml b/releasenotes/notes/add-shared-file-systems-export-location-a27c1741880c384b.yaml new file mode 100644 index 0000000000..df1a1243a9 --- /dev/null +++ b/releasenotes/notes/add-shared-file-systems-export-location-a27c1741880c384b.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to list and show Export Locations + for shares from the Shared File Systems service. 
diff --git a/releasenotes/notes/add-shelve_offload-427f6550fc55e622.yaml b/releasenotes/notes/add-shelve_offload-427f6550fc55e622.yaml new file mode 100644 index 0000000000..0162c0e551 --- /dev/null +++ b/releasenotes/notes/add-shelve_offload-427f6550fc55e622.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds shelve_offload_server method to the compute proxy diff --git a/releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml b/releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml new file mode 100644 index 0000000000..98c320b263 --- /dev/null +++ b/releasenotes/notes/add-show-all-images-flag-352748b6c3d99f3f.yaml @@ -0,0 +1,9 @@ +--- +features: + - Added flag "show_all" to list_images. The behavior of + Glance v2 to only show shared images if they have been + accepted by the user can be confusing, and the only way + to change it is to use search_images(filters=dict(member_status='all')) + which isn't terribly obvious. "show_all=True" will set + that flag, as well as disabling the filtering of images + in "deleted" state. 
diff --git a/releasenotes/notes/add-stack-events-b8674d7bb657e789.yaml b/releasenotes/notes/add-stack-events-b8674d7bb657e789.yaml new file mode 100644 index 0000000000..cdc9367f0a --- /dev/null +++ b/releasenotes/notes/add-stack-events-b8674d7bb657e789.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``stack_events`` method and ``StackEvent`` Class have been + added to retrieve stack events \ No newline at end of file diff --git a/releasenotes/notes/add-stack-export-3ace746a8c80d766.yaml b/releasenotes/notes/add-stack-export-3ace746a8c80d766.yaml new file mode 100644 index 0000000000..00d680b0fa --- /dev/null +++ b/releasenotes/notes/add-stack-export-3ace746a8c80d766.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add ``export_stack`` to print stack information in a json format diff --git a/releasenotes/notes/add-stack-suspend-and-resume-26d4fc5904291d5d.yaml b/releasenotes/notes/add-stack-suspend-and-resume-26d4fc5904291d5d.yaml new file mode 100644 index 0000000000..fa3eecd469 --- /dev/null +++ b/releasenotes/notes/add-stack-suspend-and-resume-26d4fc5904291d5d.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds ``suspend_stack`` and ``resume_stack`` to support stack non-lifecycle operations. \ No newline at end of file diff --git a/releasenotes/notes/add-storage-pool-to-shared-file-ad45da1b2510b412.yaml b/releasenotes/notes/add-storage-pool-to-shared-file-ad45da1b2510b412.yaml new file mode 100644 index 0000000000..7303d35601 --- /dev/null +++ b/releasenotes/notes/add-storage-pool-to-shared-file-ad45da1b2510b412.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for retrieving storage pools information from + the Shared File Systems service. 
diff --git a/releasenotes/notes/add-support-allowed-cidrs-loadbalancer-listener-809e523a8bd6a7d5.yaml b/releasenotes/notes/add-support-allowed-cidrs-loadbalancer-listener-809e523a8bd6a7d5.yaml new file mode 100644 index 0000000000..3d47aa3975 --- /dev/null +++ b/releasenotes/notes/add-support-allowed-cidrs-loadbalancer-listener-809e523a8bd6a7d5.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added allowed_cidrs parameter into load balancer listener. diff --git a/releasenotes/notes/add-support-availability_zone-loadbalancer-a18aa1708d7859e2.yaml b/releasenotes/notes/add-support-availability_zone-loadbalancer-a18aa1708d7859e2.yaml new file mode 100644 index 0000000000..0a0efef263 --- /dev/null +++ b/releasenotes/notes/add-support-availability_zone-loadbalancer-a18aa1708d7859e2.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added availability_zone parameter into load balancer. \ No newline at end of file diff --git a/releasenotes/notes/add-support-for-setting-static-routes-b3ce6cac2c5e9e51.yaml b/releasenotes/notes/add-support-for-setting-static-routes-b3ce6cac2c5e9e51.yaml new file mode 100644 index 0000000000..0b7c577ddd --- /dev/null +++ b/releasenotes/notes/add-support-for-setting-static-routes-b3ce6cac2c5e9e51.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The networking API v2 specification, which is implemented by OpenStack + Neutron, features an optional routes parameter - when updating a router + (PUT requests). Static routes are crucial for routers to handle traffic + from subnets not directly connected to a router. The routes parameter has + now been added to the OpenStackCloud.update_router method as a list of + dictionaries with destination and nexthop parameters. 
diff --git a/releasenotes/notes/add-system-role-assignment-693dd3e1da33a54d.yaml b/releasenotes/notes/add-system-role-assignment-693dd3e1da33a54d.yaml new file mode 100644 index 0000000000..cb171b3efd --- /dev/null +++ b/releasenotes/notes/add-system-role-assignment-693dd3e1da33a54d.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Add support for system role assignment. A system role assignment + ultimately controls access to system-level API calls. + + Good examples of system-level APIs include management of the + service catalog and compute hypervisors. + + `System role assignment API reference + `_. diff --git a/releasenotes/notes/add-tls-container-refs-params-for-octavia-pools-76f295cd2daa7f53.yaml b/releasenotes/notes/add-tls-container-refs-params-for-octavia-pools-76f295cd2daa7f53.yaml new file mode 100644 index 0000000000..e7a87adbcd --- /dev/null +++ b/releasenotes/notes/add-tls-container-refs-params-for-octavia-pools-76f295cd2daa7f53.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add both ``ca_tls_container_ref`` and ``crl_container_ref`` + parameters for Octavia pools, which can be used to store the ca + certificate used by backend servers and the revocation list file. diff --git a/releasenotes/notes/add-tls-version-support-for-octavia-7ecb372e6fb58101.yaml b/releasenotes/notes/add-tls-version-support-for-octavia-7ecb372e6fb58101.yaml new file mode 100644 index 0000000000..d96dd0c9e4 --- /dev/null +++ b/releasenotes/notes/add-tls-version-support-for-octavia-7ecb372e6fb58101.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added the ``tls_versions`` properties to listener.py + and pool.py for storing a python list of TLS protocol + versions to be used by the pools and listeners. 
diff --git a/releasenotes/notes/add-tls_enabled-parameter-for-octavia-pools-f0a23436d826b313.yaml b/releasenotes/notes/add-tls_enabled-parameter-for-octavia-pools-f0a23436d826b313.yaml new file mode 100644 index 0000000000..e3cc8675a2 --- /dev/null +++ b/releasenotes/notes/add-tls_enabled-parameter-for-octavia-pools-f0a23436d826b313.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``tls_enabled`` parameter for Octavia pools, it can be used to enable + TLS communications between a load balancer and its member servers. diff --git a/releasenotes/notes/add-unified-limit-5ac334a08e137a70.yaml b/releasenotes/notes/add-unified-limit-5ac334a08e137a70.yaml new file mode 100644 index 0000000000..8bb65ec01e --- /dev/null +++ b/releasenotes/notes/add-unified-limit-5ac334a08e137a70.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added the unified limits basic CRUD methods. It includes two kinds of + resources: `registered_limit` and `limit`. diff --git a/releasenotes/notes/add-user-group-assignment-9c419b6c6bfe392c.yaml b/releasenotes/notes/add-user-group-assignment-9c419b6c6bfe392c.yaml new file mode 100644 index 0000000000..266b5f3c48 --- /dev/null +++ b/releasenotes/notes/add-user-group-assignment-9c419b6c6bfe392c.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for user group assignments in identity service. diff --git a/releasenotes/notes/add-user-message-to-shared-file-85d7bbccf8347c4f.yaml b/releasenotes/notes/add-user-message-to-shared-file-85d7bbccf8347c4f.yaml new file mode 100644 index 0000000000..b4d4492309 --- /dev/null +++ b/releasenotes/notes/add-user-message-to-shared-file-85d7bbccf8347c4f.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support to list, get, and delete user messages + on the shared file system service. 
diff --git a/releasenotes/notes/add-vif-optional-params-abb755b74f076eb2.yaml b/releasenotes/notes/add-vif-optional-params-abb755b74f076eb2.yaml new file mode 100644 index 0000000000..924436dea0 --- /dev/null +++ b/releasenotes/notes/add-vif-optional-params-abb755b74f076eb2.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Extend the ``attach_vif`` and ``attach_vif_to_node`` methods of the + baremetal proxy to accept optional parameters for VIF port UUID and + VIF portgroup UUID. diff --git a/releasenotes/notes/add-vlan_qinq-to-the-network-72d69e4f8856d48f.yaml b/releasenotes/notes/add-vlan_qinq-to-the-network-72d69e4f8856d48f.yaml new file mode 100644 index 0000000000..95e2e2f704 --- /dev/null +++ b/releasenotes/notes/add-vlan_qinq-to-the-network-72d69e4f8856d48f.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add ``vlan_qinq`` attribute to the ``network`` resource. Users can use this + attribute to create network which will allow to configure VLANs + transparently in the guest VM and will use ethertype ``0x8a88 (802.1ad)``. diff --git a/releasenotes/notes/add-vmedia-support-20494ed415e5b32b.yaml b/releasenotes/notes/add-vmedia-support-20494ed415e5b32b.yaml new file mode 100644 index 0000000000..3469532825 --- /dev/null +++ b/releasenotes/notes/add-vmedia-support-20494ed415e5b32b.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Implements virtual media attach/detach API for bare metal nodes. 
diff --git a/releasenotes/notes/add-volume-attachment-support-b5f9a9e78ba88355.yaml b/releasenotes/notes/add-volume-attachment-support-b5f9a9e78ba88355.yaml new file mode 100644 index 0000000000..d540fd0587 --- /dev/null +++ b/releasenotes/notes/add-volume-attachment-support-b5f9a9e78ba88355.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Added support for: + + * Create Attachment + * Update Attachment + * List Attachment + * Get Attachment + * Delete Attachment + * Complete Attachment diff --git a/releasenotes/notes/add-volume-extend-support-86e5c8cff5d6874e.yaml b/releasenotes/notes/add-volume-extend-support-86e5c8cff5d6874e.yaml new file mode 100644 index 0000000000..b0816fb8a8 --- /dev/null +++ b/releasenotes/notes/add-volume-extend-support-86e5c8cff5d6874e.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add the ability to extend a volume size with extend_volume method. diff --git a/releasenotes/notes/add-volume-group-replication-actions-c64b2641625a5a2a.yaml b/releasenotes/notes/add-volume-group-replication-actions-c64b2641625a5a2a.yaml new file mode 100644 index 0000000000..f271a5cdb9 --- /dev/null +++ b/releasenotes/notes/add-volume-group-replication-actions-c64b2641625a5a2a.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for the volume group replication actions. diff --git a/releasenotes/notes/add-volume-image-metadata-support-c61bcb918fdff529.yaml b/releasenotes/notes/add-volume-image-metadata-support-c61bcb918fdff529.yaml new file mode 100644 index 0000000000..6bd02c360e --- /dev/null +++ b/releasenotes/notes/add-volume-image-metadata-support-c61bcb918fdff529.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for managing volume image metadata. 
diff --git a/releasenotes/notes/add-volume-snapshot-manage-unmanage-support-fc0be2a3fb4427d1.yaml b/releasenotes/notes/add-volume-snapshot-manage-unmanage-support-fc0be2a3fb4427d1.yaml new file mode 100644 index 0000000000..b54379ad4d --- /dev/null +++ b/releasenotes/notes/add-volume-snapshot-manage-unmanage-support-fc0be2a3fb4427d1.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for volume snapshot manage and volume + snapshot unmanage. diff --git a/releasenotes/notes/add-volume-transfer-support-28bf34a243d96e1b.yaml b/releasenotes/notes/add-volume-transfer-support-28bf34a243d96e1b.yaml new file mode 100644 index 0000000000..40976dcab0 --- /dev/null +++ b/releasenotes/notes/add-volume-transfer-support-28bf34a243d96e1b.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for volume transfer create, find, + delete, get, list and accept. diff --git a/releasenotes/notes/add-volume-type-update-b84f50b7fa3b061d.yaml b/releasenotes/notes/add-volume-type-update-b84f50b7fa3b061d.yaml new file mode 100644 index 0000000000..3e9f3630a2 --- /dev/null +++ b/releasenotes/notes/add-volume-type-update-b84f50b7fa3b061d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for updating block storage volume type objects. diff --git a/releasenotes/notes/add_access_rules-06eb8a1f9fcd9367.yaml b/releasenotes/notes/add_access_rules-06eb8a1f9fcd9367.yaml new file mode 100644 index 0000000000..60ebf1c6bb --- /dev/null +++ b/releasenotes/notes/add_access_rules-06eb8a1f9fcd9367.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for `access_rules + `_. 
diff --git a/releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml b/releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml new file mode 100644 index 0000000000..98dd190bff --- /dev/null +++ b/releasenotes/notes/add_description_create_user-0ddc9a0ef4da840d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add description parameter to create_user, available on Keystone v3 diff --git a/releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml b/releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml new file mode 100644 index 0000000000..0d464961b9 --- /dev/null +++ b/releasenotes/notes/add_designate_recordsets_support-69af0a6b317073e7.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for Designate recordsets resources, with the + usual methods (search/list/get/create/update/delete). diff --git a/releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml b/releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml new file mode 100644 index 0000000000..f5253af0fc --- /dev/null +++ b/releasenotes/notes/add_designate_zones_support-35fa9b8b09995b43.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for Designate zones resources, with the + usual methods (search/list/get/create/update/delete). diff --git a/releasenotes/notes/add_filter_ports_by_conductor_groups-7e21ddc8eb941536.yaml b/releasenotes/notes/add_filter_ports_by_conductor_groups-7e21ddc8eb941536.yaml new file mode 100644 index 0000000000..7c4faa5ab0 --- /dev/null +++ b/releasenotes/notes/add_filter_ports_by_conductor_groups-7e21ddc8eb941536.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for filtering baremetal ports by ``conductor_groups``. 
diff --git a/releasenotes/notes/add_heat_tag_support-135aa43ba1dce3bb.yaml b/releasenotes/notes/add_heat_tag_support-135aa43ba1dce3bb.yaml new file mode 100644 index 0000000000..4e0a0ea873 --- /dev/null +++ b/releasenotes/notes/add_heat_tag_support-135aa43ba1dce3bb.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add tags support when creating a stack, as specified by the openstack + orchestration api at [1] + + [1]https://developer.openstack.org/api-ref/orchestration/v1/#create-stack diff --git a/releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml b/releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml new file mode 100644 index 0000000000..6a6ff37a1f --- /dev/null +++ b/releasenotes/notes/add_host_aggregate_support-471623faf45ec3c3.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for host aggregates and host aggregate + membership. diff --git a/releasenotes/notes/add_image_import_support-6cea2e7d7a781071.yaml b/releasenotes/notes/add_image_import_support-6cea2e7d7a781071.yaml new file mode 100644 index 0000000000..da0ffe5998 --- /dev/null +++ b/releasenotes/notes/add_image_import_support-6cea2e7d7a781071.yaml @@ -0,0 +1,7 @@ +--- +features: + - Add ability to create image without upload data at the same time + - Add support for interoperable image import process as introduced in the + Image API v2.6 at [1] + + [1]https://developer.openstack.org/api-ref/image/v2/index.html#interoperable-image-import diff --git a/releasenotes/notes/add_influxdb_stats-665714d715302ad5.yaml b/releasenotes/notes/add_influxdb_stats-665714d715302ad5.yaml new file mode 100644 index 0000000000..f88ae1147d --- /dev/null +++ b/releasenotes/notes/add_influxdb_stats-665714d715302ad5.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add possibility to report API metrics into InfluxDB. 
diff --git a/releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml b/releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml new file mode 100644 index 0000000000..21dbed6f1b --- /dev/null +++ b/releasenotes/notes/add_magnum_baymodel_support-e35e5aab0b14ff75.yaml @@ -0,0 +1,7 @@ +--- +features: + - Add support for Magnum baymodels, with the + usual methods (search/list/get/create/update/delete). Due to upcoming + rename in Magnum from baymodel to cluster_template, the shade + functionality uses the term cluster_template. However, baymodel aliases + are provided for each api call. diff --git a/releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml b/releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml new file mode 100644 index 0000000000..3a32e3ddea --- /dev/null +++ b/releasenotes/notes/add_magnum_services_support-3d95f9dcc60b5573.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for listing Magnum services. diff --git a/releasenotes/notes/add_max_item_parameter-3ab3c2e1cd2312c5.yaml b/releasenotes/notes/add_max_item_parameter-3ab3c2e1cd2312c5.yaml new file mode 100644 index 0000000000..5eb60a84fc --- /dev/null +++ b/releasenotes/notes/add_max_item_parameter-3ab3c2e1cd2312c5.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + A new parameter, ``max_items``, is added to the ``Resource.list`` + method. This allows users to specify the maximum number of resources + that should be returned to the user, as opposed to the maximum number + of items that should be requested from the server in a single request. + The latter is already handled by the ``limit`` parameter diff --git a/releasenotes/notes/add_project_cleanup-39c3517b25a5372e.yaml b/releasenotes/notes/add_project_cleanup-39c3517b25a5372e.yaml new file mode 100644 index 0000000000..f99c66fc92 --- /dev/null +++ b/releasenotes/notes/add_project_cleanup-39c3517b25a5372e.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Project cleanup functionality. 
It provides a single method in the + connection object, which calls cleanup method in all supported services + (both part of the SDK itself and all "imported" in the runtime or through + the vendor_hook functionality). Cleanup is working in multiple threads + where possible (no dependencies between services). diff --git a/releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml b/releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml new file mode 100644 index 0000000000..e903841346 --- /dev/null +++ b/releasenotes/notes/add_server_group_support-dfa472e3dae7d34d.yaml @@ -0,0 +1,3 @@ +--- +features: + - Adds support to create and delete server groups. diff --git a/releasenotes/notes/add_support_port_binding_attrs-c70966724eb970f3.yaml b/releasenotes/notes/add_support_port_binding_attrs-c70966724eb970f3.yaml new file mode 100644 index 0000000000..bcee489ca8 --- /dev/null +++ b/releasenotes/notes/add_support_port_binding_attrs-c70966724eb970f3.yaml @@ -0,0 +1,5 @@ +--- +features: + - Add support for query of port binding extended attributes including + 'binding:host_id', 'binding:vnic_type', 'binding:vif_type', + 'binding:vif_details', and 'binding:profile'. diff --git a/releasenotes/notes/add_update_server-8761059d6de7e68b.yaml b/releasenotes/notes/add_update_server-8761059d6de7e68b.yaml new file mode 100644 index 0000000000..5bbe898d41 --- /dev/null +++ b/releasenotes/notes/add_update_server-8761059d6de7e68b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add update_server method to update name or description of a server. diff --git a/releasenotes/notes/add_update_service-28e590a7a7524053.yaml b/releasenotes/notes/add_update_service-28e590a7a7524053.yaml new file mode 100644 index 0000000000..ff3e7befa4 --- /dev/null +++ b/releasenotes/notes/add_update_service-28e590a7a7524053.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add the ability to update a keystone service information. This feature is + not available on keystone v2.0. 
The new function, update_service(), allows + the user to update description, name of service, service type, and enabled + status. diff --git a/releasenotes/notes/add_vendor_hook-e87b6afb7f215a30.yaml b/releasenotes/notes/add_vendor_hook-e87b6afb7f215a30.yaml new file mode 100644 index 0000000000..1ef8c2b856 --- /dev/null +++ b/releasenotes/notes/add_vendor_hook-e87b6afb7f215a30.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Add possibility to automatically invoke vendor hooks. This can be done + either through extending profile (vendor_hook), or passing `vendor_hook` + parameter to the connection. The format of the vendor_hook is the same as + in the setuptools (module.name:function_name). The hook will get connection + as the only parameter. diff --git a/releasenotes/notes/added-federation-support-3b65e531e57211f5.yaml b/releasenotes/notes/added-federation-support-3b65e531e57211f5.yaml new file mode 100644 index 0000000000..8c7880e602 --- /dev/null +++ b/releasenotes/notes/added-federation-support-3b65e531e57211f5.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds support to create and manage Identity v3 Federation resources - + Specifically, Identity Providers, Mappings and Federation Protocols. diff --git a/releasenotes/notes/added-senlin-support-1eb4e47c31258f66.yaml b/releasenotes/notes/added-senlin-support-1eb4e47c31258f66.yaml new file mode 100644 index 0000000000..ccc38b29e8 --- /dev/null +++ b/releasenotes/notes/added-senlin-support-1eb4e47c31258f66.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added support for senlin diff --git a/releasenotes/notes/allocation-api-04f6b3b7a0ccc850.yaml b/releasenotes/notes/allocation-api-04f6b3b7a0ccc850.yaml new file mode 100644 index 0000000000..8ca573f135 --- /dev/null +++ b/releasenotes/notes/allocation-api-04f6b3b7a0ccc850.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for the baremetal allocation API. 
diff --git a/releasenotes/notes/allocation-update-910c36c1290e5121.yaml b/releasenotes/notes/allocation-update-910c36c1290e5121.yaml new file mode 100644 index 0000000000..6b9147a43d --- /dev/null +++ b/releasenotes/notes/allocation-update-910c36c1290e5121.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Allows updating ``name`` and ``extra`` fields of a baremetal allocation. diff --git a/releasenotes/notes/alternate-auth-context-3939f1492a0e1355.yaml b/releasenotes/notes/alternate-auth-context-3939f1492a0e1355.yaml new file mode 100644 index 0000000000..e454f5b91e --- /dev/null +++ b/releasenotes/notes/alternate-auth-context-3939f1492a0e1355.yaml @@ -0,0 +1,5 @@ +--- +features: + - Added methods for making new cloud connections + based on the current OpenStackCloud. This should enable working + more easily across projects or user accounts. diff --git a/releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml b/releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml new file mode 100644 index 0000000000..cc98f8c9fc --- /dev/null +++ b/releasenotes/notes/always-detail-cluster-templates-3eb4b5744ba327ac.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Cluster Templates have data model and normalization + now. As a result, the detail parameter is now ignored + and detailed records are always returned. diff --git a/releasenotes/notes/auth-url-vexxhost-8d63cd17bde21320.yaml b/releasenotes/notes/auth-url-vexxhost-8d63cd17bde21320.yaml new file mode 100644 index 0000000000..f32e1eccac --- /dev/null +++ b/releasenotes/notes/auth-url-vexxhost-8d63cd17bde21320.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - | + The ``v3password`` ``auth_type`` implies that the ``auth_url`` given + is a versioned endpoint and so discovery is skipped for auth. Previously + the ``auth_type`` for Vexxhost had been set to ``v3password`` due to v2 + being no longer available to give better errors to users. 
The ``auth_url`` + was unfortunately left unversioned, so authentication ceased working. The + ``auth_url`` has been changed to the versioned endpoint. diff --git a/releasenotes/notes/bail-on-failed-service-cf299c37d5647b08.yaml b/releasenotes/notes/bail-on-failed-service-cf299c37d5647b08.yaml new file mode 100644 index 0000000000..5f004f8cf4 --- /dev/null +++ b/releasenotes/notes/bail-on-failed-service-cf299c37d5647b08.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + When a known service cannot be resolved to a supported version, + an exception is now thrown instead of just returning a blank + Proxy object. This allows returning sane errors to users. diff --git a/releasenotes/notes/baremetal-configdrive-mkisofs-xorrisofs-075db4d7d80e5a13.yaml b/releasenotes/notes/baremetal-configdrive-mkisofs-xorrisofs-075db4d7d80e5a13.yaml new file mode 100644 index 0000000000..008459e8d9 --- /dev/null +++ b/releasenotes/notes/baremetal-configdrive-mkisofs-xorrisofs-075db4d7d80e5a13.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + When generating a config drive for baremetal, "mkisofs" and "xorrisofs" + are now supported beside the already available "genisoimage" binary. + This is useful on environment where the "genisoimage" binary is not + available but "mkisofs" and/or "xorrisofs" are available. + diff --git a/releasenotes/notes/baremetal-details-09b27fba82111cfb.yaml b/releasenotes/notes/baremetal-details-09b27fba82111cfb.yaml new file mode 100644 index 0000000000..f54d0dbd1e --- /dev/null +++ b/releasenotes/notes/baremetal-details-09b27fba82111cfb.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + The objects returned by baremetal detailed listing functions + (``connection.baremetal.{nodes,ports,chassis,port_groups}``) are now + fully functional, e.g. can be directly updated or deleted. +deprecations: + - | + The following baremetal resource classes are no longer used and will be + removed in a future release: ``NodeDetail``, ``PortDetail``, + ``ChassisDetail`` and ``PortGroupDetail``. 
The regular ``Node``, ``Port``, + ``Chassis`` and ``PortGroup`` are now used instead. diff --git a/releasenotes/notes/baremetal-errors-5cc871e8df4c9d95.yaml b/releasenotes/notes/baremetal-errors-5cc871e8df4c9d95.yaml new file mode 100644 index 0000000000..cc1fd8a40c --- /dev/null +++ b/releasenotes/notes/baremetal-errors-5cc871e8df4c9d95.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Adds support for error messages from the bare metal service. diff --git a/releasenotes/notes/baremetal-fields-1f6fbcd8bd1ea2aa.yaml b/releasenotes/notes/baremetal-fields-1f6fbcd8bd1ea2aa.yaml new file mode 100644 index 0000000000..1331176239 --- /dev/null +++ b/releasenotes/notes/baremetal-fields-1f6fbcd8bd1ea2aa.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes specifying fields when listing bare metal resources. diff --git a/releasenotes/notes/baremetal-fields-624546fa533a8287.yaml b/releasenotes/notes/baremetal-fields-624546fa533a8287.yaml new file mode 100644 index 0000000000..053140d58d --- /dev/null +++ b/releasenotes/notes/baremetal-fields-624546fa533a8287.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds support for fetching specific fields when getting bare metal + `Node`, `Port`, `PortGroup`, `Chassis` and `Allocation` resources. diff --git a/releasenotes/notes/baremetal-fields-convert-857b8804327f1e86.yaml b/releasenotes/notes/baremetal-fields-convert-857b8804327f1e86.yaml new file mode 100644 index 0000000000..07fa11f848 --- /dev/null +++ b/releasenotes/notes/baremetal-fields-convert-857b8804327f1e86.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes conversion of the bare metal ``fields`` argument from SDK to + server-side field names (e.g. ``instance_id`` to ``instance_uuid``). 
diff --git a/releasenotes/notes/baremetal-introspection-973351b3ee76309e.yaml b/releasenotes/notes/baremetal-introspection-973351b3ee76309e.yaml new file mode 100644 index 0000000000..7ab2885bce --- /dev/null +++ b/releasenotes/notes/baremetal-introspection-973351b3ee76309e.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for the bare metal introspection service. diff --git a/releasenotes/notes/baremetal-maintenance-5cb95c6d898d4d72.yaml b/releasenotes/notes/baremetal-maintenance-5cb95c6d898d4d72.yaml new file mode 100644 index 0000000000..4a309b9fdf --- /dev/null +++ b/releasenotes/notes/baremetal-maintenance-5cb95c6d898d4d72.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Implements updating the baremetal Node's ``maintenance_reason``. diff --git a/releasenotes/notes/baremetal-patch-feebd96b1b92f3b9.yaml b/releasenotes/notes/baremetal-patch-feebd96b1b92f3b9.yaml new file mode 100644 index 0000000000..bcf4edb3ff --- /dev/null +++ b/releasenotes/notes/baremetal-patch-feebd96b1b92f3b9.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + Adds support for changing bare metal resources by providing a JSON patch. + Adds the following calls to the bare metal proxy: ``patch_node``, + ``patch_port``, ``patch_port_group`` and ``patch_chassis``. +deprecations: + - | + The ``set_node_instance_info`` call is deprecated, use ``patch_machine`` + with the same arguments instead. + - | + The ``purge_node_instance_info`` call is deprecated, use ``patch_machine`` + or ``update_machine`` instead. diff --git a/releasenotes/notes/baremetal-ports-cc0f56ae0d192aba.yaml b/releasenotes/notes/baremetal-ports-cc0f56ae0d192aba.yaml new file mode 100644 index 0000000000..90adce120f --- /dev/null +++ b/releasenotes/notes/baremetal-ports-cc0f56ae0d192aba.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``OpenStackCloud`` bare metal NIC calls now support all microversions + supported by the SDK. Previously version 1.6 was hardcoded. 
diff --git a/releasenotes/notes/baremetal-reservation-40327923092e9647.yaml b/releasenotes/notes/baremetal-reservation-40327923092e9647.yaml new file mode 100644 index 0000000000..9b5f7704e1 --- /dev/null +++ b/releasenotes/notes/baremetal-reservation-40327923092e9647.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Added ``wait_for_node_reservation`` to the baremetal proxy. +deprecations: + - | + The `OpenStackCloud` ``wait_for_baremetal_node_lock`` call is deprecated. + Generally, users should not have to call it. The new + ``wait_for_node_reservation`` from the baremetal proxy can be used when + needed. diff --git a/releasenotes/notes/baremetal-retired-fields-f56a4632ad4797d7.yaml b/releasenotes/notes/baremetal-retired-fields-f56a4632ad4797d7.yaml new file mode 100644 index 0000000000..115febd22b --- /dev/null +++ b/releasenotes/notes/baremetal-retired-fields-f56a4632ad4797d7.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds ``is_retired`` and ``retired_reason`` to the baremetal Node schema. \ No newline at end of file diff --git a/releasenotes/notes/baremetal-retries-804f553b4e22b3bf.yaml b/releasenotes/notes/baremetal-retries-804f553b4e22b3bf.yaml new file mode 100644 index 0000000000..b54dc1f19c --- /dev/null +++ b/releasenotes/notes/baremetal-retries-804f553b4e22b3bf.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + Changes the ``baremetal.attach_vif_to_node`` call to retry HTTP CONFLICT + by default. While it's a valid error code when a VIF is already attached + to a node, the same code is also used when the target node is locked. + The latter happens more often, so the retries are now on by default and + can be disabled by setting ``retry_on_conflict`` to ``False``. 
diff --git a/releasenotes/notes/baremetal-retries-ff8aa8f73fb97415.yaml b/releasenotes/notes/baremetal-retries-ff8aa8f73fb97415.yaml new file mode 100644 index 0000000000..c654c5b185 --- /dev/null +++ b/releasenotes/notes/baremetal-retries-ff8aa8f73fb97415.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The bare metal operations now retry HTTP 409 and 503 by default. The number + of retries can be changed via the ``baremetal_status_code_retries`` + configuration option (defaulting to 5). diff --git a/releasenotes/notes/baremetal-traits-d1137318db33b8d1.yaml b/releasenotes/notes/baremetal-traits-d1137318db33b8d1.yaml new file mode 100644 index 0000000000..7e706e611a --- /dev/null +++ b/releasenotes/notes/baremetal-traits-d1137318db33b8d1.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Implements add/remove/set traits API for bare metal nodes. diff --git a/releasenotes/notes/baremetal-update-80effb38aae8e02d.yaml b/releasenotes/notes/baremetal-update-80effb38aae8e02d.yaml new file mode 100644 index 0000000000..45ddbb2544 --- /dev/null +++ b/releasenotes/notes/baremetal-update-80effb38aae8e02d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Correct updating bare metal resources. Previously an incorrect body used + to be sent. diff --git a/releasenotes/notes/baremetal-validate-ccce2a37d2a20d96.yaml b/releasenotes/notes/baremetal-validate-ccce2a37d2a20d96.yaml new file mode 100644 index 0000000000..7783c2fb99 --- /dev/null +++ b/releasenotes/notes/baremetal-validate-ccce2a37d2a20d96.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for bare metal node validation to the bare metal proxy. diff --git a/releasenotes/notes/baremetal-vif-122457118c722a9b.yaml b/releasenotes/notes/baremetal-vif-122457118c722a9b.yaml new file mode 100644 index 0000000000..061d703a18 --- /dev/null +++ b/releasenotes/notes/baremetal-vif-122457118c722a9b.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Implements VIF attach/detach API for bare metal nodes. 
diff --git a/releasenotes/notes/baremetal-wait-e4571cdb150b188a.yaml b/releasenotes/notes/baremetal-wait-e4571cdb150b188a.yaml new file mode 100644 index 0000000000..c104a7fec7 --- /dev/null +++ b/releasenotes/notes/baremetal-wait-e4571cdb150b188a.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + The baremetal calls ``wait_for_nodes_provision_state``, + ``wait_for_allocation`` and the baremetal introspection call + ``wait_for_introspection`` now raise ``ResourceFailure`` on reaching + an error state instead of a generic ``SDKException``. diff --git a/releasenotes/notes/basic-api-cache-4ad8cf2754b004d1.yaml b/releasenotes/notes/basic-api-cache-4ad8cf2754b004d1.yaml new file mode 100644 index 0000000000..7ac9f23a22 --- /dev/null +++ b/releasenotes/notes/basic-api-cache-4ad8cf2754b004d1.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add possibility to cache GET requests using dogpile cache. diff --git a/releasenotes/notes/bgpvpn-list-filters-e76183a7008c0631.yaml b/releasenotes/notes/bgpvpn-list-filters-e76183a7008c0631.yaml new file mode 100644 index 0000000000..4eb359e9b3 --- /dev/null +++ b/releasenotes/notes/bgpvpn-list-filters-e76183a7008c0631.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + ``openstack.network.v2.bgpvpn.BgpVpn`` can now be filtered by its + associations to `networks`, `routers` and `ports`. Additionally, + filtering for the attributes `name`, `project_id`, `local_pref`, `vni` + and `type` is now done on server-side. diff --git a/releasenotes/notes/block-storage-backup-5886e91fd6e423bf.yaml b/releasenotes/notes/block-storage-backup-5886e91fd6e423bf.yaml new file mode 100644 index 0000000000..d6f82506b3 --- /dev/null +++ b/releasenotes/notes/block-storage-backup-5886e91fd6e423bf.yaml @@ -0,0 +1,3 @@ +--- +features: + - Implement block-storage.v2 Backup resource with restore functionality. 
diff --git a/releasenotes/notes/block-storage-init-return-95b465b4755f03ca.yaml b/releasenotes/notes/block-storage-init-return-95b465b4755f03ca.yaml new file mode 100644 index 0000000000..6650ac89af --- /dev/null +++ b/releasenotes/notes/block-storage-init-return-95b465b4755f03ca.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Methods ``openstack.block_storage.v3.volume.Volume.init_attachment`` and + ``block_storage.init_volume_attachment`` now return the results of the POST + request instead of None. This replicates the behaviour of cinderclient; the + returned data is used by nova and ironic for managing volume attachments. \ No newline at end of file diff --git a/releasenotes/notes/block-storage-qs-0e3b69be2e709b65.yaml b/releasenotes/notes/block-storage-qs-0e3b69be2e709b65.yaml new file mode 100644 index 0000000000..01adc564e3 --- /dev/null +++ b/releasenotes/notes/block-storage-qs-0e3b69be2e709b65.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add block storage QuotaSet resource and proxy methods. diff --git a/releasenotes/notes/block-storage-v3-9798d584d088c048.yaml b/releasenotes/notes/block-storage-v3-9798d584d088c048.yaml new file mode 100644 index 0000000000..0ce997f492 --- /dev/null +++ b/releasenotes/notes/block-storage-v3-9798d584d088c048.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added support for block storage v3. diff --git a/releasenotes/notes/block_storage-type_encryption-121f8a222c822fb5.yaml b/releasenotes/notes/block_storage-type_encryption-121f8a222c822fb5.yaml new file mode 100644 index 0000000000..deed2290d4 --- /dev/null +++ b/releasenotes/notes/block_storage-type_encryption-121f8a222c822fb5.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add support for block storage type encryption parameters. 
diff --git a/releasenotes/notes/boot-on-server-group-a80e51850db24b3d.yaml b/releasenotes/notes/boot-on-server-group-a80e51850db24b3d.yaml new file mode 100644 index 0000000000..4f4a39c23c --- /dev/null +++ b/releasenotes/notes/boot-on-server-group-a80e51850db24b3d.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added ``group`` parameter to create_server to allow + booting a server into a specific server group. diff --git a/releasenotes/notes/bug-2001080-de52ead3c5466792.yaml b/releasenotes/notes/bug-2001080-de52ead3c5466792.yaml new file mode 100644 index 0000000000..08f83f06a2 --- /dev/null +++ b/releasenotes/notes/bug-2001080-de52ead3c5466792.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + [`bug 2001080 `_] + Project update will only update the enabled field of projects when + ``enabled=True`` or ``enabled=False`` is passed explicitly. The previous + behavior had ``enabled=True`` as the default. diff --git a/releasenotes/notes/bug-2010898-430da335e4df0efe.yaml b/releasenotes/notes/bug-2010898-430da335e4df0efe.yaml new file mode 100644 index 0000000000..2d09fc799a --- /dev/null +++ b/releasenotes/notes/bug-2010898-430da335e4df0efe.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + [`bug 2010898 `_] + Fix Swift endpoint url handling to determine info/caps url diff --git a/releasenotes/notes/bug-2081292-def552ed9c4e24a3.yaml b/releasenotes/notes/bug-2081292-def552ed9c4e24a3.yaml new file mode 100644 index 0000000000..8acad24b07 --- /dev/null +++ b/releasenotes/notes/bug-2081292-def552ed9c4e24a3.yaml @@ -0,0 +1,10 @@ +--- +fixes: + - | + The ``update_quota_set`` methods in the Compute and Block Storage (v2, v3) + proxy APIs were modified in v3.3.0 to accept ``Project`` objects as the + first argument. A compatibility shim was included to handle callers still + passing ``QuotaSet`` objects, but this shim did not modify the provided + ``QuotaSet`` object in place as the previous code did. This has now been + fixed. The shim is still expected to be removed in v5.0.0. 
+ [`bug 2081292 `_] diff --git a/releasenotes/notes/bug-2137505-9390f7f914817f81.yaml b/releasenotes/notes/bug-2137505-9390f7f914817f81.yaml new file mode 100644 index 0000000000..a3132317eb --- /dev/null +++ b/releasenotes/notes/bug-2137505-9390f7f914817f81.yaml @@ -0,0 +1,13 @@ +--- +fixes: + - | + openstacksdk was publishing metrics to the global Prometheus collector + registry (``prometheus_client.REGISTRY``) if the ``prometheus-client`` + library was installed, but the ``Connection`` object was not configured + to publish Prometheus metrics to a custom registry. This was causing the + global Prometheus collector registry to be polluted with potentially + unwanted metrics, and was also a potential cause of memory leaks if + openstacksdk is used to make a large number of requests. This issue has + now been fixed; openstacksdk will only publish Prometheus metrics when + ``collector_registry`` has been passed to the connection object, and + will only publish to that registry. diff --git a/releasenotes/notes/cache-auth-in-keyring-773dd5f682cd1610.yaml b/releasenotes/notes/cache-auth-in-keyring-773dd5f682cd1610.yaml new file mode 100644 index 0000000000..8e35c048a7 --- /dev/null +++ b/releasenotes/notes/cache-auth-in-keyring-773dd5f682cd1610.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for optionally caching auth information in the local + keyring. Requires the installation of the python ``keyring`` package. diff --git a/releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml b/releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml new file mode 100644 index 0000000000..4ac0b61af6 --- /dev/null +++ b/releasenotes/notes/cache-in-use-volumes-c7fa8bb378106fe3.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Fixed caching the volume list when volumes are in + use. 
diff --git a/releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml b/releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml new file mode 100644 index 0000000000..251fdb9666 --- /dev/null +++ b/releasenotes/notes/catch-up-release-notes-e385fad34e9f3d6e.yaml @@ -0,0 +1,15 @@ +--- +features: + - Swiftclient instantiation now provides authentication + information so that long lived swiftclient objects can + reauthenticate if necessary. + - Add support for explicit v2password auth type. + - Add SSL support to VEXXHOST vendor profile. + - Add zetta.io cloud vendor profile. +fixes: + - Fix bug where project_domain_{name,id} was set even + if project_{name,id} was not set. +other: + - HPCloud vendor profile removed due to cloud shutdown. + - RunAbove vendor profile removed due to migration to + OVH. diff --git a/releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml b/releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml new file mode 100644 index 0000000000..19db8ebef3 --- /dev/null +++ b/releasenotes/notes/change-attach-vol-return-value-4834a1f78392abb1.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The ``attach_volume`` method now always returns a ``volume_attachment`` + object. Previously, ``attach_volume`` would return a ``volume`` object if + it was called with ``wait=True`` and a ``volume_attachment`` object + otherwise. + diff --git a/releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml b/releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml new file mode 100644 index 0000000000..380b653f43 --- /dev/null +++ b/releasenotes/notes/cinder_volume_backups_support-6f7ceab440853833.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for Cinder volume backup resources, with the + usual methods (search/list/get/create/delete). 
diff --git a/releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml b/releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml new file mode 100644 index 0000000000..0847ee6671 --- /dev/null +++ b/releasenotes/notes/cinderv2-norm-fix-037189c60b43089f.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fixed the volume normalization function when used with cinder v2. diff --git a/releasenotes/notes/cleanup-objects-f99aeecf22ac13dd.yaml b/releasenotes/notes/cleanup-objects-f99aeecf22ac13dd.yaml new file mode 100644 index 0000000000..e1e0752fd1 --- /dev/null +++ b/releasenotes/notes/cleanup-objects-f99aeecf22ac13dd.yaml @@ -0,0 +1,6 @@ +--- +features: + - If shade has to create objects in swift to upload an + image, it will now delete those objects upon successful + image creation as they are no longer needed. They will + also be deleted on fatal import errors. diff --git a/releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml b/releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml new file mode 100644 index 0000000000..b447ed0a46 --- /dev/null +++ b/releasenotes/notes/cloud-profile-status-e0d29b5e2f10e95c.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add a field to vendor cloud profiles to indicate + active, deprecated and shutdown status. A message to + the user is triggered when attempting to use cloud + with either deprecated or shutdown status. diff --git a/releasenotes/notes/clustering-resource-deletion-bed869ba47c2aac1.yaml b/releasenotes/notes/clustering-resource-deletion-bed869ba47c2aac1.yaml new file mode 100644 index 0000000000..877a571719 --- /dev/null +++ b/releasenotes/notes/clustering-resource-deletion-bed869ba47c2aac1.yaml @@ -0,0 +1,13 @@ +--- +fixes: + - | + Fixed a regression in deleting Node and Cluster resources + in clustering caused by the addition of the ``location`` + property to all resource objects. 
Previously the delete + calls had directly returned the ``location`` field + returned in the headers from the clustering service pointing + to an Action resource that could be fetched to get status + on the delete operation. The delete calls now return an + Action resource directly that is correctly constructed + so that ``wait_for_status`` and ``wait_for_deleted`` + work as expected. diff --git a/releasenotes/notes/complete-aggregate-functions-45d5f2beeeac2b48.yaml b/releasenotes/notes/complete-aggregate-functions-45d5f2beeeac2b48.yaml new file mode 100644 index 0000000000..3fafe61885 --- /dev/null +++ b/releasenotes/notes/complete-aggregate-functions-45d5f2beeeac2b48.yaml @@ -0,0 +1,6 @@ +--- +features: + - Complete compute.aggregate functions to the latest state +fixes: + - aggregate.deleted property is renamed to 'is_deleted' to comply with the + naming convention diff --git a/releasenotes/notes/compute-add-validate-console-auth-token-999b790aec83de85.yaml b/releasenotes/notes/compute-add-validate-console-auth-token-999b790aec83de85.yaml new file mode 100644 index 0000000000..474e42c1e2 --- /dev/null +++ b/releasenotes/notes/compute-add-validate-console-auth-token-999b790aec83de85.yaml @@ -0,0 +1,13 @@ +--- +features: + - | + Add the ``validate_console_auth_token`` method to the Compute proxy. This + method uses the pre-existing ``os-console-auth-tokens`` OpenStack Compute + API to validate a console access token as produced by + ``get_console_url``. In addition, the method returns hypervisor connection + information for the console (hypervisor IP and port numbers), as this call + is generally used by the console proxies which users connect to. + + By default, callers of this method must have ``admin`` access to the + OpenStack Compute API due to the privileged nature of the hypervisor + connection information returned. 
diff --git a/releasenotes/notes/compute-microversion-2-17-b05cb87580b8d56a.yaml b/releasenotes/notes/compute-microversion-2-17-b05cb87580b8d56a.yaml new file mode 100644 index 0000000000..e8f8c51074 --- /dev/null +++ b/releasenotes/notes/compute-microversion-2-17-b05cb87580b8d56a.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add support for Compute API microversion 2.17, which allows admins to + trigger a crash dump for a server. This can be useful for debugging + misbehaving guests. diff --git a/releasenotes/notes/compute-microversion-2-73-abae1d0c3740f76e.yaml b/releasenotes/notes/compute-microversion-2-73-abae1d0c3740f76e.yaml new file mode 100644 index 0000000000..f3d89c2a64 --- /dev/null +++ b/releasenotes/notes/compute-microversion-2-73-abae1d0c3740f76e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add support for Compute API microversion 2.73, which allows admins to + specify a reason when locking a server. diff --git a/releasenotes/notes/compute-microversion-2-89-8c5187cc3bf6bd02.yaml b/releasenotes/notes/compute-microversion-2-89-8c5187cc3bf6bd02.yaml new file mode 100644 index 0000000000..e22a340dc3 --- /dev/null +++ b/releasenotes/notes/compute-microversion-2-89-8c5187cc3bf6bd02.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The 2.89 API microversion is now supported for the compute service. This + adds additional fields to the ``os-volume_attachments`` API, represented + by the ``openstack.compute.v2.volume_attachment.VolumeAttachment`` + resource. diff --git a/releasenotes/notes/compute-quota-set-e664412d089945d2.yaml b/releasenotes/notes/compute-quota-set-e664412d089945d2.yaml new file mode 100644 index 0000000000..63f23898e6 --- /dev/null +++ b/releasenotes/notes/compute-quota-set-e664412d089945d2.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for QuotaSet in the compute service. 
diff --git a/releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml b/releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml new file mode 100644 index 0000000000..6e170359cb --- /dev/null +++ b/releasenotes/notes/compute-quotas-b07a0f24dfac8444.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OperatorCloud.get_compute_quotas(), OperatorCloud.set_compute_quotas() and OperatorCloud.delete_compute_quotas() to manage nova quotas for projects and users \ No newline at end of file diff --git a/releasenotes/notes/compute-restore-server-020bf091acc9f8df.yaml b/releasenotes/notes/compute-restore-server-020bf091acc9f8df.yaml new file mode 100644 index 0000000000..9269c4742b --- /dev/null +++ b/releasenotes/notes/compute-restore-server-020bf091acc9f8df.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The ``openstack.compute.v2.server.Server`` object now provides a + ``restore`` method to restore it from a soft-deleted state, while the + compute proxy method provides an equivalent ``restore_server`` method. diff --git a/releasenotes/notes/compute-service-zone-2b25ec705b0156c4.yaml b/releasenotes/notes/compute-service-zone-2b25ec705b0156c4.yaml new file mode 100644 index 0000000000..ea69aef6a8 --- /dev/null +++ b/releasenotes/notes/compute-service-zone-2b25ec705b0156c4.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + The ``zone`` attribute on compute ``Service`` objects + has been renamed to ``availability_zone`` to match all + of the other resources, and also to better integrate + with the ``Resource.location`` attribute. diff --git a/releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml b/releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml new file mode 100644 index 0000000000..7ca6b37f5c --- /dev/null +++ b/releasenotes/notes/compute-usage-defaults-5f5b2936f17ff400.yaml @@ -0,0 +1,9 @@ +--- +features: + - get_compute_usage now has a default value for the start + parameter of 2010-07-06. That was the date the OpenStack + project started. 
It's completely impossible for someone + to have Nova usage data that goes back further in time. + Also, both the start and end date parameters now also + accept strings which will be parsed and timezones will + be properly converted to UTC which is what Nova expects. diff --git a/releasenotes/notes/compute-volume-attachment-proxy-method-rework-dc35fe9ca3af1c16.yaml b/releasenotes/notes/compute-volume-attachment-proxy-method-rework-dc35fe9ca3af1c16.yaml new file mode 100644 index 0000000000..5962e6e82e --- /dev/null +++ b/releasenotes/notes/compute-volume-attachment-proxy-method-rework-dc35fe9ca3af1c16.yaml @@ -0,0 +1,13 @@ +--- +upgrade: + - | + The signatures of the various volume attachment-related methods in the + compute API proxy layer have changed. These were previously incomplete and + did not function as expected in many scenarios. Some callers may need to be + reworked. The affected proxy methods are: + + - ``create_volume_attachment`` + - ``delete_volume_attachment`` + - ``update_volume_attachment`` + - ``get_volume_attachment`` + - ``volume_attachments`` diff --git a/releasenotes/notes/conf-object-ctr-c0e1da0a67dad841.yaml b/releasenotes/notes/conf-object-ctr-c0e1da0a67dad841.yaml new file mode 100644 index 0000000000..60bb03594d --- /dev/null +++ b/releasenotes/notes/conf-object-ctr-c0e1da0a67dad841.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added the ability to create a ``Connection`` from an ``oslo.config`` + ``CONF`` object. This is primarily intended to be used by OpenStack + services using SDK for inter-service communication. diff --git a/releasenotes/notes/config-aliases-0f6297eafd05c07c.yaml b/releasenotes/notes/config-aliases-0f6297eafd05c07c.yaml new file mode 100644 index 0000000000..d398c2ad93 --- /dev/null +++ b/releasenotes/notes/config-aliases-0f6297eafd05c07c.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Config values now support service-type aliases. 
The correct config names + are based on the official service type, such as + ``block_storage_api_version``, but with this change, legacy aliases such + as ``volume_api_version`` are also supported. diff --git a/releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml b/releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml new file mode 100644 index 0000000000..4bb1e9013e --- /dev/null +++ b/releasenotes/notes/config-flavor-specs-ca712e17971482b6.yaml @@ -0,0 +1,4 @@ +--- +features: + - Adds ability to add a config setting to clouds.yaml to + disable fetching extra_specs from flavors. diff --git a/releasenotes/notes/configdrive-f8ca9f94b2981db7.yaml b/releasenotes/notes/configdrive-f8ca9f94b2981db7.yaml new file mode 100644 index 0000000000..4337a5424a --- /dev/null +++ b/releasenotes/notes/configdrive-f8ca9f94b2981db7.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Supports Bare Metal API version 1.56, which allows building a config drive + on the server side from a provided dictionary. diff --git a/releasenotes/notes/consistent-volume-status-5a527cd561af5e7a.yaml b/releasenotes/notes/consistent-volume-status-5a527cd561af5e7a.yaml new file mode 100644 index 0000000000..e161e23c78 --- /dev/null +++ b/releasenotes/notes/consistent-volume-status-5a527cd561af5e7a.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + The following volume (v2 and v3) helpers have been renamed: + + - ``reset_snapshot`` -> ``reset_snapshot_status`` + - ``reset_backup`` -> ``reset_backup_status`` + - ``reset_group_state`` -> ``reset_group_status`` + - ``reset_group_snapshot_state`` -> ``reset_group_snapshot_status`` + + Aliases are provided for backwards compatibility. 
diff --git a/releasenotes/notes/container-search-b0f4253ce2deeda5.yaml b/releasenotes/notes/container-search-b0f4253ce2deeda5.yaml new file mode 100644 index 0000000000..3587a7b5c2 --- /dev/null +++ b/releasenotes/notes/container-search-b0f4253ce2deeda5.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Containers are now searchable either with a JMESPath expression or a dict of + container attributes via the + ``openstack.connection.Connection.search_containers`` function. diff --git a/releasenotes/notes/create-image-support-all-import-methods-48e4e382b7091dd3.yaml b/releasenotes/notes/create-image-support-all-import-methods-48e4e382b7091dd3.yaml new file mode 100644 index 0000000000..f42ea61ef6 --- /dev/null +++ b/releasenotes/notes/create-image-support-all-import-methods-48e4e382b7091dd3.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + The ``create_image`` method now takes new parameters (``import_method``, + ``uri``, ``remote_region``, ``remote_image_id`` and + ``remote_service_interface``) to support all import methods from Glance. diff --git a/releasenotes/notes/create-object-data-870cb543543aa983.yaml b/releasenotes/notes/create-object-data-870cb543543aa983.yaml new file mode 100644 index 0000000000..4e0255050c --- /dev/null +++ b/releasenotes/notes/create-object-data-870cb543543aa983.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add a data parameter to ``openstack.connection.Connection.create_object`` + so that data can be passed in directly instead of through a file. diff --git a/releasenotes/notes/create-object-directory-98e2cae175cc5082.yaml b/releasenotes/notes/create-object-directory-98e2cae175cc5082.yaml new file mode 100644 index 0000000000..08cce16655 --- /dev/null +++ b/releasenotes/notes/create-object-directory-98e2cae175cc5082.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Added a ``create_directory_marker_object`` method to allow for easy + creation of zero-byte 'directory' marker objects. 
These are not needed + in most cases, but on some clouds they are used by Static + Web and Web Listings in swift to facilitate directory traversal. diff --git a/releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml b/releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml new file mode 100644 index 0000000000..35bb8c0b68 --- /dev/null +++ b/releasenotes/notes/create-stack-fix-12dbb59a48ac7442.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - The create_stack() call was fixed to call the correct iterator + method and to return the updated stack object when waiting. diff --git a/releasenotes/notes/create-subnet-by-subnetpool-eba1129c67ed4d96.yaml b/releasenotes/notes/create-subnet-by-subnetpool-eba1129c67ed4d96.yaml new file mode 100644 index 0000000000..bb7127ac6a --- /dev/null +++ b/releasenotes/notes/create-subnet-by-subnetpool-eba1129c67ed4d96.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for specifying the subnetpool to use when creating subnets + (``subnetpool_name_or_id``) diff --git a/releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml b/releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml new file mode 100644 index 0000000000..9f9bd5474d --- /dev/null +++ b/releasenotes/notes/create_server_network_fix-c4a56b31d2850a4b.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - The create_server() API call would not use the supplied 'network' + parameter if the 'nics' parameter was also supplied, even though it would + be an empty list. It now uses 'network' if 'nics' is not supplied or if + it is an empty list. diff --git a/releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml b/releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml new file mode 100644 index 0000000000..2f6d018a67 --- /dev/null +++ b/releasenotes/notes/create_service_norm-319a97433d68fa6a.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - The returned data from a create_service() call was not being normalized. 
diff --git a/releasenotes/notes/cron_triggers_proxy-51aa89e91bbb9798.yaml b/releasenotes/notes/cron_triggers_proxy-51aa89e91bbb9798.yaml new file mode 100644 index 0000000000..c9ec39144b --- /dev/null +++ b/releasenotes/notes/cron_triggers_proxy-51aa89e91bbb9798.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add workflow CronTrigger resource and proxy methods. diff --git a/releasenotes/notes/data-model-cf50d86982646370.yaml b/releasenotes/notes/data-model-cf50d86982646370.yaml new file mode 100644 index 0000000000..66a814aaee --- /dev/null +++ b/releasenotes/notes/data-model-cf50d86982646370.yaml @@ -0,0 +1,8 @@ +--- +features: + - Explicit data model contracts are now defined for + Flavors, Images, Security Groups, Security Group Rules, + and Servers. + - Resources with data model contracts are now being returned with + 'location' attribute. The location carries cloud name, region + name and information about the project that owns the resource. diff --git a/releasenotes/notes/default-cloud-7ee0bcb9e5dd24b9.yaml b/releasenotes/notes/default-cloud-7ee0bcb9e5dd24b9.yaml new file mode 100644 index 0000000000..49aba3c9cc --- /dev/null +++ b/releasenotes/notes/default-cloud-7ee0bcb9e5dd24b9.yaml @@ -0,0 +1,7 @@ +--- +issues: + - If there was only one cloud defined in clouds.yaml + os-client-config was requiring the cloud parameter + be passed. This is inconsistent with how the envvars + cloud works which WILL work without setting the cloud + parameter if it's the only cloud. diff --git a/releasenotes/notes/default-microversion-b2401727cb591002.yaml b/releasenotes/notes/default-microversion-b2401727cb591002.yaml new file mode 100644 index 0000000000..3ef8ad9573 --- /dev/null +++ b/releasenotes/notes/default-microversion-b2401727cb591002.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Versions set in config via ``*_api_version`` or ``OS_*_API_VERSION`` + that have a ``.`` in them will be also passed as the default microversion + to the Adapter constructor. 
An additional config option, + ``*_default_microversion`` has been added to support being more explicit. diff --git a/releasenotes/notes/delete-autocreated-1839187b0aa35022.yaml b/releasenotes/notes/delete-autocreated-1839187b0aa35022.yaml new file mode 100644 index 0000000000..a0c2f8d766 --- /dev/null +++ b/releasenotes/notes/delete-autocreated-1839187b0aa35022.yaml @@ -0,0 +1,5 @@ +--- +features: + - Added new method, delete_autocreated_image_objects + that can be used to delete any leaked objects shade + may have created on behalf of the user. diff --git a/releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml b/releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml new file mode 100644 index 0000000000..00ce4998d1 --- /dev/null +++ b/releasenotes/notes/delete-image-objects-9d4b4e0fff36a23f.yaml @@ -0,0 +1,18 @@ +--- +fixes: + - Delete swift objects uploaded in service of uploading images + at the time that the corresponding image is deleted. On some clouds, + image uploads are accomplished by uploading the image to swift and + then running a task-import. As shade does this action on behalf of the + user, it is not reasonable to assume that the user would then be aware + of or manage the swift objects shade created, which led to an ongoing + leak of swift objects. + - Upload swift Large Objects as Static Large Objects by default. Shade + automatically uploads objects as Large Objects when they are over a + segment_size threshold. It had been doing this as Dynamic Large Objects, + which sound great, but which have the downside of not deleting their + sub-segments when the primary object is deleted. Since nothing in the + shade interface exposes that the object was segmented, the user would not + know they would also need to find and delete the segments. Instead, we + now upload as Static Large Objects which behave as expected and delete + segments when the object is deleted. 
diff --git a/releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml b/releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml new file mode 100644 index 0000000000..381bcb99a9 --- /dev/null +++ b/releasenotes/notes/delete-obj-return-a3ecf0415b7a2989.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - The delete_object() method was not returning True/False, + similar to other delete methods. It is now consistent with + the other delete APIs. diff --git a/releasenotes/notes/delete_project-399f9b3107014dde.yaml b/releasenotes/notes/delete_project-399f9b3107014dde.yaml new file mode 100644 index 0000000000..e4cf39fb97 --- /dev/null +++ b/releasenotes/notes/delete_project-399f9b3107014dde.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - The delete_project() API now conforms to our standard of returning True + when the delete succeeds, or False when the project was not found. It + would previously raise an exception if the project was not found. diff --git a/releasenotes/notes/deprecate-influxdb-support-aca3c74f7dc25572.yaml b/releasenotes/notes/deprecate-influxdb-support-aca3c74f7dc25572.yaml new file mode 100644 index 0000000000..63523760ec --- /dev/null +++ b/releasenotes/notes/deprecate-influxdb-support-aca3c74f7dc25572.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - | + Support for reporting metrics to InfluxDB has been deprecated for removal. + The implementation relied on an EOL Python library and only supported + InfluxDB v1. diff --git a/releasenotes/notes/deprecate-remote_ip_prefix-metering-label-rules-843d5a962e4e428c.yaml b/releasenotes/notes/deprecate-remote_ip_prefix-metering-label-rules-843d5a962e4e428c.yaml new file mode 100644 index 0000000000..351fb2200d --- /dev/null +++ b/releasenotes/notes/deprecate-remote_ip_prefix-metering-label-rules-843d5a962e4e428c.yaml @@ -0,0 +1,7 @@ +--- +deprecations: + - | + Deprecate the use of 'remote_ip_prefix' in metering label rules, and it + will be removed in future releases. 
One should use instead the + 'source_ip_prefix' and/or 'destination_ip_prefix' parameters. For more + details, you can check the spec: https://review.opendev.org/#/c/744702/. diff --git a/releasenotes/notes/deprecated-compute-image-proxy-apis-986263f6aa1b1b25.yaml b/releasenotes/notes/deprecated-compute-image-proxy-apis-986263f6aa1b1b25.yaml new file mode 100644 index 0000000000..c63ff8205d --- /dev/null +++ b/releasenotes/notes/deprecated-compute-image-proxy-apis-986263f6aa1b1b25.yaml @@ -0,0 +1,12 @@ +--- +deprecations: + - | + The following Compute service proxy methods are now deprecated: + + * ``find_image`` + * ``get_image`` + * ``delete_image`` + * ``images`` + + These are proxy APIs for the Image service. You should use the Image + service instead via the Image service proxy methods. diff --git a/releasenotes/notes/deprecated-profile-762afdef0e8fc9e8.yaml b/releasenotes/notes/deprecated-profile-762afdef0e8fc9e8.yaml new file mode 100644 index 0000000000..e09d17c345 --- /dev/null +++ b/releasenotes/notes/deprecated-profile-762afdef0e8fc9e8.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - | + ``openstack.profile.Profile`` has been deprecated and will be removed + in the ``1.0`` release. Users should use the functions in + ``openstack.config`` instead. diff --git a/releasenotes/notes/disable-service-39df96ef8a817785.yaml b/releasenotes/notes/disable-service-39df96ef8a817785.yaml new file mode 100644 index 0000000000..aab0d58244 --- /dev/null +++ b/releasenotes/notes/disable-service-39df96ef8a817785.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + ``has_{service_type}`` is a boolean config option that allows + asserting that a given service does not exist or should not be used + in a given cloud. Doing this will now cause the corresponding + service ``Proxy`` object to not be created and in its place is + an object that will throw exceptions if used. 
+ - | + ``{service_type}_disabled_reason`` is a new string config option + that can be set to indicate a reason why a service has been disabled. + This string will be used in exceptions or log warnings emitted. diff --git a/releasenotes/notes/dns-domain-parameter-d3acfc3287a9d632.yaml b/releasenotes/notes/dns-domain-parameter-d3acfc3287a9d632.yaml new file mode 100644 index 0000000000..605cae2a5a --- /dev/null +++ b/releasenotes/notes/dns-domain-parameter-d3acfc3287a9d632.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added dns_domain parameter into the create_network and update_network + methods. diff --git a/releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml b/releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml new file mode 100644 index 0000000000..6d58e43c1e --- /dev/null +++ b/releasenotes/notes/domain_operations_name_or_id-baba4cac5b67234d.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added name_or_id parameter to domain operations, allowing + an admin to update/delete/get by domain name. diff --git a/releasenotes/notes/drop-Resource-allow_get-attribute-fec75b551fb79465.yaml b/releasenotes/notes/drop-Resource-allow_get-attribute-fec75b551fb79465.yaml new file mode 100644 index 0000000000..422029a735 --- /dev/null +++ b/releasenotes/notes/drop-Resource-allow_get-attribute-fec75b551fb79465.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The ``allow_get`` attribute of ``openstack.resource.Resource`` has been + removed. Use ``allow_fetch`` or ``allow_list`` instead. diff --git a/releasenotes/notes/drop-formatter-deserialize-30b19956fb79bb8d.yaml b/releasenotes/notes/drop-formatter-deserialize-30b19956fb79bb8d.yaml new file mode 100644 index 0000000000..d8415bf62a --- /dev/null +++ b/releasenotes/notes/drop-formatter-deserialize-30b19956fb79bb8d.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The ``openstack.format.Formatter`` class no longer defines a ``serialize`` + method to override. This was unused and unnecessary complexity. 
diff --git a/releasenotes/notes/drop-python-37-38-2a6336af44050fec.yaml b/releasenotes/notes/drop-python-37-38-2a6336af44050fec.yaml new file mode 100644 index 0000000000..ab605dfadf --- /dev/null +++ b/releasenotes/notes/drop-python-37-38-2a6336af44050fec.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Support for Python 3.7 and 3.8 has been dropped. Python 3.7 support was + untested and known to be broken for multiple releases, while Python 3.8 + is going EOL in October 2024. diff --git a/releasenotes/notes/drop-python-39-e2d54d859007a575.yaml b/releasenotes/notes/drop-python-39-e2d54d859007a575.yaml new file mode 100644 index 0000000000..4583e98cf7 --- /dev/null +++ b/releasenotes/notes/drop-python-39-e2d54d859007a575.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Support for Python 3.9 has been dropped. The minimum version of Python now + supported is 3.10. diff --git a/releasenotes/notes/drop-python27-b824f9ce51cb1ab7.yaml b/releasenotes/notes/drop-python27-b824f9ce51cb1ab7.yaml new file mode 100644 index 0000000000..80f1f86f29 --- /dev/null +++ b/releasenotes/notes/drop-python27-b824f9ce51cb1ab7.yaml @@ -0,0 +1,3 @@ +--- +prelude: > + As of this release, python v2 is neither tested nor supported. diff --git a/releasenotes/notes/drop-senlin-cloud-layer-c06d496acc70b014.yaml b/releasenotes/notes/drop-senlin-cloud-layer-c06d496acc70b014.yaml new file mode 100644 index 0000000000..784d97cdc1 --- /dev/null +++ b/releasenotes/notes/drop-senlin-cloud-layer-c06d496acc70b014.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Cloud layer operations for Senlin service are dropped due to the large number of bugs there. diff --git a/releasenotes/notes/dropped-python-3.5-b154887cce87947c.yaml b/releasenotes/notes/dropped-python-3.5-b154887cce87947c.yaml new file mode 100644 index 0000000000..a78c85f45a --- /dev/null +++ b/releasenotes/notes/dropped-python-3.5-b154887cce87947c.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - | + Python 3.5 is no longer supported. 
diff --git a/releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml b/releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml new file mode 100644 index 0000000000..70e28e7b15 --- /dev/null +++ b/releasenotes/notes/dual-stack-networks-8a81941c97d28deb.yaml @@ -0,0 +1,8 @@ +--- +features: + - Added support for dual stack networks where the IPv4 subnet and the + IPv6 subnet have opposite public/private qualities. It is now possible + to add configuration to clouds.yaml that will indicate that a network + is public for v6 and private for v4, which is otherwise very difficult + to correctly infer while setting server attributes like private_v4, + public_v4 and public_v6. diff --git a/releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml b/releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml new file mode 100644 index 0000000000..2db7bc9474 --- /dev/null +++ b/releasenotes/notes/endpoint-from-catalog-bad36cb0409a4e6a.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add new method, 'endpoint_for' which will return the + raw endpoint for a given service from the current catalog. diff --git a/releasenotes/notes/expose-client-side-rate-limit-ddb82df7cb92091c.yaml b/releasenotes/notes/expose-client-side-rate-limit-ddb82df7cb92091c.yaml new file mode 100644 index 0000000000..3d7b503f3b --- /dev/null +++ b/releasenotes/notes/expose-client-side-rate-limit-ddb82df7cb92091c.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Client-side rate limiting is now directly exposed via ``rate_limit`` + and ``concurrency`` parameters. A single value can be given that applies + to all services, or a dict of service-type and value if different + client-side rate or concurrency limits should be used for different + services. 
diff --git a/releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml b/releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml new file mode 100644 index 0000000000..e474e02661 --- /dev/null +++ b/releasenotes/notes/false-not-attribute-error-49484d0fdc61f75d.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - delete_image used to fail with an AttributeError if an invalid image + name or id was passed, rather than returning False which was the + intent. This is worthy of note because it's a behavior change, but the + previous behavior was a bug. diff --git a/releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml b/releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml new file mode 100644 index 0000000000..e0a3f6c836 --- /dev/null +++ b/releasenotes/notes/feature-server-metadata-50caf18cec532160.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OpenStackCloud.set_server_metadata() and OpenStackCloud.delete_server_metadata() to manage metadata of existing nova compute instances diff --git a/releasenotes/notes/find_server-use-details-9a22e83ec6540c98.yaml b/releasenotes/notes/find_server-use-details-9a22e83ec6540c98.yaml new file mode 100644 index 0000000000..79cd7c2fa9 --- /dev/null +++ b/releasenotes/notes/find_server-use-details-9a22e83ec6540c98.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Make sure find_server returns server details when looking up by name. diff --git a/releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml b/releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml new file mode 100644 index 0000000000..2f98ebbda1 --- /dev/null +++ b/releasenotes/notes/fip_timeout-035c4bb3ff92fa1f.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - When creating a new server, the timeout was not being passed through to + floating IP creation, which could also timeout. 
diff --git a/releasenotes/notes/firewall-resources-c7589d288dd57e35.yaml b/releasenotes/notes/firewall-resources-c7589d288dd57e35.yaml new file mode 100644 index 0000000000..92e4a09bdf --- /dev/null +++ b/releasenotes/notes/firewall-resources-c7589d288dd57e35.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Implement fwaas v2 resources for managing firewall groups, rules + and policies. diff --git a/releasenotes/notes/fix-bug-9e1a976958d2543b.yaml b/releasenotes/notes/fix-bug-9e1a976958d2543b.yaml new file mode 100644 index 0000000000..913d33a108 --- /dev/null +++ b/releasenotes/notes/fix-bug-9e1a976958d2543b.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed the issue where unshelving a server to a specific availability zone + failed due to an unhandled ``availability_zone`` option. diff --git a/releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml b/releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml new file mode 100644 index 0000000000..80d09fb837 --- /dev/null +++ b/releasenotes/notes/fix-compat-with-old-keystoneauth-66e11ee9d008b962.yaml @@ -0,0 +1,7 @@ +--- +issues: + - Fixed a regression when using latest os-client-config with + the keystoneauth from stable/newton. Although this isn't a + super common combination, the added feature that broke the + interaction is really not worthy of the incompatibility, so + a workaround was added. diff --git a/releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml b/releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml new file mode 100644 index 0000000000..cd08b87cfc --- /dev/null +++ b/releasenotes/notes/fix-config-drive-a148b7589f7e1022.yaml @@ -0,0 +1,6 @@ +--- +issues: + - Fixed an issue where nodepool could cause config_drive + to be passed explicitly as None, which was getting directly + passed through to the JSON. Also fix the same logic for key_name + and scheduler_hints while we're in there. 
diff --git a/releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml b/releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml new file mode 100644 index 0000000000..7d8199dee6 --- /dev/null +++ b/releasenotes/notes/fix-delete-ips-1d4eebf7bc4d4733.yaml @@ -0,0 +1,6 @@ +--- +issues: + - Fixed the logic in delete_ips and added regression + tests to cover it. The old logic was incorrectly looking + for floating ips using port syntax. It was also not + swallowing errors when it should. diff --git a/releasenotes/notes/fix-dns-return-c810d5e6736322f1.yaml b/releasenotes/notes/fix-dns-return-c810d5e6736322f1.yaml new file mode 100644 index 0000000000..78b95dedd0 --- /dev/null +++ b/releasenotes/notes/fix-dns-return-c810d5e6736322f1.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Fixed issue where the dns methods were returning False instead of None + when resources were not found. + - | + Fixed jsonification under python3. diff --git a/releasenotes/notes/fix-dns-secondary-zones-creation-78ed84fa7d514998.yaml b/releasenotes/notes/fix-dns-secondary-zones-creation-78ed84fa7d514998.yaml new file mode 100644 index 0000000000..90b9cd8571 --- /dev/null +++ b/releasenotes/notes/fix-dns-secondary-zones-creation-78ed84fa7d514998.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed DNS secondary zone creation. Now secondary zones require + neither email nor ttl, as both get overridden by the following + zone transfer. diff --git a/releasenotes/notes/fix-endpoint-override-ac41baeec9549ab3.yaml b/releasenotes/notes/fix-endpoint-override-ac41baeec9549ab3.yaml new file mode 100644 index 0000000000..0496f24cbb --- /dev/null +++ b/releasenotes/notes/fix-endpoint-override-ac41baeec9549ab3.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed issue where ``endpoint_override`` settings were not getting passed + to the Adapter constructor in ``get_session_client``. 
diff --git a/releasenotes/notes/fix-floating-ip-private-matching-84e369eee380a185.yaml b/releasenotes/notes/fix-floating-ip-private-matching-84e369eee380a185.yaml new file mode 100644 index 0000000000..6ff00ef273 --- /dev/null +++ b/releasenotes/notes/fix-floating-ip-private-matching-84e369eee380a185.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue where an optimization in the logic to find floating ips + first when looking for public ip addresses broke finding the correct + private address. diff --git a/releasenotes/notes/fix-for-microversion-70cd686b6d6e3fd0.yaml b/releasenotes/notes/fix-for-microversion-70cd686b6d6e3fd0.yaml new file mode 100644 index 0000000000..be402ef982 --- /dev/null +++ b/releasenotes/notes/fix-for-microversion-70cd686b6d6e3fd0.yaml @@ -0,0 +1,14 @@ +--- +fixes: + - | + In April 2019 the microversion support for the Server resource was increased + to ``2.72``. Unfortunately, due to an issue with version discovery documents, + this increase never actually became effective. A fix is coming in ``3.17.2`` of + ``keystoneauth`` which will unbreak version discovery and cause the microversion + support to start working. +upgrade: + - | + Due to the fix in microversion support in `keystoneauth`, Servers will be + fetched using microversion ``2.72``. Code that assumes the existence of a + ``flavor.id`` field in the Server record should be removed, as it does not exist + in new microversions and cannot be filled in behind the scenes. diff --git a/releasenotes/notes/fix-image-hw_qemu_guest_agent-bf1147e52c84b5e8.yaml b/releasenotes/notes/fix-image-hw_qemu_guest_agent-bf1147e52c84b5e8.yaml new file mode 100644 index 0000000000..3d67a068a9 --- /dev/null +++ b/releasenotes/notes/fix-image-hw_qemu_guest_agent-bf1147e52c84b5e8.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + hw_qemu_guest_agent attribute of the image is a string boolean with values + `yes` and `no`. 
diff --git a/releasenotes/notes/fix-image-task-ae79502dd5c7ecba.yaml b/releasenotes/notes/fix-image-task-ae79502dd5c7ecba.yaml new file mode 100644 index 0000000000..8a6513b3ce --- /dev/null +++ b/releasenotes/notes/fix-image-task-ae79502dd5c7ecba.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a regression in image upload when the cloud uses the task + upload method. A refactor led to attempting to update the disk_format + and container_format values after the image had been imported. diff --git a/releasenotes/notes/fix-keypair-user-id-bug-2095312-a01dc5b9b26dbe93.yaml b/releasenotes/notes/fix-keypair-user-id-bug-2095312-a01dc5b9b26dbe93.yaml new file mode 100644 index 0000000000..e309674576 --- /dev/null +++ b/releasenotes/notes/fix-keypair-user-id-bug-2095312-a01dc5b9b26dbe93.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fix the ``delete_keypair``, ``get_keypair`` and ``find_keypair`` methods + not including the optional ``user_id`` parameter in API queries. + [`bug 2095312 `_] diff --git a/releasenotes/notes/fix-list-networks-a592725df64c306e.yaml b/releasenotes/notes/fix-list-networks-a592725df64c306e.yaml new file mode 100644 index 0000000000..eecc255e6c --- /dev/null +++ b/releasenotes/notes/fix-list-networks-a592725df64c306e.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fix for list_networks() ignoring any filters. diff --git a/releasenotes/notes/fix-microversion-354dc70deb2b2f0b.yaml b/releasenotes/notes/fix-microversion-354dc70deb2b2f0b.yaml new file mode 100644 index 0000000000..3cb5745b36 --- /dev/null +++ b/releasenotes/notes/fix-microversion-354dc70deb2b2f0b.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Modify microversion handling. Microversion chosen by the client/user is + respected in the microversion negotiation. For features, requiring + particular microversion, it would be ensured it is supported by the server + side and required microversion is <= chosen microversion, otherwise call + will be rejected. 
diff --git a/releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml b/releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml new file mode 100644 index 0000000000..94a2ab8577 --- /dev/null +++ b/releasenotes/notes/fix-missing-futures-a0617a1c1ce6e659.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Added missing dependency on futures library for python 2. + The depend was missed in testing due to it having been listed + in test-requirements already. diff --git a/releasenotes/notes/fix-neutron-endpoint-mangling-a9dd89dd09bc71ec.yaml b/releasenotes/notes/fix-neutron-endpoint-mangling-a9dd89dd09bc71ec.yaml new file mode 100644 index 0000000000..0f4a2d1c7f --- /dev/null +++ b/releasenotes/notes/fix-neutron-endpoint-mangling-a9dd89dd09bc71ec.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed incorrect neutron endpoint mangling for the cases when the catalog + contains a versioned neutron endpoint. diff --git a/releasenotes/notes/fix-os_auth_type-v3multifactor-049cf52573d9e00e.yaml b/releasenotes/notes/fix-os_auth_type-v3multifactor-049cf52573d9e00e.yaml new file mode 100644 index 0000000000..73a288cd6d --- /dev/null +++ b/releasenotes/notes/fix-os_auth_type-v3multifactor-049cf52573d9e00e.yaml @@ -0,0 +1,12 @@ +--- +fixes: + - | + It is now possible to configure ``v3multifactor`` auth type using + environment variables. For example: + + export OS_AUTH_TYPE=v3multifactor + export OS_AUTH_METHODS=v3password,v3totp + export OS_USERNAME=admin + export OS_PASSWORD=password + export OS_PASSCODE=12345 + openstack server list diff --git a/releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml b/releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml new file mode 100644 index 0000000000..d681f93caa --- /dev/null +++ b/releasenotes/notes/fix-properties-key-conflict-2161ca1faaad6731.yaml @@ -0,0 +1,4 @@ +--- +issues: + - Images in the cloud with a string property named "properties" + caused image normalization to bomb. 
diff --git a/releasenotes/notes/fix-quota-show-defaults-0a8c388926eae18b.yaml b/releasenotes/notes/fix-quota-show-defaults-0a8c388926eae18b.yaml new file mode 100644 index 0000000000..428abef0ee --- /dev/null +++ b/releasenotes/notes/fix-quota-show-defaults-0a8c388926eae18b.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed issue with ``quota show --default`` command by + correcting the API URL. diff --git a/releasenotes/notes/fix-restore-resp-4e0bf3a246f3dc59.yaml b/releasenotes/notes/fix-restore-resp-4e0bf3a246f3dc59.yaml new file mode 100644 index 0000000000..884b93c4ce --- /dev/null +++ b/releasenotes/notes/fix-restore-resp-4e0bf3a246f3dc59.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Previously the volume backup restore response only + returned ``id`` and now it also returns ``volume_id`` + and ``volume_name`` fields. diff --git a/releasenotes/notes/fix-server-unshelve-to-host-cb02eee8a20ba478.yaml b/releasenotes/notes/fix-server-unshelve-to-host-cb02eee8a20ba478.yaml new file mode 100644 index 0000000000..449d34ab88 --- /dev/null +++ b/releasenotes/notes/fix-server-unshelve-to-host-cb02eee8a20ba478.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed the issue that unshelving a server to a specific host was failed + due to unhandled host option. diff --git a/releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml b/releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml new file mode 100644 index 0000000000..66a5f33c90 --- /dev/null +++ b/releasenotes/notes/fix-supplemental-fips-c9cd58aac12eb30e.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - Fixed an issue where shade could report a floating IP being attached + to a server erroneously due to only matching on fixed ip. Changed the + lookup to match on port ids. This adds an API call in the case where + the workaround is needed because of a bug in the cloud, but in most + cases it should have no difference. 
diff --git a/releasenotes/notes/fix-task-timing-048afea680adc62e.yaml b/releasenotes/notes/fix-task-timing-048afea680adc62e.yaml new file mode 100644 index 0000000000..ef9e219e97 --- /dev/null +++ b/releasenotes/notes/fix-task-timing-048afea680adc62e.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fix a regression where the ``TaskManager.post_run_task`` ``elapsed_time`` + argument was not reflecting the time taken by the actual task. diff --git a/releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml b/releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml new file mode 100644 index 0000000000..060461d095 --- /dev/null +++ b/releasenotes/notes/fix-update-domain-af47b066ac52eb7f.yaml @@ -0,0 +1,3 @@ +--- +fixes: + - Fix for update_domain() where 'name' was not updatable. diff --git a/releasenotes/notes/fix-yaml-load-3e6bd852afe549b4.yaml b/releasenotes/notes/fix-yaml-load-3e6bd852afe549b4.yaml new file mode 100644 index 0000000000..ac34188cca --- /dev/null +++ b/releasenotes/notes/fix-yaml-load-3e6bd852afe549b4.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed an issue where importing openstacksdk changed the behavior of + ``yaml.load`` globally. diff --git a/releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml b/releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml new file mode 100644 index 0000000000..bc0f768bd0 --- /dev/null +++ b/releasenotes/notes/fixed-magnum-type-7406f0a60525f858.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - Fixed magnum service_type. shade was using it as 'container' + but the correct type is 'container-infra'. It's possible that on + old clouds with magnum shade may now do the wrong thing. If that + occurs, please file a bug. 
diff --git a/releasenotes/notes/flavor-cloud-layer-0b4d130ac1c5e7c4.yaml b/releasenotes/notes/flavor-cloud-layer-0b4d130ac1c5e7c4.yaml new file mode 100644 index 0000000000..5c35b42aa3 --- /dev/null +++ b/releasenotes/notes/flavor-cloud-layer-0b4d130ac1c5e7c4.yaml @@ -0,0 +1,4 @@ +--- +other: + - Flavor operations of the cloud layer are switched to the rely on + the proxy layer diff --git a/releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml b/releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml new file mode 100644 index 0000000000..9a7ba7de16 --- /dev/null +++ b/releasenotes/notes/flavor_fix-a53c6b326dc34a2c.yaml @@ -0,0 +1,7 @@ +--- +features: + - Flavors will always contain an 'extra_specs' attribute. Client cruft, + such as 'links', 'HUMAN_ID', etc. has been removed. +fixes: + - Setting and unsetting flavor extra specs now works. This had + been broken since the 1.2.0 release. diff --git a/releasenotes/notes/floating_ip_normalization-41e0edcdb0c98aee.yaml b/releasenotes/notes/floating_ip_normalization-41e0edcdb0c98aee.yaml new file mode 100644 index 0000000000..82de33d141 --- /dev/null +++ b/releasenotes/notes/floating_ip_normalization-41e0edcdb0c98aee.yaml @@ -0,0 +1,10 @@ +--- +upgrade: + - | + No Munch conversion and normalization of the floating ips is happening + anymore. For Neutron network a pure FloatingIP object is being returned, + for Nova still munch object. +deprecations: + - | + search_floating_ips method is deprecated and should not be used anymore. It + is going to be dropped approximately after one major cycle. diff --git a/releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml b/releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml new file mode 100644 index 0000000000..dcdccd2497 --- /dev/null +++ b/releasenotes/notes/fnmatch-name-or-id-f658fe26f84086c8.yaml @@ -0,0 +1,5 @@ +--- +features: + - name_or_id parameters to search/get methods now support + filename-like globbing. 
This means search_servers('nb0*') + will return all servers whose names start with 'nb0'. diff --git a/releasenotes/notes/force_ipv4_no_ipv6_address-9842168b5d05d262.yaml b/releasenotes/notes/force_ipv4_no_ipv6_address-9842168b5d05d262.yaml new file mode 100644 index 0000000000..8cca5a205b --- /dev/null +++ b/releasenotes/notes/force_ipv4_no_ipv6_address-9842168b5d05d262.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Cloud with the `force_ipv4` flag will no longer return a + `public_v6` value, even if one is provided by the cloud. This is + to avoid having entries for unconfigured interfaces. diff --git a/releasenotes/notes/futurist-b54b0f449d410997.yaml b/releasenotes/notes/futurist-b54b0f449d410997.yaml new file mode 100644 index 0000000000..1d1ca2095c --- /dev/null +++ b/releasenotes/notes/futurist-b54b0f449d410997.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Switched to the ``futurist`` library for managing background + concurrent tasks. Introduced a new ``pool_executor`` parameter + to `Connection` that allows passing any futurist Executor + for cases where the default ``ThreadPoolExecutor`` would not + be appropriate. diff --git a/releasenotes/notes/generate-form-signature-294ca46812f291d6.yaml b/releasenotes/notes/generate-form-signature-294ca46812f291d6.yaml new file mode 100644 index 0000000000..50289502d8 --- /dev/null +++ b/releasenotes/notes/generate-form-signature-294ca46812f291d6.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added methods to manage object store temp-url keys and + generate signatures needed for FormPost middleware. diff --git a/releasenotes/notes/get-image-tasks-c66a05c2c67976db.yaml b/releasenotes/notes/get-image-tasks-c66a05c2c67976db.yaml new file mode 100644 index 0000000000..a6841099dd --- /dev/null +++ b/releasenotes/notes/get-image-tasks-c66a05c2c67976db.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Adds operation which retrieves tasks associated + to a particular image. 
diff --git a/releasenotes/notes/get-limits-c383c512f8e01873.yaml b/releasenotes/notes/get-limits-c383c512f8e01873.yaml new file mode 100644 index 0000000000..58ed1e1002 --- /dev/null +++ b/releasenotes/notes/get-limits-c383c512f8e01873.yaml @@ -0,0 +1,3 @@ +--- +features: + - Allow to retrieve the limits of a specific project diff --git a/releasenotes/notes/get-object-raw-e58284e59c81c8ef.yaml b/releasenotes/notes/get-object-raw-e58284e59c81c8ef.yaml new file mode 100644 index 0000000000..d854d8ea4b --- /dev/null +++ b/releasenotes/notes/get-object-raw-e58284e59c81c8ef.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added ``get_object_raw`` method for downloading an object from swift + and returning a raw requests Response object. diff --git a/releasenotes/notes/get-server-by-id-none-3e8538800fa09d82.yaml b/releasenotes/notes/get-server-by-id-none-3e8538800fa09d82.yaml new file mode 100644 index 0000000000..b3ef3fec31 --- /dev/null +++ b/releasenotes/notes/get-server-by-id-none-3e8538800fa09d82.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + The ``get_server_by_id`` method is supposed to return ``None`` if the + server in question can't be found, but a regression was introduced + causing it to raise ``ResourceNotFound`` instead. This has been corrected + and ``get_server_by_id`` returns ``None`` correctly again. 
diff --git a/releasenotes/notes/get-usage-72d249ff790d1b8f.yaml b/releasenotes/notes/get-usage-72d249ff790d1b8f.yaml new file mode 100644 index 0000000000..4b447f4d4c --- /dev/null +++ b/releasenotes/notes/get-usage-72d249ff790d1b8f.yaml @@ -0,0 +1,3 @@ +--- +features: + - Allow to retrieve the usage of a specific project diff --git a/releasenotes/notes/get_compute_usage-01811dccd60dc92a.yaml b/releasenotes/notes/get_compute_usage-01811dccd60dc92a.yaml new file mode 100644 index 0000000000..7eaf86982c --- /dev/null +++ b/releasenotes/notes/get_compute_usage-01811dccd60dc92a.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + cloud.get_compute_usage method return instance of compute.usage.Usage class + instead of munch. diff --git a/releasenotes/notes/get_object_api-968483adb016bce1.yaml b/releasenotes/notes/get_object_api-968483adb016bce1.yaml new file mode 100644 index 0000000000..bc830d57cf --- /dev/null +++ b/releasenotes/notes/get_object_api-968483adb016bce1.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added a new API call, OpenStackCloud.get_object(), to download objects from swift. diff --git a/releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml b/releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml new file mode 100644 index 0000000000..3b134fcb5c --- /dev/null +++ b/releasenotes/notes/glance-image-pagination-0b4dfef22b25852b.yaml @@ -0,0 +1,4 @@ +--- +issues: + - Fixed an issue where glance image list pagination was being ignored, + leading to truncated image lists. diff --git a/releasenotes/notes/glance-image-stores-2baa66e6743a2f2d.yaml b/releasenotes/notes/glance-image-stores-2baa66e6743a2f2d.yaml new file mode 100644 index 0000000000..8721e1f035 --- /dev/null +++ b/releasenotes/notes/glance-image-stores-2baa66e6743a2f2d.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for specifying stores when doing glance image uploads. 
diff --git a/releasenotes/notes/global-request-id-d7c0736f43929165.yaml b/releasenotes/notes/global-request-id-d7c0736f43929165.yaml new file mode 100644 index 0000000000..b2677a0c81 --- /dev/null +++ b/releasenotes/notes/global-request-id-d7c0736f43929165.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Added support for setting ``global_request_id`` on a ``Connection``. + If done, this will cause all requests sent to send the request id + header to the OpenStack services. Since ``Connection`` can otherwise + be used multi-threaded, add a method ``global_request`` that returns + a new ``Connection`` based on the old ``Connection`` but on which + the new ``global_request_id`` has been set. Since a ``Connection`` + can be used as a context manager, this also means the ``global_request`` + method can be used in ``with`` statements. diff --git a/releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml b/releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml new file mode 100644 index 0000000000..9776030ca6 --- /dev/null +++ b/releasenotes/notes/grant-revoke-assignments-231d3f9596a1ae75.yaml @@ -0,0 +1,3 @@ +--- +features: + - add granting and revoking of roles from groups and users diff --git a/releasenotes/notes/identity-auth-url-f3ae8ef22d2bcab6.yaml b/releasenotes/notes/identity-auth-url-f3ae8ef22d2bcab6.yaml new file mode 100644 index 0000000000..3009b09477 --- /dev/null +++ b/releasenotes/notes/identity-auth-url-f3ae8ef22d2bcab6.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The ``auth_url`` will be used for the default value of + ``identity_endpoint_override`` in the absence of project or system-scope + information. This should simplify some actions such as listing available + projects. 
diff --git a/releasenotes/notes/identity-cloud-mixin-inherited-role-assignments-8fe9ac9509d99f4d.yaml b/releasenotes/notes/identity-cloud-mixin-inherited-role-assignments-8fe9ac9509d99f4d.yaml new file mode 100644 index 0000000000..ab1216793f --- /dev/null +++ b/releasenotes/notes/identity-cloud-mixin-inherited-role-assignments-8fe9ac9509d99f4d.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Add support for ``inherited_to`` filter for listing identity role + assignments in the cloud layer. This allows filtering by whether role + grants are inheritable to sub-projects. +deprecations: + - | + Deprecate ``os-inherit-extension-inherited-to`` in favor of + ``inherited_to`` filter for listing identity role_assignments in the cloud + layer. diff --git a/releasenotes/notes/identity-cloud-mixin-inherited-roles-ed66bb78ddeca2c9.yaml b/releasenotes/notes/identity-cloud-mixin-inherited-roles-ed66bb78ddeca2c9.yaml new file mode 100644 index 0000000000..0af650b87f --- /dev/null +++ b/releasenotes/notes/identity-cloud-mixin-inherited-roles-ed66bb78ddeca2c9.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add support for granting inherited roles. + Roles assignments can be added to a user or group + on the system, a domain, or a project. diff --git a/releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml b/releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml new file mode 100644 index 0000000000..6548121046 --- /dev/null +++ b/releasenotes/notes/image-flavor-by-name-54865b00ebbf1004.yaml @@ -0,0 +1,9 @@ +--- +features: + - The image and flavor parameters for create_server + now accept name in addition to id and dict. If given + as a name or id, shade will do a get_image or a + get_flavor to find the matching image or flavor. + If you have an id already and are not using any caching + and the extra lookup is annoying, passing the id in + as "dict(id='my-id')" will avoid the lookup. 
diff --git a/releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml b/releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml new file mode 100644 index 0000000000..6461f5edfe --- /dev/null +++ b/releasenotes/notes/image-from-volume-9acf7379f5995b5b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added ability to create an image from a volume. diff --git a/releasenotes/notes/image-id-filter-key-b9b6b52139a27cbe.yaml b/releasenotes/notes/image-id-filter-key-b9b6b52139a27cbe.yaml new file mode 100644 index 0000000000..6b501bf4ba --- /dev/null +++ b/releasenotes/notes/image-id-filter-key-b9b6b52139a27cbe.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + It is now possible to filter ``openstack.image.v2.Image`` resources by ID + using the ``id`` filter. While this is of little value when used with + single IDs, it can be useful when combined with operators like ``in:`` + to e.g. filter by multiple image IDs. diff --git a/releasenotes/notes/image-import-proxy-params-f19d8b6166104ebe.yaml b/releasenotes/notes/image-import-proxy-params-f19d8b6166104ebe.yaml new file mode 100644 index 0000000000..56b59f4f1d --- /dev/null +++ b/releasenotes/notes/image-import-proxy-params-f19d8b6166104ebe.yaml @@ -0,0 +1,12 @@ +--- +features: + - | + The ``openstack.image.Image.import_image`` method and ``import_image`` + image proxy method now accept the following additional parameters: + + - ``remote_region`` + - ``remote_image_id`` + - ``remote_service_interface`` + + These are required to support the ``glance-download`` image import + method. diff --git a/releasenotes/notes/image-import-support-97052cdbc8ce449b.yaml b/releasenotes/notes/image-import-support-97052cdbc8ce449b.yaml new file mode 100644 index 0000000000..707d92eb2b --- /dev/null +++ b/releasenotes/notes/image-import-support-97052cdbc8ce449b.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Added support for using the image import feature when creating an + image. 
SDK will now fall back to using image import if there is an + error during PUT. diff --git a/releasenotes/notes/image-proxy-layer-kwarg-only-arguments-94c9b2033d386160.yaml b/releasenotes/notes/image-proxy-layer-kwarg-only-arguments-94c9b2033d386160.yaml new file mode 100644 index 0000000000..882bd86535 --- /dev/null +++ b/releasenotes/notes/image-proxy-layer-kwarg-only-arguments-94c9b2033d386160.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + The signatures of the ``openstack.image.v2.import_image`` has changed. All + arguments except ``image`` and ``method`` are now kwarg-only. diff --git a/releasenotes/notes/image-update-76bd3bf24c1c1380.yaml b/releasenotes/notes/image-update-76bd3bf24c1c1380.yaml new file mode 100644 index 0000000000..47599e0853 --- /dev/null +++ b/releasenotes/notes/image-update-76bd3bf24c1c1380.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + When using the Image API, it is no longer possible to set arbitrary + properties, not known to the SDK, via ``image.update_image`` API. diff --git a/releasenotes/notes/improve-metrics-5d7ce70ce4021d72.yaml b/releasenotes/notes/improve-metrics-5d7ce70ce4021d72.yaml new file mode 100644 index 0000000000..86d275947e --- /dev/null +++ b/releasenotes/notes/improve-metrics-5d7ce70ce4021d72.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + API metrics emitted by OpenStackSDK to StatsD now contain status_code + part of the metric name in order to improve information precision. diff --git a/releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml b/releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml new file mode 100644 index 0000000000..f3f35f4803 --- /dev/null +++ b/releasenotes/notes/infer-secgroup-source-58d840aaf1a1f485.yaml @@ -0,0 +1,9 @@ +--- +features: + - If a cloud does not have a neutron service, it is now + assumed that Nova will be the source of security groups. 
+ To handle clouds that have nova-network and do not have + the security group extension, setting secgroup_source to + None will prevent attempting to use them at all. If the + cloud has neutron but it is not a functional source of + security groups, set secgroup_source to nova. diff --git a/releasenotes/notes/inspection-rules-86b1c59def73f757.yaml b/releasenotes/notes/inspection-rules-86b1c59def73f757.yaml new file mode 100644 index 0000000000..1712e5863d --- /dev/null +++ b/releasenotes/notes/inspection-rules-86b1c59def73f757.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds support for inspection rules; an API feature to create a resource + containing conditions that evaluate against inspection data and actions + that run on a node when conditions are met during inspection. diff --git a/releasenotes/notes/introduce-source-and-destination-ip-prefixes-into-metering-label-rules-e04b797adac5d0d0.yaml b/releasenotes/notes/introduce-source-and-destination-ip-prefixes-into-metering-label-rules-e04b797adac5d0d0.yaml new file mode 100644 index 0000000000..5de538b147 --- /dev/null +++ b/releasenotes/notes/introduce-source-and-destination-ip-prefixes-into-metering-label-rules-e04b797adac5d0d0.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``source_ip_prefix`` and ``destination_ip_prefix`` to Neutron metering + label rules. \ No newline at end of file diff --git a/releasenotes/notes/introspection-node-6a3b7d55839ef82c.yaml b/releasenotes/notes/introspection-node-6a3b7d55839ef82c.yaml new file mode 100644 index 0000000000..4638e8959a --- /dev/null +++ b/releasenotes/notes/introspection-node-6a3b7d55839ef82c.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes using a full `Node` object as an argument to `start_introspection`. 
diff --git a/releasenotes/notes/ironic-conductors-support-3bf27e8b2f0299ba.yaml b/releasenotes/notes/ironic-conductors-support-3bf27e8b2f0299ba.yaml new file mode 100644 index 0000000000..e2aea13a21 --- /dev/null +++ b/releasenotes/notes/ironic-conductors-support-3bf27e8b2f0299ba.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Support for Ironic Conductor API. diff --git a/releasenotes/notes/ironic-deploy-steps-2c0f39d7d2a13289.yaml b/releasenotes/notes/ironic-deploy-steps-2c0f39d7d2a13289.yaml new file mode 100644 index 0000000000..b6adb8cd11 --- /dev/null +++ b/releasenotes/notes/ironic-deploy-steps-2c0f39d7d2a13289.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds ``deploy_steps`` to baremetal node provisioning. diff --git a/releasenotes/notes/ironic-deploy-template-support-fa56005365ed6e4d.yaml b/releasenotes/notes/ironic-deploy-template-support-fa56005365ed6e4d.yaml new file mode 100644 index 0000000000..d8b13ea7aa --- /dev/null +++ b/releasenotes/notes/ironic-deploy-template-support-fa56005365ed6e4d.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Support Deploy Templates for Ironic API \ No newline at end of file diff --git a/releasenotes/notes/ironic-introspection_rules_support-18b0488a76800122.yaml b/releasenotes/notes/ironic-introspection_rules_support-18b0488a76800122.yaml new file mode 100644 index 0000000000..7aede06786 --- /dev/null +++ b/releasenotes/notes/ironic-introspection_rules_support-18b0488a76800122.yaml @@ -0,0 +1,3 @@ +features: + - | + Add support for Ironic Inspector Introspection Rules API. diff --git a/releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml b/releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml new file mode 100644 index 0000000000..62e36277d6 --- /dev/null +++ b/releasenotes/notes/ironic-microversion-ba5b0f36f11196a6.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for passing Ironic microversion to the ironicclient + constructor in get_legacy_client. 
diff --git a/releasenotes/notes/ironic-node-shard-35f2557c3dbfff1d.yaml b/releasenotes/notes/ironic-node-shard-35f2557c3dbfff1d.yaml new file mode 100644 index 0000000000..d4e334f7af --- /dev/null +++ b/releasenotes/notes/ironic-node-shard-35f2557c3dbfff1d.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for Node shards to baremetal service. diff --git a/releasenotes/notes/ironic-volume_target-support-8130361804366787.yaml b/releasenotes/notes/ironic-volume_target-support-8130361804366787.yaml new file mode 100644 index 0000000000..eed88c50b6 --- /dev/null +++ b/releasenotes/notes/ironic-volume_target-support-8130361804366787.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Support for Ironic Volume Target API. diff --git a/releasenotes/notes/ksa-discovery-86a4ef00d85ea87f.yaml b/releasenotes/notes/ksa-discovery-86a4ef00d85ea87f.yaml new file mode 100644 index 0000000000..fa27dce9fe --- /dev/null +++ b/releasenotes/notes/ksa-discovery-86a4ef00d85ea87f.yaml @@ -0,0 +1,5 @@ +--- +other: + - | + All endpoint discovery logic is now handled by keystoneauth. There should + be no behavior differences. diff --git a/releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml b/releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml new file mode 100644 index 0000000000..4d0fd1a1fc --- /dev/null +++ b/releasenotes/notes/less-file-hashing-d2497337da5acbef.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - shade will now only generate file hashes for glance + images if both hashes are empty. If only one is given, + the other will be treated as an empty string. 
diff --git a/releasenotes/notes/list-all_projects-filter-27f1d471a7848507.yaml b/releasenotes/notes/list-all_projects-filter-27f1d471a7848507.yaml new file mode 100644 index 0000000000..cb90309e14 --- /dev/null +++ b/releasenotes/notes/list-all_projects-filter-27f1d471a7848507.yaml @@ -0,0 +1,33 @@ +--- +features: + - | + A number of APIs support passing an admin-only ``all_projects`` filter when + listing certain resources, allowing you to retrieve resources from all + projects rather than just the current projects. This filter is now + explicitly supported at the proxy layer for services and resources that + support it. These are: + + * Block storage (v2) + + * ``find_snapshot`` + * ``snapshots`` + * ``find_volume`` + * ``volumes`` + + * Block storage (v3) + + * ``find_snapshot`` + * ``snapshots`` + * ``find_volume`` + * ``volumes`` + + * Compute (v2) + + * ``find_server`` + * ``find_server_group`` + * ``server_groups`` + + * Workflow (v2) + + * ``find_cron_triggers`` + * ``cron_triggers`` diff --git a/releasenotes/notes/list-az-names-a38c277d1192471b.yaml b/releasenotes/notes/list-az-names-a38c277d1192471b.yaml new file mode 100644 index 0000000000..7b492716dd --- /dev/null +++ b/releasenotes/notes/list-az-names-a38c277d1192471b.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added list_availability_zone_names API call. diff --git a/releasenotes/notes/list-network-resources-empty-list-6aa760c01e7d97d7.yaml b/releasenotes/notes/list-network-resources-empty-list-6aa760c01e7d97d7.yaml new file mode 100644 index 0000000000..2c7be2c72a --- /dev/null +++ b/releasenotes/notes/list-network-resources-empty-list-6aa760c01e7d97d7.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Basic networking list calls in the cloud layer have been fixed to return + an empty list if neutron is not running. 
diff --git a/releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml b/releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml new file mode 100644 index 0000000000..df0d96b3da --- /dev/null +++ b/releasenotes/notes/list-role-assignments-keystone-v2-b127b12b4860f50c.yaml @@ -0,0 +1,3 @@ +--- +features: + - Implement list_role_assignments for keystone v2, using roles_for_user. diff --git a/releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml b/releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml new file mode 100644 index 0000000000..c993d2d811 --- /dev/null +++ b/releasenotes/notes/list-servers-all-projects-349e6dc665ba2e8d.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add 'all_projects' parameter to list_servers and + search_servers which will tell Nova to return servers for all projects + rather than just for the current project. This is only available to + cloud admins. diff --git a/releasenotes/notes/load-yaml-3177efca78e5c67a.yaml b/releasenotes/notes/load-yaml-3177efca78e5c67a.yaml new file mode 100644 index 0000000000..2438f83a4a --- /dev/null +++ b/releasenotes/notes/load-yaml-3177efca78e5c67a.yaml @@ -0,0 +1,7 @@ +--- +features: + - Added a flag, 'load_yaml_config' that defaults to True. + If set to false, no clouds.yaml files will be loaded. This + is beneficial if os-client-config wants to be used inside of + a service where end-user clouds.yaml files would make things + more confusing. diff --git a/releasenotes/notes/location-server-resource-af77fdab5d35d421.yaml b/releasenotes/notes/location-server-resource-af77fdab5d35d421.yaml new file mode 100644 index 0000000000..d548881d88 --- /dev/null +++ b/releasenotes/notes/location-server-resource-af77fdab5d35d421.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Corrected the location property on the ``Server`` resource to + use the ``project_id`` from the remote resource rather than the + information from the token of the user. 
diff --git a/releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml b/releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml new file mode 100644 index 0000000000..6c81b77566 --- /dev/null +++ b/releasenotes/notes/log-request-ids-37507cb6eed9a7da.yaml @@ -0,0 +1,5 @@ +--- +other: + - The contents of x-openstack-request-id are no longer + added to object returned. Instead, they are logged to + a logger named 'openstack.cloud.request_ids'. diff --git a/releasenotes/notes/machine-get-update-microversions-4b910e63cebd65e2.yaml b/releasenotes/notes/machine-get-update-microversions-4b910e63cebd65e2.yaml new file mode 100644 index 0000000000..7c0f47749f --- /dev/null +++ b/releasenotes/notes/machine-get-update-microversions-4b910e63cebd65e2.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + The ``get_machine``, ``update_machine`` and ``patch_machine`` calls now + support all Bare Metal API microversions supported by the SDK. Previously + they used 1.6 unconditionally. +upgrade: + - | + The baremetal API now returns ``available`` as provision state for nodes + available for deployment. Previously, ``None`` could be returned for API + version 1.1 (early Kilo) and older. diff --git a/releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml b/releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml new file mode 100644 index 0000000000..570e4dccab --- /dev/null +++ b/releasenotes/notes/magic-fixes-dca4ae4dac2441a8.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - Refactor ``OpenStackConfig._fix_backward_madness()`` into + ``OpenStackConfig.magic_fixes()`` that allows subclasses + to inject more fixup magic into the flow during + ``get_one_cloud()`` processing. 
diff --git a/releasenotes/notes/make-cloud-region-standalone-848a2c4b5f3ebc29.yaml b/releasenotes/notes/make-cloud-region-standalone-848a2c4b5f3ebc29.yaml new file mode 100644 index 0000000000..d5745f4292 --- /dev/null +++ b/releasenotes/notes/make-cloud-region-standalone-848a2c4b5f3ebc29.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Updated the ``openstack.config.cloud_config.CloudRegion`` object to be + able to store and retrieve cache settings and the password callback object + without needing an ``openstack.config.loader.OpenStackConfig`` object. diff --git a/releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml b/releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml new file mode 100644 index 0000000000..8e34e51980 --- /dev/null +++ b/releasenotes/notes/make-rest-client-dd3d365632a26fa0.yaml @@ -0,0 +1,4 @@ +--- +deprecations: + - Renamed session_client to make_rest_client. session_client + will continue to be supported for backwards compatibility. diff --git a/releasenotes/notes/make-rest-client-version-discovery-84125700f159491a.yaml b/releasenotes/notes/make-rest-client-version-discovery-84125700f159491a.yaml new file mode 100644 index 0000000000..7326978f9f --- /dev/null +++ b/releasenotes/notes/make-rest-client-version-discovery-84125700f159491a.yaml @@ -0,0 +1,6 @@ +--- +features: + - Add version argument to make_rest_client and plumb + version discovery through get_session_client so that + versioned endpoints are properly found if unversioned + are in the catalog.
diff --git a/releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml b/releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml new file mode 100644 index 0000000000..eaa7183074 --- /dev/null +++ b/releasenotes/notes/make_object_metadata_easier.yaml-e9751723e002e06f.yaml @@ -0,0 +1,5 @@ +--- +features: + - create_object() now has a "metadata" parameter that can be used to create + an object with metadata of each key and value pair in that dictionary + - Add an update_object() function that updates the metadata of a swift object diff --git a/releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml b/releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml new file mode 100644 index 0000000000..f7718aabb0 --- /dev/null +++ b/releasenotes/notes/merge-shade-os-client-config-29878734ad643e33.yaml @@ -0,0 +1,4 @@ +--- +other: + - The shade and os-client-config libraries have been + merged into python-openstacksdk. diff --git a/releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml b/releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml new file mode 100644 index 0000000000..13eb7ca2f5 --- /dev/null +++ b/releasenotes/notes/meta-passthrough-d695bff4f9366b65.yaml @@ -0,0 +1,7 @@ +--- +features: + - Added a parameter to create_image 'meta' which allows + for providing parameters to the API that will not have + any type conversions performed. For the simple case, + the existing kwargs approach to image metadata is still + the best bet. diff --git a/releasenotes/notes/metadata-key-name-bugfix-77612a825c5145d7.yaml b/releasenotes/notes/metadata-key-name-bugfix-77612a825c5145d7.yaml new file mode 100644 index 0000000000..892fb85f26 --- /dev/null +++ b/releasenotes/notes/metadata-key-name-bugfix-77612a825c5145d7.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Fixed a bug related to metadata's key name. 
An exception was + raised when setting it to "delete", "clear" or "key" diff --git a/releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml b/releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml new file mode 100644 index 0000000000..30a380225d --- /dev/null +++ b/releasenotes/notes/min-max-legacy-version-301242466ddefa93.yaml @@ -0,0 +1,15 @@ +--- +features: + - Add min_version and max_version to get_legacy_client + and to get_session_endpoint. At the moment this is only + really fully plumbed through for cinder, which has extra + special fun around volume, volumev2 and volumev3. Min and max + versions to both methods will look through the options available + in the service catalog and try to return the latest one available + from the span of requested versions. This means a user can say + volume_api_version=None, min_version=2, max_version=3 will get + an endpoint from get_session_endpoint or a Client from cinderclient + that will be either v2 or v3 but not v1. In the future, min and max + version for get_session_endpoint should be able to sort out + appropriate endpoints via version discovery, but that does not + currently exist. diff --git a/releasenotes/notes/mtu-settings-8ce8b54d096580a2.yaml b/releasenotes/notes/mtu-settings-8ce8b54d096580a2.yaml new file mode 100644 index 0000000000..4de74d6624 --- /dev/null +++ b/releasenotes/notes/mtu-settings-8ce8b54d096580a2.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + create_network now exposes the mtu api option in accordance to network + v2 api. This allows the operator to adjust the given MTU value which + is needed in various complex network deployments.
diff --git a/releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml b/releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml new file mode 100644 index 0000000000..5df3f6d51a --- /dev/null +++ b/releasenotes/notes/multiple-updates-b48cc2f6db2e526d.yaml @@ -0,0 +1,14 @@ +--- +features: + - Removed unneeded calls that were made when deleting servers with + floating ips. + - Added pagination support for volume listing. +upgrade: + - Removed designateclient as a dependency. All designate operations + are now performed with direct REST calls using keystoneauth + Adapter. + - Server creation calls are now done with direct REST calls. +fixes: + - Fixed a bug related to neutron endpoints that did not have trailing + slashes. + - Fixed issue with ports not having a created_at attribute. diff --git a/releasenotes/notes/munch-sub-dict-e1619c71c26879cb.yaml b/releasenotes/notes/munch-sub-dict-e1619c71c26879cb.yaml new file mode 100644 index 0000000000..2fc59a2487 --- /dev/null +++ b/releasenotes/notes/munch-sub-dict-e1619c71c26879cb.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixed a regression where sub-dicts of server objects + were not usable with object notation. diff --git a/releasenotes/notes/nat-source-field-7c7db2a724616d59.yaml b/releasenotes/notes/nat-source-field-7c7db2a724616d59.yaml new file mode 100644 index 0000000000..3341c9f255 --- /dev/null +++ b/releasenotes/notes/nat-source-field-7c7db2a724616d59.yaml @@ -0,0 +1,6 @@ +--- +features: + - Added nat_source flag for networks. In some more complex clouds there + can not only be more than one valid network on a server that NAT can + attach to, there can also be more than one valid network from which to + get a NAT address. Allow flagging a network so that it can be found.
diff --git a/releasenotes/notes/nat-source-support-92aaf6b336d0b848.yaml b/releasenotes/notes/nat-source-support-92aaf6b336d0b848.yaml new file mode 100644 index 0000000000..efd8713a45 --- /dev/null +++ b/releasenotes/notes/nat-source-support-92aaf6b336d0b848.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for networks being configured as the + primary nat_source in clouds.yaml. diff --git a/releasenotes/notes/net_provider-dd64b697476b7094.yaml b/releasenotes/notes/net_provider-dd64b697476b7094.yaml new file mode 100644 index 0000000000..65a007302e --- /dev/null +++ b/releasenotes/notes/net_provider-dd64b697476b7094.yaml @@ -0,0 +1,3 @@ +--- +features: + - Network provider options are now accepted in create_network(). diff --git a/releasenotes/notes/network-add-tap-mirror-46376bd98ee69c81.yaml b/releasenotes/notes/network-add-tap-mirror-46376bd98ee69c81.yaml new file mode 100644 index 0000000000..d0253fbdfc --- /dev/null +++ b/releasenotes/notes/network-add-tap-mirror-46376bd98ee69c81.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``Tap Mirror`` and introduce the support for creating, reading, + updating and deleting ``tap_mirrors``. diff --git a/releasenotes/notes/network-create-tags-method-ccb37b01ed52a58c.yaml b/releasenotes/notes/network-create-tags-method-ccb37b01ed52a58c.yaml new file mode 100644 index 0000000000..ff48f14eda --- /dev/null +++ b/releasenotes/notes/network-create-tags-method-ccb37b01ed52a58c.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added a method to create (POST) tags in the network resources. This method + is idempotent. diff --git a/releasenotes/notes/network-data-bd94e4a499ba3e0d.yaml b/releasenotes/notes/network-data-bd94e4a499ba3e0d.yaml new file mode 100644 index 0000000000..22e5bd7066 --- /dev/null +++ b/releasenotes/notes/network-data-bd94e4a499ba3e0d.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Fixes ``openstack.baremetal.configdrive.build`` to actually handle the + ``network_data`` argument. 
diff --git a/releasenotes/notes/network-data-deb5772edc111428.yaml b/releasenotes/notes/network-data-deb5772edc111428.yaml new file mode 100644 index 0000000000..3cd9e2dbd8 --- /dev/null +++ b/releasenotes/notes/network-data-deb5772edc111428.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds support for `network_data + `_ + when building baremetal configdrives. diff --git a/releasenotes/notes/network-list-e6e9dafdd8446263.yaml b/releasenotes/notes/network-list-e6e9dafdd8446263.yaml new file mode 100644 index 0000000000..8f793c2bca --- /dev/null +++ b/releasenotes/notes/network-list-e6e9dafdd8446263.yaml @@ -0,0 +1,10 @@ +--- +features: + - Support added for configuring metadata about networks + for a cloud in a list of dicts, rather than in the + external_network and internal_network entries. The dicts + support a name, a routes_externally field, a nat_destination + field and a default_interface field. +deprecations: + - external_network and internal_network are deprecated and + should be replaced with the list of network dicts. diff --git a/releasenotes/notes/network-qos-rule-filter-keys-324e3222510fd362.yaml b/releasenotes/notes/network-qos-rule-filter-keys-324e3222510fd362.yaml new file mode 100644 index 0000000000..28b35a1166 --- /dev/null +++ b/releasenotes/notes/network-qos-rule-filter-keys-324e3222510fd362.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Added two filtering keys to ``QoSRuleType`` class query mapping, used for + filtering the "list" command: "all_rules", to list all network QoS rule + types implemented in Neutron, and "all_supported", to list all network QoS + rule types supported by at least one networking mechanism driver. 
diff --git a/releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml b/releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml new file mode 100644 index 0000000000..a58cbeab46 --- /dev/null +++ b/releasenotes/notes/network-quotas-b98cce9ffeffdbf4.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OperatorCloud.get_network_quotas(), OperatorCloud.set_network_quotas() and OperatorCloud.delete_network_quotas() to manage neutron quotas for projects and users \ No newline at end of file diff --git a/releasenotes/notes/network-security-group-query-parameter-id-f6dda45b2c09dbaa.yaml b/releasenotes/notes/network-security-group-query-parameter-id-f6dda45b2c09dbaa.yaml new file mode 100644 index 0000000000..5b0db68082 --- /dev/null +++ b/releasenotes/notes/network-security-group-query-parameter-id-f6dda45b2c09dbaa.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + The ``id`` field was added a query parameter for security_groups. A single + security group id, or a list of security group ids can be passed. For + example:: + + conn.network.security_groups(id=['f959e85a-1a87-4b5c-ae56-dc917ceeb584', + 'a55c0100-7ded-40af-9c61-1d1b9a9c2692']) diff --git a/releasenotes/notes/network_add_bgp_resources-c182dc2873d6db18.yaml b/releasenotes/notes/network_add_bgp_resources-c182dc2873d6db18.yaml new file mode 100644 index 0000000000..5356c7773c --- /dev/null +++ b/releasenotes/notes/network_add_bgp_resources-c182dc2873d6db18.yaml @@ -0,0 +1,10 @@ +--- +features: + - | + Add BGP Speaker and BGP Peer resources, and introduce support for CRUD + operations for these. Additional REST operations introduced for speakers: + add_bgp_peer, remove_bgp_peer, add_gateway_network, remove_gateway_network, + get_advertised_routes, get_bgp_dragents, add_bgp_speaker_to_draget, + remove_bgp_speaker_from_dragent. 
+ One new REST method is added to agents to cover the features + of Dynamic Routing Agents schedulers: get_bgp_speakers_hosted_by_dragent diff --git a/releasenotes/notes/network_add_bgpvpn_resources-b3bd0b568c3c99db.yaml b/releasenotes/notes/network_add_bgpvpn_resources-b3bd0b568c3c99db.yaml new file mode 100644 index 0000000000..daa777f323 --- /dev/null +++ b/releasenotes/notes/network_add_bgpvpn_resources-b3bd0b568c3c99db.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add BGPVPN, BGPVPN Network Association, BGPVPN Port Association, + and BGPVPN Router Association resources and introduce support + for CRUD operations for these. + diff --git a/releasenotes/notes/network_add_sfc_resources-8a52c0c8c1f8e932.yaml b/releasenotes/notes/network_add_sfc_resources-8a52c0c8c1f8e932.yaml new file mode 100644 index 0000000000..382257b8a1 --- /dev/null +++ b/releasenotes/notes/network_add_sfc_resources-8a52c0c8c1f8e932.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Add SFC resources: FlowClassifier, PortChain, PortPair, PortPairGroup + and ServiceGraph resources and introduce support for CRUD operations + for these. diff --git a/releasenotes/notes/network_add_taas_resources-86a947265e11ce84.yaml b/releasenotes/notes/network_add_taas_resources-86a947265e11ce84.yaml new file mode 100644 index 0000000000..54fb3730b3 --- /dev/null +++ b/releasenotes/notes/network_add_taas_resources-86a947265e11ce84.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``Tap Service`` and ``Tap Flow`` resources, and introduce support for + CRUD operations for these. diff --git a/releasenotes/notes/neutron-discovery-54399116d5f810ee.yaml b/releasenotes/notes/neutron-discovery-54399116d5f810ee.yaml new file mode 100644 index 0000000000..e102a1ef7b --- /dev/null +++ b/releasenotes/notes/neutron-discovery-54399116d5f810ee.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Added workaround for using neutron on older clouds where the version + discovery document requires auth. 
diff --git a/releasenotes/notes/neutron_availability_zone_extension-675c2460ebb50a09.yaml b/releasenotes/notes/neutron_availability_zone_extension-675c2460ebb50a09.yaml new file mode 100644 index 0000000000..058f40bbc9 --- /dev/null +++ b/releasenotes/notes/neutron_availability_zone_extension-675c2460ebb50a09.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + ``availability_zone_hints`` now accepted for ``create_network()`` when + ``network_availability_zone`` extension is enabled on target cloud. + - | + ``availability_zone_hints`` now accepted for ``create_router()`` when + ``router_availability_zone`` extension is enabled on target cloud. diff --git a/releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml b/releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml new file mode 100644 index 0000000000..61f4ec1dbc --- /dev/null +++ b/releasenotes/notes/new-floating-attributes-213cdf5681d337e1.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added support for created_at, updated_at, description + and revision_number attributes for floating ips. diff --git a/releasenotes/notes/no-import-fallback-a09b5d5a11299933.yaml b/releasenotes/notes/no-import-fallback-a09b5d5a11299933.yaml new file mode 100644 index 0000000000..6f897a3041 --- /dev/null +++ b/releasenotes/notes/no-import-fallback-a09b5d5a11299933.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Image upload will no longer fall back to attempting to use + the import workflow if the initial upload does not work. diff --git a/releasenotes/notes/no-inspect-associated-563e272785bb6016.yaml b/releasenotes/notes/no-inspect-associated-563e272785bb6016.yaml new file mode 100644 index 0000000000..c2faab6a95 --- /dev/null +++ b/releasenotes/notes/no-inspect-associated-563e272785bb6016.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + Machine inspection is now blocked for machines associated with an instance. + This is to avoid "stealing" a machine from under a provisioner (e.g. Nova). 
diff --git a/releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml b/releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml new file mode 100644 index 0000000000..1096921a5f --- /dev/null +++ b/releasenotes/notes/no-more-troveclient-0a4739c21432ac63.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - troveclient is no longer a hard dependency. Users + who were using shade to construct a troveclient + Client object should use os_client_config.make_legacy_client + instead. diff --git a/releasenotes/notes/no-start-task-manager-56773f3ea5eb3a59.yaml b/releasenotes/notes/no-start-task-manager-56773f3ea5eb3a59.yaml new file mode 100644 index 0000000000..e40507d12c --- /dev/null +++ b/releasenotes/notes/no-start-task-manager-56773f3ea5eb3a59.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed a regression in the new `TaskManager` code which caused programs that + were passing in a `TaskManager` that they had been running `start` on to + fail due to a double call. diff --git a/releasenotes/notes/node-boot-devices-2ab4991d75a2ab52.yaml b/releasenotes/notes/node-boot-devices-2ab4991d75a2ab52.yaml new file mode 100644 index 0000000000..c568d9e99b --- /dev/null +++ b/releasenotes/notes/node-boot-devices-2ab4991d75a2ab52.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Adds ``get_boot_device`` and ``get_supported_boot_devices`` to + ``openstack.baremetal.v1.Node``. + - | + Adds ``get_node_boot_device`` and ``get_node_supported_boot_devices`` + to the baremetal Proxy. diff --git a/releasenotes/notes/node-consoles-63589f22da98a689.yaml b/releasenotes/notes/node-consoles-63589f22da98a689.yaml new file mode 100644 index 0000000000..95a2451d85 --- /dev/null +++ b/releasenotes/notes/node-consoles-63589f22da98a689.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Adds ``get_console`` and ``set_console_state`` to + ``openstack.baremetal.v1.Node``. + - | + Adds ``get_node_console``, ``enable_node_console`` and + ``disable_node_console`` to the baremetal Proxy. 
diff --git a/releasenotes/notes/node-create-027ea99193f344ef.yaml b/releasenotes/notes/node-create-027ea99193f344ef.yaml new file mode 100644 index 0000000000..3f74ff2703 --- /dev/null +++ b/releasenotes/notes/node-create-027ea99193f344ef.yaml @@ -0,0 +1,9 @@ +--- +upgrade: + - | + Changes the baremetal ``create_node`` call to be closer to how Ironic + behaves. If no provision state is requested, the default state of the + current microversion is used (which usually means ``enroll``). + If the ``available`` state is requested, the node does not go through + cleaning (it won't work without creating ports), an old API version is + used to achieve this provision state. diff --git a/releasenotes/notes/node-inject-nmi-53d12681026e0b6c.yaml b/releasenotes/notes/node-inject-nmi-53d12681026e0b6c.yaml new file mode 100644 index 0000000000..41f0654515 --- /dev/null +++ b/releasenotes/notes/node-inject-nmi-53d12681026e0b6c.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds ``inject_nmi`` to ``openstack.baremetal.v1.Node``. + - | + Adds ``inject_nmi_to_node`` to the baremetal Proxy. diff --git a/releasenotes/notes/node-owner-7f4b083ff9da8cce.yaml b/releasenotes/notes/node-owner-7f4b083ff9da8cce.yaml new file mode 100644 index 0000000000..88fb5f3d05 --- /dev/null +++ b/releasenotes/notes/node-owner-7f4b083ff9da8cce.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + The ``openstack.baremetal.v1.Node`` resource now has an ``owner`` property + which was added in the baremetal API `microversion 1.50`_. + + ..
_microversion 1.50: https://docs.openstack.org/ironic/latest/contributor/webapi-version-history.html#id7 diff --git a/releasenotes/notes/node-set-provision-state-3472cbd81c47458f.yaml b/releasenotes/notes/node-set-provision-state-3472cbd81c47458f.yaml new file mode 100644 index 0000000000..f75f6dfec1 --- /dev/null +++ b/releasenotes/notes/node-set-provision-state-3472cbd81c47458f.yaml @@ -0,0 +1,11 @@ +--- +features: + - | + Adds ``set_provision_state`` and ``wait_for_provision_state`` to + ``openstack.baremetal.v1.Node``. + - | + Adds ``node_set_provision_state`` and ``wait_for_nodes_provision_state`` + to the baremetal Proxy. + - | + The ``node_set_provision_state`` call now supports provision states + up to the Queens release. diff --git a/releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml b/releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml new file mode 100644 index 0000000000..39ee2765d5 --- /dev/null +++ b/releasenotes/notes/norm_role_assignments-a13f41768e62d40c.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - Role assignments were being returned as plain dicts instead of Munch objects. + This has been corrected. diff --git a/releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml b/releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml new file mode 100644 index 0000000000..bbe2dfb51f --- /dev/null +++ b/releasenotes/notes/normalize-images-1331bea7bfffa36a.yaml @@ -0,0 +1,6 @@ +--- +features: + - Image dicts that are returned are now normalized across glance v1 + and glance v2. Extra key/value properties are now both in the root + dict and in a properties dict. Additionally, cloud and region have + been added like they are for server. 
diff --git a/releasenotes/notes/normalize-machine-290d9f2a3b3a7ef0.yaml b/releasenotes/notes/normalize-machine-290d9f2a3b3a7ef0.yaml new file mode 100644 index 0000000000..04b36e4d80 --- /dev/null +++ b/releasenotes/notes/normalize-machine-290d9f2a3b3a7ef0.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - | + Fixes normalization of bare metal machines in the ``patch_machine`` call. diff --git a/releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml b/releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml new file mode 100644 index 0000000000..1e1f501c2d --- /dev/null +++ b/releasenotes/notes/nova-flavor-to-rest-0a5757e35714a690.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Nova flavor operations are now handled via REST calls + instead of via novaclient. There should be no noticeable + difference. diff --git a/releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml b/releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml new file mode 100644 index 0000000000..013ed82fa3 --- /dev/null +++ b/releasenotes/notes/nova-old-microversion-5e4b8e239ba44096.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Nova microversion is being requested. Since shade is not yet + actively microversion aware, but has been dealing with the 2.0 structures + anyway, this should not affect anyone.
diff --git a/releasenotes/notes/object-checksum-generation-ea1c1e47d2290054.yaml b/releasenotes/notes/object-checksum-generation-ea1c1e47d2290054.yaml new file mode 100644 index 0000000000..e27a873963 --- /dev/null +++ b/releasenotes/notes/object-checksum-generation-ea1c1e47d2290054.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add flag for disabling object checksum generation + diff --git a/releasenotes/notes/object-chunked-data-ee619b7d4759b8d2.yaml b/releasenotes/notes/object-chunked-data-ee619b7d4759b8d2.yaml new file mode 100644 index 0000000000..99c317a33d --- /dev/null +++ b/releasenotes/notes/object-chunked-data-ee619b7d4759b8d2.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixed an issue where passing an iterator to the ``data`` parameter of + ``create_object`` for chunked uploads failed due to attempting to + calculate the length of the data. diff --git a/releasenotes/notes/object-search-a5f5ec4b2df3e045.yaml b/releasenotes/notes/object-search-a5f5ec4b2df3e045.yaml new file mode 100644 index 0000000000..1ac05e9bfd --- /dev/null +++ b/releasenotes/notes/object-search-a5f5ec4b2df3e045.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Objects are now searchable both with a JMESPath expression or a dict of + object attributes via the + ``openstack.connection.Connection.search_object`` function. diff --git a/releasenotes/notes/old-placement-4b3c34abb8fe7b81.yaml b/releasenotes/notes/old-placement-4b3c34abb8fe7b81.yaml new file mode 100644 index 0000000000..402e873433 --- /dev/null +++ b/releasenotes/notes/old-placement-4b3c34abb8fe7b81.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Workaround an issue using openstacksdk with older versions of + the placement service that are missing a status field in + their version discovery doc. 
diff --git a/releasenotes/notes/optimize-server-console-1d27c107b9a1cdc3.yaml b/releasenotes/notes/optimize-server-console-1d27c107b9a1cdc3.yaml new file mode 100644 index 0000000000..9cb44b4a85 --- /dev/null +++ b/releasenotes/notes/optimize-server-console-1d27c107b9a1cdc3.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Optimizes compute server console creation by adding older + get_server_console method to the server and create_console proxy method + calling appropriate method depending on the supported microversion. diff --git a/releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml b/releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml new file mode 100644 index 0000000000..06e6bd2f65 --- /dev/null +++ b/releasenotes/notes/option-precedence-1fecab21fdfb2c33.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - Reverse the order of option selection in + ``OpenStackConfig._validate_auth()`` to prefer auth options + passed in (from argparse) over those found in clouds.yaml. + This allows the application to override config profile + auth settings. diff --git a/releasenotes/notes/parse_connect_retries_delay-4306e9a0f50ee006.yaml b/releasenotes/notes/parse_connect_retries_delay-4306e9a0f50ee006.yaml new file mode 100644 index 0000000000..91680ba527 --- /dev/null +++ b/releasenotes/notes/parse_connect_retries_delay-4306e9a0f50ee006.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + ``Session.connect_retry_delay`` is now configurable via ``clouds.yaml`` using + the ``_connect_retry_delay`` and ``connect_retry_delay`` options. diff --git a/releasenotes/notes/port-device-profile-af91e25c45321691.yaml b/releasenotes/notes/port-device-profile-af91e25c45321691.yaml new file mode 100644 index 0000000000..e6abf24886 --- /dev/null +++ b/releasenotes/notes/port-device-profile-af91e25c45321691.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Add ``device_profile`` attribute to ``port`` resource. This parameter + can be defined during the port creation. This parameter is a nullable string.
diff --git a/releasenotes/notes/power-wait-751083852f958cb4.yaml b/releasenotes/notes/power-wait-751083852f958cb4.yaml new file mode 100644 index 0000000000..359f1d35bb --- /dev/null +++ b/releasenotes/notes/power-wait-751083852f958cb4.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Support waiting for bare metal power states. diff --git a/releasenotes/notes/project-cleanup-exclude-option-65cba962eaa5b61a.yaml b/releasenotes/notes/project-cleanup-exclude-option-65cba962eaa5b61a.yaml new file mode 100644 index 0000000000..17516d552c --- /dev/null +++ b/releasenotes/notes/project-cleanup-exclude-option-65cba962eaa5b61a.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Project cleanup now supports skipping specific resources, + which will be kept as-is. Resource names are based on the + resource registry names, e.g. "block_storage.volume". diff --git a/releasenotes/notes/project-cleanup-swift-f67615e5c3ab8fd8.yaml b/releasenotes/notes/project-cleanup-swift-f67615e5c3ab8fd8.yaml new file mode 100644 index 0000000000..8e62029ed7 --- /dev/null +++ b/releasenotes/notes/project-cleanup-swift-f67615e5c3ab8fd8.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Project cleanup now supports cleaning Swift (object-store). If supported + by the server bulk deletion is used. Currently only filtering based on + updated_at (last_modified) is supported. diff --git a/releasenotes/notes/provision-state-negotiation-0155b4d0e932054c.yaml b/releasenotes/notes/provision-state-negotiation-0155b4d0e932054c.yaml new file mode 100644 index 0000000000..3656cf9b3f --- /dev/null +++ b/releasenotes/notes/provision-state-negotiation-0155b4d0e932054c.yaml @@ -0,0 +1,12 @@ +--- +fixes: + - | + Fixes API version negotiation in the following bare metal node calls: + + * ``set_node_provision_state`` + * ``set_node_power_state`` + * ``patch_node`` + + Previously an unexpectedly low version could be negotiated, breaking + certain features, for example calling the ``provide`` provisioning action + with a node name.
diff --git a/releasenotes/notes/python-3.5-629817cec092d528.yaml b/releasenotes/notes/python-3.5-629817cec092d528.yaml new file mode 100644 index 0000000000..472ef33f40 --- /dev/null +++ b/releasenotes/notes/python-3.5-629817cec092d528.yaml @@ -0,0 +1,8 @@ +--- +fixes: + - | + openstacksdk does not test or support python2 as of 0.40, + but the releases have still accidentally worked (except for + 0.44 which was broken for python2). We're now explicitly + marking releases as requiring >= 3.5 so that things don't + attempt to install something that's bound to be broken. diff --git a/releasenotes/notes/qos-min-pps-rule-52df1b150b1d3f68.yaml b/releasenotes/notes/qos-min-pps-rule-52df1b150b1d3f68.yaml new file mode 100644 index 0000000000..dfa95c708c --- /dev/null +++ b/releasenotes/notes/qos-min-pps-rule-52df1b150b1d3f68.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added QoS minimum packet rate rule object and introduced support for CRUD + operations. diff --git a/releasenotes/notes/qos-packet-rate-limit-rule-385945e2e831ab0d.yaml b/releasenotes/notes/qos-packet-rate-limit-rule-385945e2e831ab0d.yaml new file mode 100644 index 0000000000..83c600f3e2 --- /dev/null +++ b/releasenotes/notes/qos-packet-rate-limit-rule-385945e2e831ab0d.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added Neutron QoS packet rate limit rule object and introduced support for + CRUD operations. 
diff --git a/releasenotes/notes/qos-port-network-policy-cab43faa0f8bc036.yaml b/releasenotes/notes/qos-port-network-policy-cab43faa0f8bc036.yaml new file mode 100644 index 0000000000..e472ddf489 --- /dev/null +++ b/releasenotes/notes/qos-port-network-policy-cab43faa0f8bc036.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + ``qos_network_policy_id`` attribute support has been added to the network + port resource diff --git a/releasenotes/notes/r1-cab94ae7d749a1ec.yaml b/releasenotes/notes/r1-cab94ae7d749a1ec.yaml new file mode 100644 index 0000000000..3f429a3bf2 --- /dev/null +++ b/releasenotes/notes/r1-cab94ae7d749a1ec.yaml @@ -0,0 +1,31 @@ +--- +prelude: > + This is a first major release of OpenStackSDK. + + From now on interface can be considered stable and will also in future + strictly follow SemVer model. This release includes work in ensuring + methods and attribute naming are consistent across the code base and first + steps in implementing even more generalizations in the processing logic. + + Microversion support is now considered as stable and session will be + established with the highest version supported by both client and server. +upgrade: + - | + This release includes work in enforcing consistency of the cloud layer + methods. Now they all return SDK resource objects where previously Munch + objects could have been returned. This leads to few important facts: + + - Return object types of various cloud.XXX calls now rely on proxy layer + functions and strictly return SDK resources. + - Some attributes of various resources may be named differently to + follow SDK attribute naming convention. + - Returned objects may forbid setting attributes (read-only attributes). + + Mentioned changes are affecting Ansible modules (which rely on + OpenStackSDK). Historically Ansible modules return to the Ansible engine + whatever SDK returns to it. Under some conditions Ansible may decide to + unset properties (if it decides it contains sensitive information).
While + this is correct SDK forbids setting of some attributes, which leads to + errors. This release is therefore marking incompatibility with OpenStack + Ansible modules in R1.X.X and the work on fixing it is being done in + R2.X.X of modules repository. diff --git a/releasenotes/notes/r1-d4efe289ebf0cbcd.yaml b/releasenotes/notes/r1-d4efe289ebf0cbcd.yaml new file mode 100644 index 0000000000..c2e79ad642 --- /dev/null +++ b/releasenotes/notes/r1-d4efe289ebf0cbcd.yaml @@ -0,0 +1,40 @@ +--- +prelude: > + This is a final R1.0 release of the OpenStackSDK. A few technical issues + caused us not to reach this milestone cleanly, therefore we decided to one + more time explicitly log everything that should be considered as R1.0. For + detailed list of changes please see individual release notes from 0.99.0 to + 0.103.0. Most important changes are explicitly repeated here. There were + issues with maintainability of multiple available access interfaces, which + forced us to consider what we are able to maintain in the long run and what + we can not. That means that certain things were dropped, which is why we + are releasing this as a major release. R1.0 is considered as a first major + release with corresponding promise regarding backwards-compatibility. +features: + - | + Cloud layer is now consistently returning ``Resource`` class objects. + Previously this was not always the case. + - | + API response caching is implemented deep inside the code which will + minimize roundtrips for repeated requests. + - | + The majority of services were verified and adapted to the latest state of + the API. + - | + Certain code reorganization to further help in code reduction has been made + (metadata, tag and quota support moved to standalone common classes). +upgrade: + - | + Cloud layer methods are returning ``Resource`` class objects instead of + ``Munch`` objects. In some cases this causes renaming of the attributes.
+ ``Resource`` class is ``Munch`` compatible and allows both dictionary and + attribute-based access. + - | + Some historical methods, which were never properly tested were dropped. +deprecations: + - | + ``Munch`` is dropped as a dependency. The project has no releases since + multiple years and was causing huge performance impact already during + import. This has directly no negative impact to SDK users (it now starts + faster), but in the code we copied used ``Munch`` pieces. They are going to + be consistently eliminated in next releases. diff --git a/releasenotes/notes/rackspace-block-storage-v2-fe0dd69b9e037599.yaml b/releasenotes/notes/rackspace-block-storage-v2-fe0dd69b9e037599.yaml new file mode 100644 index 0000000000..e6b81f92f0 --- /dev/null +++ b/releasenotes/notes/rackspace-block-storage-v2-fe0dd69b9e037599.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + Rackspace Cloud's vendor profile has been updated to use v2 + of the Block Storage API. This introduces an endpoint override + for the service based on ``region_name`` and ``project_id``. diff --git a/releasenotes/notes/register-machine-72ac3e65a1ed55b1.yaml b/releasenotes/notes/register-machine-72ac3e65a1ed55b1.yaml new file mode 100644 index 0000000000..ba1eaaca64 --- /dev/null +++ b/releasenotes/notes/register-machine-72ac3e65a1ed55b1.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + The default behavior of the ``register_machine`` call has been modified to + run cleaning by default, if enabled in Ironic. You can pass + ``provision_state="enroll"/"manageable"`` to avoid it.
diff --git a/releasenotes/notes/remote-address-group-id-6291816888cb3de7.yaml b/releasenotes/notes/remote-address-group-id-6291816888cb3de7.yaml new file mode 100644 index 0000000000..e1ce09505f --- /dev/null +++ b/releasenotes/notes/remote-address-group-id-6291816888cb3de7.yaml @@ -0,0 +1,6 @@ +--- +fixes: + - | + Fixes a regression sending an unsupported field + ``remote_address_group_id`` when creating security groups with an + older Neutron (introduced 0.53.0). diff --git a/releasenotes/notes/remote-profile-100218d08b25019d.yaml b/releasenotes/notes/remote-profile-100218d08b25019d.yaml new file mode 100644 index 0000000000..5cfe09d6c9 --- /dev/null +++ b/releasenotes/notes/remote-profile-100218d08b25019d.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Vendor profiles can now be fetched from an RFC 5785 compliant URL on a + cloud, namely, ``https://example.com/.well-known/openstack/api``. A cloud + can manage their own vendor profile and serve it from that URL, allowing + a user to simply list ``https://example.com`` as the profile name. diff --git a/releasenotes/notes/remove-auto-container-527f1807605b42c0.yaml b/releasenotes/notes/remove-auto-container-527f1807605b42c0.yaml new file mode 100644 index 0000000000..ef36ff6a3b --- /dev/null +++ b/releasenotes/notes/remove-auto-container-527f1807605b42c0.yaml @@ -0,0 +1,6 @@ +--- +upgrade: + - | + ``openstack.connection.Connection.create_object`` no longer creates + a container if one doesn't exist. It is the user's responsibility to + create a container before using it. 
diff --git a/releasenotes/notes/remove-block-store-details-classes-158ab1f46655320a.yaml b/releasenotes/notes/remove-block-store-details-classes-158ab1f46655320a.yaml new file mode 100644 index 0000000000..b53b6b1823 --- /dev/null +++ b/releasenotes/notes/remove-block-store-details-classes-158ab1f46655320a.yaml @@ -0,0 +1,6 @@ +--- +deprecations: + - | + Requesting volumes or backups with details from block_storage will return + objects of classes Volume and Backup correspondingly, instead + of VolumeDetail and BackupDetail. diff --git a/releasenotes/notes/remove-cloud-caching-layer-2b0384870a45e8a3.yaml b/releasenotes/notes/remove-cloud-caching-layer-2b0384870a45e8a3.yaml new file mode 100644 index 0000000000..e8f60b0327 --- /dev/null +++ b/releasenotes/notes/remove-cloud-caching-layer-2b0384870a45e8a3.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + The cloud-layer caching functionality has been removed in favour of the + proxy-layer caching functionality first introduced in openstacksdk 1.0.0. + This migration to proxy-layer caching was designed to be transparent to + end-users and there should be no user-facing impact from this removal. diff --git a/releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml b/releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml new file mode 100644 index 0000000000..249d1725bf --- /dev/null +++ b/releasenotes/notes/remove-magnumclient-875b3e513f98f57c.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - magnumclient is no longer a direct dependency as + magnum API calls are now made directly via REST. diff --git a/releasenotes/notes/remove-metric-fe5ddfd52b43c852.yaml b/releasenotes/notes/remove-metric-fe5ddfd52b43c852.yaml new file mode 100644 index 0000000000..971c4e296b --- /dev/null +++ b/releasenotes/notes/remove-metric-fe5ddfd52b43c852.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Removed the metric service. It is not an OpenStack service and does not + have an entry in service-types-authority. 
diff --git a/releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml b/releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml new file mode 100644 index 0000000000..27db18cb9e --- /dev/null +++ b/releasenotes/notes/remove-novaclient-3f8d4db20d5f9582.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - All Nova interactions are done via direct REST calls. + python-novaclient is no longer a direct dependency of + openstack.cloud. diff --git a/releasenotes/notes/remove-serverdetails-resource-f66cb278b224627d.yaml b/releasenotes/notes/remove-serverdetails-resource-f66cb278b224627d.yaml new file mode 100644 index 0000000000..4f50e55879 --- /dev/null +++ b/releasenotes/notes/remove-serverdetails-resource-f66cb278b224627d.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - | + Listing servers with details `servers(details=True)` will return + instances of the Server class instead of ServerDetails. diff --git a/releasenotes/notes/removed-deprecated-things-8700fe3592c3bf18.yaml b/releasenotes/notes/removed-deprecated-things-8700fe3592c3bf18.yaml new file mode 100644 index 0000000000..120c7d1cb9 --- /dev/null +++ b/releasenotes/notes/removed-deprecated-things-8700fe3592c3bf18.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + In anticipation of the upcoming 1.0 release, all the things that have been + marked as deprecated have been removed. diff --git a/releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml b/releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml new file mode 100644 index 0000000000..b926f4b446 --- /dev/null +++ b/releasenotes/notes/removed-glanceclient-105c7fba9481b9be.yaml @@ -0,0 +1,28 @@ +--- +prelude: > + The ``shade`` and ``os-client-config`` libraries have been merged + in to openstacksdk. As a result, their functionality is being + integrated into the sdk functionality, and in some cases is replacing + existing things. + + The ``openstack.profile.Profile`` and + ``openstack.auth.base.BaseAuthPlugin`` classes are no more.
Profile has + been replaced by ``openstack.config.cloud_region.CloudRegion`` from + `os-client-config + `_ + ``openstack.auth.base.BaseAuthPlugin`` has been replaced with the Auth + plugins from keystoneauth. + + Service proxy names on the ``openstack.connection.Connection`` are all + based on the official names from the OpenStack Service Types Authority. + + ``openstack.proxy.Proxy`` is now a subclass of + ``keystoneauth1.adapter.Adapter``. Removed local logic that duplicates + keystoneauth logic. This means every proxy also has direct REST primitives + available. + + .. code-block:: python + + connection = connection.Connection() + servers = connection.compute.servers() + server_response = connection.compute.get('/servers') diff --git a/releasenotes/notes/removed-meter-6f6651b6e452e000.yaml b/releasenotes/notes/removed-meter-6f6651b6e452e000.yaml new file mode 100644 index 0000000000..c4c5a1e451 --- /dev/null +++ b/releasenotes/notes/removed-meter-6f6651b6e452e000.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + Meter and Alarm services have been removed. The Ceilometer REST API has + been deprecated for quite some time and is no longer supported. diff --git a/releasenotes/notes/removed-profile-437f3038025b0fb3.yaml b/releasenotes/notes/removed-profile-437f3038025b0fb3.yaml new file mode 100644 index 0000000000..84bc3bd269 --- /dev/null +++ b/releasenotes/notes/removed-profile-437f3038025b0fb3.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - The Profile object has been replaced with the use of + CloudRegion objects from openstack.config. + - The openstacksdk specific Session object has been removed. + - Proxy objects are now subclasses of + keystoneauth1.adapter.Adapter. + - REST interactions all go through TaskManager now.
diff --git a/releasenotes/notes/removed-profile-b033d870937868a1.yaml b/releasenotes/notes/removed-profile-b033d870937868a1.yaml new file mode 100644 index 0000000000..c5cac152b4 --- /dev/null +++ b/releasenotes/notes/removed-profile-b033d870937868a1.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - | + ``openstack.profile.Profile`` has been removed. ``openstack.config`` + should be used directly instead. diff --git a/releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml b/releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml new file mode 100644 index 0000000000..4927c1e68b --- /dev/null +++ b/releasenotes/notes/removed-swiftclient-aff22bfaeee5f59f.yaml @@ -0,0 +1,5 @@ +--- +upgrade: + - Removed swiftclient as a dependency. All swift operations + are now performed with direct REST calls using keystoneauth + Adapter. diff --git a/releasenotes/notes/rename-base-proxy-b9fcb22d373864a2.yaml b/releasenotes/notes/rename-base-proxy-b9fcb22d373864a2.yaml new file mode 100644 index 0000000000..f34cdd9702 --- /dev/null +++ b/releasenotes/notes/rename-base-proxy-b9fcb22d373864a2.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - | + `openstack.proxy.BaseProxy` has been renamed to `openstack.proxy.Proxy`. + A ``BaseProxy`` class remains for easing transition. diff --git a/releasenotes/notes/rename-resource-methods-5f2a716b08156765.yaml b/releasenotes/notes/rename-resource-methods-5f2a716b08156765.yaml new file mode 100644 index 0000000000..071371c735 --- /dev/null +++ b/releasenotes/notes/rename-resource-methods-5f2a716b08156765.yaml @@ -0,0 +1,12 @@ +--- +upgrade: + - | + ``openstack.resource.Resource.get`` has been renamed to + ``openstack.resource.Resource.fetch`` to prevent conflicting with a + ``dict`` method of the same name. While most consumer code is unlikely + to call this method directly, this is a breaking change. 
+ - | + ``openstack.resource.Resource.update`` has been renamed to + ``openstack.resource.Resource.commit`` to prevent conflicting with a + ``dict`` method of the same name. While most consumer code is unlikely + to call this method directly, this is a breaking change. diff --git a/releasenotes/notes/rename-service-force-down-6f462d62959a5315.yaml b/releasenotes/notes/rename-service-force-down-6f462d62959a5315.yaml new file mode 100644 index 0000000000..bad5c790ba --- /dev/null +++ b/releasenotes/notes/rename-service-force-down-6f462d62959a5315.yaml @@ -0,0 +1,9 @@ +--- +upgrade: + - | + compute.force_service_down function is renamed to + update_service_forced_down to better fit the operation meaning. + - | + compute.v2.service.force_down is renamed to set_forced_down to fit the operation meaning. + - | + return of compute.service modification operations is changed to be the service itself diff --git a/releasenotes/notes/renamed-bare-metal-b1cdbc52af14e042.yaml b/releasenotes/notes/renamed-bare-metal-b1cdbc52af14e042.yaml new file mode 100644 index 0000000000..d39f1a1cb5 --- /dev/null +++ b/releasenotes/notes/renamed-bare-metal-b1cdbc52af14e042.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - Renamed bare-metal to baremetal to align with the official + service type. diff --git a/releasenotes/notes/renamed-block-store-bc5e0a7315bfeb67.yaml b/releasenotes/notes/renamed-block-store-bc5e0a7315bfeb67.yaml new file mode 100644 index 0000000000..3d5a5d34cf --- /dev/null +++ b/releasenotes/notes/renamed-block-store-bc5e0a7315bfeb67.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - The block_store service object has been renamed to block_storage to + align the API with the official service types. 
diff --git a/releasenotes/notes/renamed-cluster-743da6d321fffcba.yaml b/releasenotes/notes/renamed-cluster-743da6d321fffcba.yaml new file mode 100644 index 0000000000..0796d92b88 --- /dev/null +++ b/releasenotes/notes/renamed-cluster-743da6d321fffcba.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - Renamed cluster to clustering to align with the official + service type. diff --git a/releasenotes/notes/renamed-telemetry-c08ae3e72afca24f.yaml b/releasenotes/notes/renamed-telemetry-c08ae3e72afca24f.yaml new file mode 100644 index 0000000000..5929d4f551 --- /dev/null +++ b/releasenotes/notes/renamed-telemetry-c08ae3e72afca24f.yaml @@ -0,0 +1,4 @@ +--- +upgrade: + - Renamed telemetry to meter to align with the official + service type. diff --git a/releasenotes/notes/replace-appdirs-with-platformdirs-d3f5bcbe726b7829.yaml b/releasenotes/notes/replace-appdirs-with-platformdirs-d3f5bcbe726b7829.yaml new file mode 100644 index 0000000000..01a0815a2f --- /dev/null +++ b/releasenotes/notes/replace-appdirs-with-platformdirs-d3f5bcbe726b7829.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The ``appdirs`` dependency is replaced by a requirement for + ``platformdirs`` 3.0.0 or later. Users on macOS may need to move + configuration files to ``*/Library/Application Support``. See its release + notes for further details: + https://platformdirs.readthedocs.io/en/latest/changelog.html#platformdirs-3-0-0-2023-02-06 diff --git a/releasenotes/notes/replace-netifaces-632f60884fb7ae00.yaml b/releasenotes/notes/replace-netifaces-632f60884fb7ae00.yaml new file mode 100644 index 0000000000..a7f97eec32 --- /dev/null +++ b/releasenotes/notes/replace-netifaces-632f60884fb7ae00.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + IPv6 support now is detected according to the IP addresses assigned to all + network interfaces, instead of presence of IPv6 default route. In case + there is any IP v6 address, which is not loopback or link-local, then + the host is considered to support IPv6. 
diff --git a/releasenotes/notes/request-stats-9d70480bebbdb4d6.yaml b/releasenotes/notes/request-stats-9d70480bebbdb4d6.yaml new file mode 100644 index 0000000000..04748cae69 --- /dev/null +++ b/releasenotes/notes/request-stats-9d70480bebbdb4d6.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added support for collecting and reporting stats on calls made to + statsd and prometheus. diff --git a/releasenotes/notes/resource-find-filter-by-name-e647e5c507ff4b6c.yaml b/releasenotes/notes/resource-find-filter-by-name-e647e5c507ff4b6c.yaml new file mode 100644 index 0000000000..f736cf93cc --- /dev/null +++ b/releasenotes/notes/resource-find-filter-by-name-e647e5c507ff4b6c.yaml @@ -0,0 +1,7 @@ +--- +other: + - | + ``openstack.resource.Resource.find`` now can use the database back-end to + filter by name. If the resource class has "name" in the query parameters, + this function will add this filter parameter in the "list" command, instead + of retrieving the whole list and then manually filtering. diff --git a/releasenotes/notes/resource2-migration-835590b300bef621.yaml b/releasenotes/notes/resource2-migration-835590b300bef621.yaml new file mode 100644 index 0000000000..ecf6adc4db --- /dev/null +++ b/releasenotes/notes/resource2-migration-835590b300bef621.yaml @@ -0,0 +1,11 @@ +--- +upgrade: + - | + The ``Resource2`` and ``Proxy2`` migration has been completed. The original + ``Resource`` and ``Proxy`` clases have been removed and replaced with + ``Resource2`` and ``Proxy2``. +deprecations: + - | + The ``shade`` functionality that has been merged in to openstacksdk is + found in ``openstack.cloud`` currently. None of these interfaces should + be relied upon as the merge has not yet completed. 
diff --git a/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-10ecdff59f5c6913.yaml b/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-10ecdff59f5c6913.yaml new file mode 100644 index 0000000000..3497f18f73 --- /dev/null +++ b/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-10ecdff59f5c6913.yaml @@ -0,0 +1,14 @@ +--- +features: + - | + The following proxy ``find_*`` operations will now retrieve a detailed + resource by default when retrieving by name: + + * Bare metal (v1) + + * ``find_chassis`` + * ``find_node`` + * ``find_port`` + * ``find_port_group`` + * ``find_volume_connector`` + * ``find_volume_target`` diff --git a/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-947a3280732c448a.yaml b/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-947a3280732c448a.yaml new file mode 100644 index 0000000000..347ac2aca3 --- /dev/null +++ b/releasenotes/notes/retrieve-detailed-view-for-find-proxy-methods-947a3280732c448a.yaml @@ -0,0 +1,25 @@ +--- +features: + - | + The following proxy ``find_*`` operations will now retrieve a detailed + resource by default when retrieving by name: + + * Block storage (v2) + + * ``find_volume`` + * ``find_snapshot`` + * ``find_backup`` + + * Block storage (v3) + + * ``find_volume`` + * ``find_snapshot`` + * ``find_backup`` + * ``find_group`` + * ``find_group_snapshot`` + + * Compute (v2) + + * ``find_image`` + * ``find_server`` + * ``find_hypervisor`` diff --git a/releasenotes/notes/return-list-of-agent-for-BGP-dragents-3608d8119012b11c.yaml b/releasenotes/notes/return-list-of-agent-for-BGP-dragents-3608d8119012b11c.yaml new file mode 100644 index 0000000000..8c9b509dbc --- /dev/null +++ b/releasenotes/notes/return-list-of-agent-for-BGP-dragents-3608d8119012b11c.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + ``get_dragents`` and ``get_bgp_speakers_hosted_by_dragent`` return list + of Agents and BgpSpeakers, see https://launchpad.net/bugs/2067039 diff --git 
a/releasenotes/notes/revert-futurist-34acc42fd3f0e7f3.yaml b/releasenotes/notes/revert-futurist-34acc42fd3f0e7f3.yaml new file mode 100644 index 0000000000..28712cd8f4 --- /dev/null +++ b/releasenotes/notes/revert-futurist-34acc42fd3f0e7f3.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + Removed the dependency on futurist, which isn't necessary. + Users can still pass futurist executors if they want, as + the API is the same, but if nothing is passed, + ``concurrent.futures.ThreadPoolExecutor`` will be used as + the default. diff --git a/releasenotes/notes/rework-compute-hypervisor-a62f275a0fd1f074.yaml b/releasenotes/notes/rework-compute-hypervisor-a62f275a0fd1f074.yaml new file mode 100644 index 0000000000..c82bf62842 --- /dev/null +++ b/releasenotes/notes/rework-compute-hypervisor-a62f275a0fd1f074.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Compute Hypervisor resource and functions are reworked to comply 2.88 + microversion with deprecating misleading attributes. diff --git a/releasenotes/notes/router-extraroute-atomic-1a0c84c3fd90ceb1.yaml b/releasenotes/notes/router-extraroute-atomic-1a0c84c3fd90ceb1.yaml new file mode 100644 index 0000000000..4edd5c9ffa --- /dev/null +++ b/releasenotes/notes/router-extraroute-atomic-1a0c84c3fd90ceb1.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for methods of Neutron extension: ``extraroute-atomic``. diff --git a/releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml b/releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml new file mode 100644 index 0000000000..84d9a1ac04 --- /dev/null +++ b/releasenotes/notes/router_ext_gw-b86582317bca8b39.yaml @@ -0,0 +1,4 @@ +--- +fixes: + - No longer fail in list_router_interfaces() if a router does + not have the external_gateway_info key. 
diff --git a/releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml b/releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml new file mode 100644 index 0000000000..a18b57dc3a --- /dev/null +++ b/releasenotes/notes/sdk-helper-41f8d815cfbcfb00.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added helper method for constructing OpenStack SDK + Connection objects. diff --git a/releasenotes/notes/search_resource-b9c2f772e01d3b2c.yaml b/releasenotes/notes/search_resource-b9c2f772e01d3b2c.yaml new file mode 100644 index 0000000000..70efca4a25 --- /dev/null +++ b/releasenotes/notes/search_resource-b9c2f772e01d3b2c.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add search_resources method implementing generic search interface accepting + resource name (as "service.resource"), name_or_id and list of additional + filters and returning 0 or many resources matching those. This interface is + primarily designed to be used by Ansible modules. diff --git a/releasenotes/notes/secret-payload-as-bytes-d04370d85c9efc4c.yaml b/releasenotes/notes/secret-payload-as-bytes-d04370d85c9efc4c.yaml new file mode 100644 index 0000000000..446728fb55 --- /dev/null +++ b/releasenotes/notes/secret-payload-as-bytes-d04370d85c9efc4c.yaml @@ -0,0 +1,16 @@ +--- +features: + - | + For Barbican secrets with detected or provided content type other than + "text/plain" SDK now returns the secret payload as raw bytes. + For secrets with content type "text/plain", the payload is returned + as string, decoded to UTF-8. + This behavior is following python-barbicanclient, and allows to use + SDK with Barbican secrets that have binary payloads + (e.g. "application/octet-stream"). +upgrade: + - | + The payload of Barbican secrets with other than "text/plain" content type + is now returned as raw bytes. + For secrets with content type "text/plain", the payload is returned + as string, decoded to UTF-8. 
diff --git a/releasenotes/notes/self-service-via-runbooks-66ca5f6fda681228.yaml b/releasenotes/notes/self-service-via-runbooks-66ca5f6fda681228.yaml new file mode 100644 index 0000000000..87ed2c6c59 --- /dev/null +++ b/releasenotes/notes/self-service-via-runbooks-66ca5f6fda681228.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds support for runbooks; an API feature that enables project members + to self-serve maintenance tasks via predefined step lists in lieu of + an arbitrary list of clean/service steps. diff --git a/releasenotes/notes/server-actions-microversion-support-f14b293d9c3d3d5e.yaml b/releasenotes/notes/server-actions-microversion-support-f14b293d9c3d3d5e.yaml new file mode 100644 index 0000000000..1421a703d7 --- /dev/null +++ b/releasenotes/notes/server-actions-microversion-support-f14b293d9c3d3d5e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Server actions such as reboot and resize will now default to the + latest microversion instead of 2.1 as before. diff --git a/releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml b/releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml new file mode 100644 index 0000000000..673c7dcb86 --- /dev/null +++ b/releasenotes/notes/server-create-error-id-66c698c7e633fb8b.yaml @@ -0,0 +1,4 @@ +--- +features: + - server creation errors now include the server id in the + Exception to allow people to clean up. diff --git a/releasenotes/notes/server-security-groups-840ab28c04f359de.yaml b/releasenotes/notes/server-security-groups-840ab28c04f359de.yaml new file mode 100644 index 0000000000..d9de793e99 --- /dev/null +++ b/releasenotes/notes/server-security-groups-840ab28c04f359de.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add the `add_server_security_groups` and `remove_server_security_groups` + functions to add and remove security groups from a specific server. 
diff --git a/releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml b/releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml new file mode 100644 index 0000000000..089d297c9b --- /dev/null +++ b/releasenotes/notes/service_enabled_flag-c917b305d3f2e8fd.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Keystone service descriptions were missing an attribute describing whether + or not the service was enabled. A new 'enabled' boolean attribute has been + added to the service data. diff --git a/releasenotes/notes/session-client-b581a6e5d18c8f04.yaml b/releasenotes/notes/session-client-b581a6e5d18c8f04.yaml new file mode 100644 index 0000000000..11219016b9 --- /dev/null +++ b/releasenotes/notes/session-client-b581a6e5d18c8f04.yaml @@ -0,0 +1,6 @@ +--- +features: + - Added kwargs and argparse processing for session_client. +deprecations: + - Renamed simple_client to session_client. simple_client + will remain as an alias for backwards compat. diff --git a/releasenotes/notes/set-bootable-volume-454a7a41e7e77d08.yaml b/releasenotes/notes/set-bootable-volume-454a7a41e7e77d08.yaml new file mode 100644 index 0000000000..c7d84fe03e --- /dev/null +++ b/releasenotes/notes/set-bootable-volume-454a7a41e7e77d08.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added a ``set_volume_bootable`` call to allow toggling the bootable state + of a volume. diff --git a/releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml b/releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml new file mode 100644 index 0000000000..70aab0a134 --- /dev/null +++ b/releasenotes/notes/shade-helper-568f8cb372eef6d9.yaml @@ -0,0 +1,4 @@ +--- +features: + - Added helper method for constructing shade + OpenStackCloud objects. 
diff --git a/releasenotes/notes/shade-into-connection-81191fb3d0ddaf6e.yaml b/releasenotes/notes/shade-into-connection-81191fb3d0ddaf6e.yaml new file mode 100644 index 0000000000..7fdbf3b114 --- /dev/null +++ b/releasenotes/notes/shade-into-connection-81191fb3d0ddaf6e.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + All of the methods formerly part of the ``shade`` library have been added + to the `openstack.connection.Connection`` object. diff --git a/releasenotes/notes/shade-location-b0d2e5cae743b738.yaml b/releasenotes/notes/shade-location-b0d2e5cae743b738.yaml new file mode 100644 index 0000000000..616a475da1 --- /dev/null +++ b/releasenotes/notes/shade-location-b0d2e5cae743b738.yaml @@ -0,0 +1,8 @@ +--- +upgrade: + - | + The base ``Resource`` field ``location`` is no longer drawn from the + ``Location`` HTTP header, but is instead a dict containing information + about cloud, domain and project. The location dict is a feature of shade + objects and is being added to all objects as part of the alignment of + shade and sdk. diff --git a/releasenotes/notes/snap-updated_at-a46711b6160e3a26.yaml b/releasenotes/notes/snap-updated_at-a46711b6160e3a26.yaml new file mode 100644 index 0000000000..927d6d11a8 --- /dev/null +++ b/releasenotes/notes/snap-updated_at-a46711b6160e3a26.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added support for the updated_at attribute for volume snapshots. diff --git a/releasenotes/notes/stack-update-5886e91fd6e423bf.yaml b/releasenotes/notes/stack-update-5886e91fd6e423bf.yaml new file mode 100644 index 0000000000..29a155236b --- /dev/null +++ b/releasenotes/notes/stack-update-5886e91fd6e423bf.yaml @@ -0,0 +1,4 @@ +--- +features: + - Implement update_stack to perform the update action on existing + orchestration stacks. 
diff --git a/releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml b/releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml new file mode 100644 index 0000000000..d7cfb5145a --- /dev/null +++ b/releasenotes/notes/started-using-reno-242e2b0cd27f9480.yaml @@ -0,0 +1,3 @@ +--- +other: +- Started using reno for release notes. diff --git a/releasenotes/notes/stateful-security-group-f32a78b9bbb49874.yaml b/releasenotes/notes/stateful-security-group-f32a78b9bbb49874.yaml new file mode 100644 index 0000000000..d0d8945e50 --- /dev/null +++ b/releasenotes/notes/stateful-security-group-f32a78b9bbb49874.yaml @@ -0,0 +1,4 @@ +--- +features: + - New stateful parameter can be used in security group + diff --git a/releasenotes/notes/stop-using-tenant-id-42eb35139ba9eeff.yaml b/releasenotes/notes/stop-using-tenant-id-42eb35139ba9eeff.yaml new file mode 100644 index 0000000000..5e4cbb010e --- /dev/null +++ b/releasenotes/notes/stop-using-tenant-id-42eb35139ba9eeff.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Stop sending tenant_id attribute to Neutron. diff --git a/releasenotes/notes/story-2010784-21d23043155497f5.yaml b/releasenotes/notes/story-2010784-21d23043155497f5.yaml new file mode 100644 index 0000000000..557eef5f3b --- /dev/null +++ b/releasenotes/notes/story-2010784-21d23043155497f5.yaml @@ -0,0 +1,33 @@ +--- +upgrade: + - | + Many cloud administrators use universal cloud-wide credentials. This is + supported in keystone via 'inherited' roles that can be applied cloud- + or domain-wide. + + In previous releases, these credentials could not be usefully defined + within ```clouds.yaml``` because ```clouds.yaml``` supports only + specifying a single domain and project for auth purposes. This project + or domain could not be overridden on the commandline. +fixes: + - | + When some config settings are specified multiple times, the order of + precedence has been changed to prefer command-line or env settings over + those found in ```clouds.yaml```.
The same reordering has been done when + a setting is specified multiple times within ```clouds.yaml```; now a + higher-level setting will take precedence over that specified within + the auth section. + + Affected settings are: + + - ``domain_id`` + - ``domain_name`` + - ``user_domain_id`` + - ``user_domain_name`` + - ``project_domain_id`` + - ``project_domain_name`` + - ``auth-token`` + - ``project_id`` + - ``tenant_id`` + - ``project_name`` + - ``tenant_name`` diff --git a/releasenotes/notes/stream-object-6ecd43511dca726b.yaml b/releasenotes/notes/stream-object-6ecd43511dca726b.yaml new file mode 100644 index 0000000000..9e102c8fc0 --- /dev/null +++ b/releasenotes/notes/stream-object-6ecd43511dca726b.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added ``stream_object`` method for getting object content in an iterator. diff --git a/releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml b/releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml new file mode 100644 index 0000000000..60e6d64c8e --- /dev/null +++ b/releasenotes/notes/stream-to-file-91f48d6dcea399c6.yaml @@ -0,0 +1,3 @@ +--- +features: + - get_object now supports streaming output directly to a file. diff --git a/releasenotes/notes/strict-mode-d493abc0c3e87945.yaml b/releasenotes/notes/strict-mode-d493abc0c3e87945.yaml new file mode 100644 index 0000000000..ea81b138bf --- /dev/null +++ b/releasenotes/notes/strict-mode-d493abc0c3e87945.yaml @@ -0,0 +1,6 @@ +--- +features: + - Added 'strict' mode, which is set by passing strict=True + to the OpenStackCloud constructor. strict mode tells shade + to only return values in resources that are part of shade's + declared data model contract. 
diff --git a/releasenotes/notes/strict-proxies-4a315f68f387ee89.yaml b/releasenotes/notes/strict-proxies-4a315f68f387ee89.yaml new file mode 100644 index 0000000000..ae2e11bc15 --- /dev/null +++ b/releasenotes/notes/strict-proxies-4a315f68f387ee89.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + Added new option for Connection, ``strict_proxies``. When set to ``True``, + Connection will throw a ``ServiceDiscoveryException`` if the endpoint for + a given service doesn't work. This is useful for OpenStack services using + sdk to talk to other OpenStack services where it can be expected that the + deployer config is correct and errors should be reported immediately. diff --git a/releasenotes/notes/support-migration-to-host-b2958b3b8c5ca1fb.yaml b/releasenotes/notes/support-migration-to-host-b2958b3b8c5ca1fb.yaml new file mode 100644 index 0000000000..839b4c0f29 --- /dev/null +++ b/releasenotes/notes/support-migration-to-host-b2958b3b8c5ca1fb.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + The ``migrate_server`` compute proxy API and the ``Server.migrate`` API now + accept a ``host`` parameter to migrate to a given host. diff --git a/releasenotes/notes/support_stdin_image_upload-305c04fb2daeb32c.yaml b/releasenotes/notes/support_stdin_image_upload-305c04fb2daeb32c.yaml new file mode 100644 index 0000000000..0d315e9f84 --- /dev/null +++ b/releasenotes/notes/support_stdin_image_upload-305c04fb2daeb32c.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Add support for creating image from STDIN (i.e. from OSC). When creating from STDIN however, no checksum verification is possible, and thus validate_checksum must be also set to False. 
diff --git a/releasenotes/notes/swift-set-metadata-c18c60e440f9e4a7.yaml b/releasenotes/notes/swift-set-metadata-c18c60e440f9e4a7.yaml new file mode 100644 index 0000000000..6fe599ef49 --- /dev/null +++ b/releasenotes/notes/swift-set-metadata-c18c60e440f9e4a7.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - | + It is now possible to pass `metadata` parameter directly into the create_container, + create_object object_store methods and will not be ignored. diff --git a/releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml b/releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml new file mode 100644 index 0000000000..27848a5d21 --- /dev/null +++ b/releasenotes/notes/swift-upload-lock-d18f3d42b3a0719a.yaml @@ -0,0 +1,5 @@ +--- +fixes: + - Fixed an issue where a section of code that was supposed to be resetting + the SwiftService object was instead resetting the protective mutex around + the SwiftService object leading to an exception of "__exit__" diff --git a/releasenotes/notes/switch-coe-to-proxy-c18789ed27cc1d95.yaml b/releasenotes/notes/switch-coe-to-proxy-c18789ed27cc1d95.yaml new file mode 100644 index 0000000000..ad4e0e3e09 --- /dev/null +++ b/releasenotes/notes/switch-coe-to-proxy-c18789ed27cc1d95.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Convert container_infrastructure_management cloud operations to rely fully + on service proxy with all resource classes created. diff --git a/releasenotes/notes/switch-nova-to-created_at-45b7b50af6a2d59e.yaml b/releasenotes/notes/switch-nova-to-created_at-45b7b50af6a2d59e.yaml new file mode 100644 index 0000000000..68cf0a5e3b --- /dev/null +++ b/releasenotes/notes/switch-nova-to-created_at-45b7b50af6a2d59e.yaml @@ -0,0 +1,5 @@ +--- +features: + - The `created` field which was returned by the Nova API is now returned as + `created_at` as well when not using strict mode for consistency with other + models. 
diff --git a/releasenotes/notes/switch-to-warnings-333955d19afc99ca.yaml b/releasenotes/notes/switch-to-warnings-333955d19afc99ca.yaml new file mode 100644 index 0000000000..a55c71f419 --- /dev/null +++ b/releasenotes/notes/switch-to-warnings-333955d19afc99ca.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + Warnings about deprecated behavior or deprecated/modified APIs are now + raised using the ``warnings`` module, rather than the ``logging`` module. + This allows users to filter these warnings or silence them entirely if + necessary. diff --git a/releasenotes/notes/task-manager-parameter-c6606653532248f2.yaml b/releasenotes/notes/task-manager-parameter-c6606653532248f2.yaml new file mode 100644 index 0000000000..e1e2c4b5bd --- /dev/null +++ b/releasenotes/notes/task-manager-parameter-c6606653532248f2.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + A new ``task_manager`` parameter to ``Connection`` has been added for + passing a TaskManager object. This was present in shade and is used by + nodepool, but was missing from the Connection constructor. diff --git a/releasenotes/notes/toggle-port-security-f5bc606e82141feb.yaml b/releasenotes/notes/toggle-port-security-f5bc606e82141feb.yaml new file mode 100644 index 0000000000..821a20fb67 --- /dev/null +++ b/releasenotes/notes/toggle-port-security-f5bc606e82141feb.yaml @@ -0,0 +1,9 @@ +--- +features: + - | + Added a new property, 'port_security_enabled' which is a boolean + to enable or disable port_security during network creation. The + default behavior will enable port security, security group and + anti spoofing will act as before. When the attribute is set to + False, security group and anti spoofing are disabled on the ports + created on this network. 
diff --git a/releasenotes/notes/unprocessed-2d75133911945869.yaml b/releasenotes/notes/unprocessed-2d75133911945869.yaml new file mode 100644 index 0000000000..d8738090b8 --- /dev/null +++ b/releasenotes/notes/unprocessed-2d75133911945869.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Supports fetching raw (unprocessed) introspection data from the bare metal + introspection service. diff --git a/releasenotes/notes/unshelve-to-specific-host-84666d440dce4a73.yaml b/releasenotes/notes/unshelve-to-specific-host-84666d440dce4a73.yaml new file mode 100644 index 0000000000..5a73890a22 --- /dev/null +++ b/releasenotes/notes/unshelve-to-specific-host-84666d440dce4a73.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Add SDK support for Nova microversion 2.91. This microversion + allows specifying a destination host to unshelve a shelve + offloaded server. And availability zone can be set to None to unpin + the availability zone of a server. diff --git a/releasenotes/notes/update-role-property-b16e902e913c7b25.yaml b/releasenotes/notes/update-role-property-b16e902e913c7b25.yaml new file mode 100644 index 0000000000..c35cec03f8 --- /dev/null +++ b/releasenotes/notes/update-role-property-b16e902e913c7b25.yaml @@ -0,0 +1,5 @@ +--- +features: + - | + Added the newly supported ``description`` parameter and the missing + ``domain_id`` parameter to ``Role`` resource. diff --git a/releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml b/releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml new file mode 100644 index 0000000000..a7b6a458bf --- /dev/null +++ b/releasenotes/notes/update_endpoint-f87c1f42d0c0d1ef.yaml @@ -0,0 +1,8 @@ +--- +features: + - Added update_endpoint as a new function that allows + the user to update a created endpoint with new values + rather than deleting and recreating that endpoint. + This feature only works with keystone v3, with v2 it + will raise an exception stating the feature is not + available. 
diff --git a/releasenotes/notes/update_workflow-ecdef6056ef2687b.yaml b/releasenotes/notes/update_workflow-ecdef6056ef2687b.yaml new file mode 100644 index 0000000000..516004ff55 --- /dev/null +++ b/releasenotes/notes/update_workflow-ecdef6056ef2687b.yaml @@ -0,0 +1,3 @@ +features: + - | + Added ``update_workflow`` to the workflow proxy. diff --git a/releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml b/releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml new file mode 100644 index 0000000000..14a4fd4a1b --- /dev/null +++ b/releasenotes/notes/use-interface-ip-c5cb3e7c91150096.yaml @@ -0,0 +1,13 @@ +--- +fixes: + - shade now correctly does not try to attach a floating ip with auto_ip + if the cloud has given a public IPv6 address and the calling context + supports IPv6 routing. shade has always used this logic to determine + the server 'interface_ip', but the auto floating ip was incorrectly only + looking at the 'public_v4' value to determine whether the server needed + additional networking. +upgrade: + - If your cloud presents a default split IPv4/IPv6 stack with a public + v6 and a private v4 address and you have the expectation that auto_ip + should procure a v4 floating ip, you need to set 'force_ipv4' to True in + your clouds.yaml entry for the cloud. diff --git a/releasenotes/notes/use-proxy-layer-dfc3764d52bc1f2a.yaml b/releasenotes/notes/use-proxy-layer-dfc3764d52bc1f2a.yaml new file mode 100644 index 0000000000..5e9882d4fc --- /dev/null +++ b/releasenotes/notes/use-proxy-layer-dfc3764d52bc1f2a.yaml @@ -0,0 +1,7 @@ +--- +upgrade: + - | + Networking functions of the cloud layer return now resource objects + `openstack.resource`. While those still implement Munch interface and are + accessible as dictionary modification of an instance might be causing + issues (i.e. forbidden). 
diff --git a/releasenotes/notes/v4-fixed-ip-325740fdae85ffa9.yaml b/releasenotes/notes/v4-fixed-ip-325740fdae85ffa9.yaml new file mode 100644 index 0000000000..99fe5b8d01 --- /dev/null +++ b/releasenotes/notes/v4-fixed-ip-325740fdae85ffa9.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Re-added support for `v4-fixed-ip` and `v6-fixed-ip` in the `nics` + parameter to `create_server`. These are aliases for `fixed_ip` provided + by novaclient which shade used to use. The switch to REST didn't include + support for these aliases, resulting in a behavior regression. diff --git a/releasenotes/notes/validate-machine-dcf528b8f587e3f0.yaml b/releasenotes/notes/validate-machine-dcf528b8f587e3f0.yaml new file mode 100644 index 0000000000..0f73ae8d3b --- /dev/null +++ b/releasenotes/notes/validate-machine-dcf528b8f587e3f0.yaml @@ -0,0 +1,5 @@ +--- +deprecations: + - | + The ``OpenStackCloud.validate_node`` call was deprecated in favor of + ``OpenStackCloud.validate_machine``. diff --git a/releasenotes/notes/vendor-add-betacloud-03872c3485104853.yaml b/releasenotes/notes/vendor-add-betacloud-03872c3485104853.yaml new file mode 100644 index 0000000000..8fdded4b81 --- /dev/null +++ b/releasenotes/notes/vendor-add-betacloud-03872c3485104853.yaml @@ -0,0 +1,3 @@ +--- +other: + - Add betacloud region for Germany diff --git a/releasenotes/notes/vendor-add-limestonenetworks-99b2ffab9fc23b08.yaml b/releasenotes/notes/vendor-add-limestonenetworks-99b2ffab9fc23b08.yaml new file mode 100644 index 0000000000..d0e8332cfb --- /dev/null +++ b/releasenotes/notes/vendor-add-limestonenetworks-99b2ffab9fc23b08.yaml @@ -0,0 +1,4 @@ +--- +other: + - | + Add Limestone Networks vendor info for us-dfw-1 and us-slc regions diff --git a/releasenotes/notes/vendor-update-betacloud-37dac22d8d91a3c5.yaml b/releasenotes/notes/vendor-update-betacloud-37dac22d8d91a3c5.yaml new file mode 100644 index 0000000000..f2e249d19f --- /dev/null +++ b/releasenotes/notes/vendor-update-betacloud-37dac22d8d91a3c5.yaml @@ -0,0 
+1,3 @@ +--- +other: + - Update betacloud region for Germany diff --git a/releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml b/releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml new file mode 100644 index 0000000000..e1d6d41a21 --- /dev/null +++ b/releasenotes/notes/vendor-updates-f11184ba56bb27cf.yaml @@ -0,0 +1,4 @@ +--- +other: + - Add citycloud regions for Buffalo, Frankfurt, Karlskrona and Los Angeles + - Add new DreamCompute cloud and deprecate DreamHost cloud diff --git a/releasenotes/notes/version-command-70c37dd7f880e9ae.yaml b/releasenotes/notes/version-command-70c37dd7f880e9ae.yaml new file mode 100644 index 0000000000..db9b5d6729 --- /dev/null +++ b/releasenotes/notes/version-command-70c37dd7f880e9ae.yaml @@ -0,0 +1,4 @@ +--- +features: + - The installed version can now be quickly checked with ``python -m + openstack version``. diff --git a/releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml b/releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml new file mode 100644 index 0000000000..c55792fe82 --- /dev/null +++ b/releasenotes/notes/version-discovery-a501c4e9e9869f77.yaml @@ -0,0 +1,13 @@ +--- +features: + - Version discovery is now done via the keystoneauth + library. shade still has one behavioral difference + from default keystoneauth behavior, which is that + shade will use a version it understands if it can + find one even if the user has requested a different + version. This change opens the door for shade to + start being able to consume API microversions as + needed. +upgrade: + - keystoneauth version 3.2.0 or higher is required + because of version discovery. diff --git a/releasenotes/notes/vol-updated_at-274c3a2bb94c8939.yaml b/releasenotes/notes/vol-updated_at-274c3a2bb94c8939.yaml new file mode 100644 index 0000000000..e23371458c --- /dev/null +++ b/releasenotes/notes/vol-updated_at-274c3a2bb94c8939.yaml @@ -0,0 +1,3 @@ +--- +features: + - Added support for the updated_at attribute for volume objects. 
diff --git a/releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml b/releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml new file mode 100644 index 0000000000..2507aacf2e --- /dev/null +++ b/releasenotes/notes/volume-quotas-5b674ee8c1f71eb6.yaml @@ -0,0 +1,3 @@ +--- +features: + - Add new APIs, OperatorCloud.get_volume_quotas(), OperatorCloud.set_volume_quotas() and OperatorCloud.delete_volume_quotas() to manage cinder quotas for projects and users \ No newline at end of file diff --git a/releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml b/releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml new file mode 100644 index 0000000000..59fea21bba --- /dev/null +++ b/releasenotes/notes/volume-types-a07a14ae668e7dd2.yaml @@ -0,0 +1,4 @@ +--- +features: + - Add support for listing volume types. + - Add support for managing volume type access. diff --git a/releasenotes/notes/volume-update-876e6540c8471440.yaml b/releasenotes/notes/volume-update-876e6540c8471440.yaml new file mode 100644 index 0000000000..18ac0ed226 --- /dev/null +++ b/releasenotes/notes/volume-update-876e6540c8471440.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Added ``update_volume`` to the block storage proxy. diff --git a/releasenotes/notes/volume_connector-api-f001e6f5fc4d1688.yaml b/releasenotes/notes/volume_connector-api-f001e6f5fc4d1688.yaml new file mode 100644 index 0000000000..776263cf70 --- /dev/null +++ b/releasenotes/notes/volume_connector-api-f001e6f5fc4d1688.yaml @@ -0,0 +1,4 @@ +--- +features: + - | + Adds support for the baremetal volume connector API. 
diff --git a/releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml b/releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml new file mode 100644 index 0000000000..ae434e28b5 --- /dev/null +++ b/releasenotes/notes/wait-on-image-snapshot-27cd2eacab2fabd8.yaml @@ -0,0 +1,7 @@ +--- +features: + - Adds a new pair of options to create_image_snapshot(), wait and timeout, + to have the function wait until the image snapshot being created goes + into an active state. + - Adds a new function wait_for_image() which will wait for an image to go + into an active state. diff --git a/releasenotes/notes/wait-provision-state-no-fail-efa74dd39f687df8.yaml b/releasenotes/notes/wait-provision-state-no-fail-efa74dd39f687df8.yaml new file mode 100644 index 0000000000..5c4fbca5a9 --- /dev/null +++ b/releasenotes/notes/wait-provision-state-no-fail-efa74dd39f687df8.yaml @@ -0,0 +1,6 @@ +--- +features: + - | + Adds an ability for the bare metal ``wait_for_nodes_provision_state`` call + to return an object with nodes that succeeded, failed or timed out instead + of raising an exception. diff --git a/releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml b/releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml new file mode 100644 index 0000000000..58bc54c5ca --- /dev/null +++ b/releasenotes/notes/wait_for_server-8dc8446b7c673d36.yaml @@ -0,0 +1,3 @@ +--- +features: + - New wait_for_server() API call to wait for a server to reach ACTIVE status. 
diff --git a/releasenotes/notes/wait_for_status_delete_callback_param-68d30161e23340bb.yaml b/releasenotes/notes/wait_for_status_delete_callback_param-68d30161e23340bb.yaml new file mode 100644 index 0000000000..1e088ef054 --- /dev/null +++ b/releasenotes/notes/wait_for_status_delete_callback_param-68d30161e23340bb.yaml @@ -0,0 +1,8 @@ +--- +features: + - | + The ``Resource.wait_for_status``, ``Resource.wait_for_delete``, and related + proxy wrappers now accept a ``callback`` argument that can be used to pass + a callback function. When provided, the wait function will attempt to + retrieve a ``progress`` value from the resource in question and pass it to + the callback function each time it iterates. diff --git a/releasenotes/notes/wire-in-retries-10898f7bc81e2269.yaml b/releasenotes/notes/wire-in-retries-10898f7bc81e2269.yaml new file mode 100644 index 0000000000..a3de250f76 --- /dev/null +++ b/releasenotes/notes/wire-in-retries-10898f7bc81e2269.yaml @@ -0,0 +1,7 @@ +--- +features: + - | + Allows configuring Session's ``connect_retries`` and + ``status_code_retries`` via the cloud configuration (options + ``_connect_retries``, ``connect_retries``, + ``_status_code_retries`` and ``status_code_retries``). diff --git a/releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml b/releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml new file mode 100644 index 0000000000..aa1b361ddf --- /dev/null +++ b/releasenotes/notes/workaround-transitive-deps-1e7a214f3256b77e.yaml @@ -0,0 +1,9 @@ +--- +fixes: + - Added requests and Babel to the direct dependencies list to work around + issues with pip installation, entrypoints and transitive dependencies + with conflicting exclusion ranges. Packagers of shade do not need to + add these two new requirements to shade's dependency list - they are + transitive depends and should be satisfied by the other things in the + requirements list. 
Both will be removed from the list again once the + python client libraries that pull them in have been removed. diff --git a/releasenotes/notes/xenapi-use-agent-ecc33e520da81ffa.yaml b/releasenotes/notes/xenapi-use-agent-ecc33e520da81ffa.yaml new file mode 100644 index 0000000000..eeb3ed77de --- /dev/null +++ b/releasenotes/notes/xenapi-use-agent-ecc33e520da81ffa.yaml @@ -0,0 +1,7 @@ +--- +fixes: + - | + Updated the Rackspace vendor entry to use `"False"` for the value of + `xenapi_use_agent` instead of `false`, because that's what the remote + side expects. The recent update to use the Resource layer exposed + the incorrect setting causing image uploads to Rackspace to fail. diff --git a/releasenotes/source/2023.1.rst b/releasenotes/source/2023.1.rst new file mode 100644 index 0000000000..2c9a36fae4 --- /dev/null +++ b/releasenotes/source/2023.1.rst @@ -0,0 +1,6 @@ +=========================== +2023.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: unmaintained/2023.1 diff --git a/releasenotes/source/2023.2.rst b/releasenotes/source/2023.2.rst new file mode 100644 index 0000000000..a4838d7d0e --- /dev/null +++ b/releasenotes/source/2023.2.rst @@ -0,0 +1,6 @@ +=========================== +2023.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2023.2 diff --git a/releasenotes/source/2024.1.rst b/releasenotes/source/2024.1.rst new file mode 100644 index 0000000000..6896656be6 --- /dev/null +++ b/releasenotes/source/2024.1.rst @@ -0,0 +1,6 @@ +=========================== +2024.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: unmaintained/2024.1 diff --git a/releasenotes/source/2024.2.rst b/releasenotes/source/2024.2.rst new file mode 100644 index 0000000000..aaebcbc8c3 --- /dev/null +++ b/releasenotes/source/2024.2.rst @@ -0,0 +1,6 @@ +=========================== +2024.2 Series Release Notes +=========================== + +.. 
release-notes:: + :branch: stable/2024.2 diff --git a/releasenotes/source/2025.1.rst b/releasenotes/source/2025.1.rst new file mode 100644 index 0000000000..3add0e53aa --- /dev/null +++ b/releasenotes/source/2025.1.rst @@ -0,0 +1,6 @@ +=========================== +2025.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.1 diff --git a/releasenotes/source/2025.2.rst b/releasenotes/source/2025.2.rst new file mode 100644 index 0000000000..4dae18d869 --- /dev/null +++ b/releasenotes/source/2025.2.rst @@ -0,0 +1,6 @@ +=========================== +2025.2 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2025.2 diff --git a/releasenotes/source/2026.1.rst b/releasenotes/source/2026.1.rst new file mode 100644 index 0000000000..3d28615808 --- /dev/null +++ b/releasenotes/source/2026.1.rst @@ -0,0 +1,6 @@ +=========================== +2026.1 Series Release Notes +=========================== + +.. release-notes:: + :branch: stable/2026.1 diff --git a/releasenotes/source/_static/.placeholder b/releasenotes/source/_static/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/releasenotes/source/_templates/.placeholder b/releasenotes/source/_templates/.placeholder new file mode 100644 index 0000000000..e69de29bb2 diff --git a/releasenotes/source/conf.py b/releasenotes/source/conf.py new file mode 100644 index 0000000000..8507c1df9b --- /dev/null +++ b/releasenotes/source/conf.py @@ -0,0 +1,277 @@ +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. 
+# See the License for the specific language governing permissions and +# limitations under the License. + +# oslo.config Release Notes documentation build configuration file, created by +# sphinx-quickstart on Tue Nov 3 17:40:50 2015. +# +# This file is execfile()d with the current directory set to its +# containing dir. +# +# Note that not all possible configuration values are present in this +# autogenerated file. +# +# All configuration values have a default; values that are commented out +# serve to show the default. + +# If extensions (or modules to document with autodoc) are in another directory, +# add these directories to sys.path here. If the directory is relative to the +# documentation root, use os.path.abspath to make it absolute, like shown here. +# sys.path.insert(0, os.path.abspath('.')) + +# -- General configuration ------------------------------------------------ + +# If your documentation needs a minimal Sphinx version, state it here. +# needs_sphinx = '1.0' + +# Add any Sphinx extension module names here, as strings. They can be +# extensions coming with Sphinx (named 'sphinx.ext.*') or your custom +# ones. +extensions = [ + 'openstackdocstheme', + 'reno.sphinxext', +] + +# openstackdocstheme options +openstackdocs_repo_name = 'openstack/openstacksdk' +openstackdocs_use_storyboard = False + +# Add any paths that contain templates here, relative to this directory. +templates_path = ['_templates'] + +# The suffix of source filenames. +source_suffix = '.rst' + +# The encoding of source files. +# source_encoding = 'utf-8-sig' + +# The master toctree document. +master_doc = 'index' + +# General information about the project. +copyright = '2017, Various members of the OpenStack Foundation' + +# Release notes are version independent. +# The short X.Y version. +version = '' +# The full version, including alpha/beta/rc tags. +release = '' + +# The language for content autogenerated by Sphinx. Refer to documentation +# for a list of supported languages. 
+# language = None + +# There are two options for replacing |today|: either, you set today to some +# non-false value, then it is used: +# today = '' +# Else, today_fmt is used as the format for a strftime call. +# today_fmt = '%B %d, %Y' + +# List of patterns, relative to source directory, that match files and +# directories to ignore when looking for source files. +exclude_patterns = [] + +# The reST default role (used for this markup: `text`) to use for all +# documents. +# default_role = None + +# If true, '()' will be appended to :func: etc. cross-reference text. +# add_function_parentheses = True + +# If true, the current module name will be prepended to all description +# unit titles (such as .. function::). +# add_module_names = True + +# If true, sectionauthor and moduleauthor directives will be shown in the +# output. They are ignored by default. +# show_authors = False + +# The name of the Pygments (syntax highlighting) style to use. +pygments_style = 'native' + +# A list of ignored prefixes for module index sorting. +# modindex_common_prefix = [] + +# If true, keep warnings as "system message" paragraphs in the built documents. +# keep_warnings = False + + +# -- Options for HTML output ---------------------------------------------- + +# The theme to use for HTML and HTML Help pages. See the documentation for +# a list of builtin themes. +html_theme = 'openstackdocs' + +# Theme options are theme-specific and customize the look and feel of a theme +# further. For a list of options available for each theme, see the +# documentation. +# html_theme_options = {} + +# Add any paths that contain custom themes here, relative to this directory. +# html_theme_path = [] + +# The name for this set of Sphinx documents. If None, it defaults to +# " v documentation". +# html_title = None + +# A shorter title for the navigation bar. Default is the same as html_title. 
+# html_short_title = None + +# The name of an image file (relative to this directory) to place at the top +# of the sidebar. +# html_logo = None + +# The name of an image file (within the static path) to use as favicon of the +# docs. This file should be a Windows icon file (.ico) being 16x16 or 32x32 +# pixels large. +# html_favicon = None + +# Add any paths that contain custom static files (such as style sheets) here, +# relative to this directory. They are copied after the builtin static files, +# so a file named "default.css" will overwrite the builtin "default.css". +html_static_path = ['_static'] + +# Add any extra paths that contain custom files (such as robots.txt or +# .htaccess) here, relative to this directory. These files are copied +# directly to the root of the documentation. +# html_extra_path = [] + +# If not '', a 'Last updated on:' timestamp is inserted at every page bottom, +# using the given strftime format. +# html_last_updated_fmt = '%b %d, %Y' + +# If true, SmartyPants will be used to convert quotes and dashes to +# typographically correct entities. +# html_use_smartypants = True + +# Custom sidebar templates, maps document names to template names. +# html_sidebars = {} + +# Additional templates that should be rendered to pages, maps page names to +# template names. +# html_additional_pages = {} + +# If false, no module index is generated. +# html_domain_indices = True + +# If false, no index is generated. +# html_use_index = True + +# If true, the index is split into individual pages for each letter. +# html_split_index = False + +# If true, links to the reST sources are added to the pages. +# html_show_sourcelink = True + +# If true, "Created using Sphinx" is shown in the HTML footer. Default is True. +# html_show_sphinx = True + +# If true, "(C) Copyright ..." is shown in the HTML footer. Default is True. 
+# html_show_copyright = True + +# If true, an OpenSearch description file will be output, and all pages will +# contain a tag referring to it. The value of this option must be the +# base URL from which the finished HTML is served. +# html_use_opensearch = '' + +# This is the file name suffix for HTML files (e.g. ".xhtml"). +# html_file_suffix = None + +# Output file base name for HTML help builder. +htmlhelp_basename = 'shadeReleaseNotesdoc' + + +# -- Options for LaTeX output --------------------------------------------- + +# Grouping the document tree into LaTeX files. List of tuples +# (source start file, target name, title, +# author, documentclass [howto, manual, or own class]). +latex_documents = [ + ( + 'index', + 'shadeReleaseNotes.tex', + 'Shade Release Notes Documentation', + 'Shade Developers', + 'manual', + ), +] + +# The name of an image file (relative to this directory) to place at the top of +# the title page. +# latex_logo = None + +# For "manual" documents, if this is true, then toplevel headings are parts, +# not chapters. +# latex_use_parts = False + +# If true, show page references after internal links. +# latex_show_pagerefs = False + +# If true, show URL addresses after external links. +# latex_show_urls = False + +# Documents to append as an appendix to all manuals. +# latex_appendices = [] + +# If false, no module index is generated. +# latex_domain_indices = True + + +# -- Options for manual page output --------------------------------------- + +# One entry per manual page. List of tuples +# (source start file, name, description, authors, manual section). +man_pages = [ + ( + 'index', + 'shadereleasenotes', + 'shade Release Notes Documentation', + ['shade Developers'], + 1, + ) +] + +# If true, show URL addresses after external links. +# man_show_urls = False + + +# -- Options for Texinfo output ------------------------------------------- + +# Grouping the document tree into Texinfo files. 
List of tuples +# (source start file, target name, title, author, +# dir menu entry, description, category) +texinfo_documents = [ + ( + 'index', + 'shadeReleaseNotes', + 'shade Release Notes Documentation', + 'shade Developers', + 'shadeReleaseNotes', + 'A client library for interacting with OpenStack clouds', + 'Miscellaneous', + ), +] + +# Documents to append as an appendix to all manuals. +# texinfo_appendices = [] + +# If false, no module index is generated. +# texinfo_domain_indices = True + +# How to display URL addresses: 'footnote', 'no', or 'inline'. +# texinfo_show_urls = 'footnote' + +# If true, do not generate a @detailmenu in the "Top" node's menu. +# texinfo_no_detailmenu = False + +# -- Options for Internationalization output ------------------------------ +locale_dirs = ['locale/'] diff --git a/releasenotes/source/index.rst b/releasenotes/source/index.rst new file mode 100644 index 0000000000..31ac66e999 --- /dev/null +++ b/releasenotes/source/index.rst @@ -0,0 +1,27 @@ +============================ + openstacksdk Release Notes +============================ + +.. toctree:: + :maxdepth: 1 + + unreleased + 2026.1 + 2025.2 + 2025.1 + 2024.2 + 2024.1 + 2023.2 + 2023.1 + zed + yoga + xena + wallaby + victoria + ussuri + train + stein + rocky + queens + pike + ocata diff --git a/releasenotes/source/ocata.rst b/releasenotes/source/ocata.rst new file mode 100644 index 0000000000..ebe62f42e1 --- /dev/null +++ b/releasenotes/source/ocata.rst @@ -0,0 +1,6 @@ +=================================== + Ocata Series Release Notes +=================================== + +.. release-notes:: + :branch: origin/stable/ocata diff --git a/releasenotes/source/pike.rst b/releasenotes/source/pike.rst new file mode 100644 index 0000000000..e43bfc0ce1 --- /dev/null +++ b/releasenotes/source/pike.rst @@ -0,0 +1,6 @@ +=================================== + Pike Series Release Notes +=================================== + +.. 
release-notes:: + :branch: stable/pike diff --git a/releasenotes/source/queens.rst b/releasenotes/source/queens.rst new file mode 100644 index 0000000000..36ac6160ca --- /dev/null +++ b/releasenotes/source/queens.rst @@ -0,0 +1,6 @@ +=================================== + Queens Series Release Notes +=================================== + +.. release-notes:: + :branch: stable/queens diff --git a/releasenotes/source/rocky.rst b/releasenotes/source/rocky.rst new file mode 100644 index 0000000000..40dd517b75 --- /dev/null +++ b/releasenotes/source/rocky.rst @@ -0,0 +1,6 @@ +=================================== + Rocky Series Release Notes +=================================== + +.. release-notes:: + :branch: stable/rocky diff --git a/releasenotes/source/stein.rst b/releasenotes/source/stein.rst new file mode 100644 index 0000000000..efaceb667b --- /dev/null +++ b/releasenotes/source/stein.rst @@ -0,0 +1,6 @@ +=================================== + Stein Series Release Notes +=================================== + +.. release-notes:: + :branch: stable/stein diff --git a/releasenotes/source/train.rst b/releasenotes/source/train.rst new file mode 100644 index 0000000000..583900393c --- /dev/null +++ b/releasenotes/source/train.rst @@ -0,0 +1,6 @@ +========================== +Train Series Release Notes +========================== + +.. release-notes:: + :branch: stable/train diff --git a/releasenotes/source/unreleased.rst b/releasenotes/source/unreleased.rst new file mode 100644 index 0000000000..875030f9d0 --- /dev/null +++ b/releasenotes/source/unreleased.rst @@ -0,0 +1,5 @@ +============================ +Current Series Release Notes +============================ + +.. release-notes:: diff --git a/releasenotes/source/ussuri.rst b/releasenotes/source/ussuri.rst new file mode 100644 index 0000000000..e21e50e0c6 --- /dev/null +++ b/releasenotes/source/ussuri.rst @@ -0,0 +1,6 @@ +=========================== +Ussuri Series Release Notes +=========================== + +.. 
release-notes:: + :branch: stable/ussuri diff --git a/releasenotes/source/victoria.rst b/releasenotes/source/victoria.rst new file mode 100644 index 0000000000..8ce9334198 --- /dev/null +++ b/releasenotes/source/victoria.rst @@ -0,0 +1,6 @@ +============================= +Victoria Series Release Notes +============================= + +.. release-notes:: + :branch: unmaintained/victoria diff --git a/releasenotes/source/wallaby.rst b/releasenotes/source/wallaby.rst new file mode 100644 index 0000000000..bcf35c5f80 --- /dev/null +++ b/releasenotes/source/wallaby.rst @@ -0,0 +1,6 @@ +============================ +Wallaby Series Release Notes +============================ + +.. release-notes:: + :branch: unmaintained/wallaby diff --git a/releasenotes/source/xena.rst b/releasenotes/source/xena.rst new file mode 100644 index 0000000000..d19eda4886 --- /dev/null +++ b/releasenotes/source/xena.rst @@ -0,0 +1,6 @@ +========================= +Xena Series Release Notes +========================= + +.. release-notes:: + :branch: unmaintained/xena diff --git a/releasenotes/source/yoga.rst b/releasenotes/source/yoga.rst new file mode 100644 index 0000000000..43cafdea89 --- /dev/null +++ b/releasenotes/source/yoga.rst @@ -0,0 +1,6 @@ +========================= +Yoga Series Release Notes +========================= + +.. release-notes:: + :branch: unmaintained/yoga diff --git a/releasenotes/source/zed.rst b/releasenotes/source/zed.rst new file mode 100644 index 0000000000..6cc2b1554c --- /dev/null +++ b/releasenotes/source/zed.rst @@ -0,0 +1,6 @@ +======================== +Zed Series Release Notes +======================== + +.. release-notes:: + :branch: unmaintained/zed diff --git a/requirements.txt b/requirements.txt index 7cae9d2330..ebb72e98cb 100644 --- a/requirements.txt +++ b/requirements.txt @@ -1,8 +1,13 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. 
Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -pbr>=1.8 # Apache-2.0 -six>=1.9.0 # MIT -stevedore>=1.17.1 # Apache-2.0 -os-client-config>=1.22.0 # Apache-2.0 -keystoneauth1>=2.16.0 # Apache-2.0 +cryptography>=2.7 # BSD/Apache-2.0 +decorator>=4.4.1 # BSD +dogpile.cache>=0.6.5 # BSD +iso8601>=0.1.11 # MIT +jmespath>=0.9.0 # MIT +jsonpatch!=1.20,>=1.16 # BSD +keystoneauth1>=5.10.0 # Apache-2.0 +os-service-types>=1.8.1 # Apache-2.0 +pbr!=2.1.0,>=2.0.0 # Apache-2.0 +platformdirs>=3 # MIT License +psutil>=3.2.2 # BSD +PyYAML>=3.13 # MIT +typing-extensions>=4.12.0 # PSF diff --git a/roles/deploy-clouds-config/README.rst b/roles/deploy-clouds-config/README.rst new file mode 100644 index 0000000000..e69de29bb2 diff --git a/roles/deploy-clouds-config/defaults/main.yaml b/roles/deploy-clouds-config/defaults/main.yaml new file mode 100644 index 0000000000..9739eb171c --- /dev/null +++ b/roles/deploy-clouds-config/defaults/main.yaml @@ -0,0 +1 @@ +zuul_work_dir: "{{ zuul.project.src_dir }}" diff --git a/roles/deploy-clouds-config/tasks/main.yaml b/roles/deploy-clouds-config/tasks/main.yaml new file mode 100644 index 0000000000..f10533bdab --- /dev/null +++ b/roles/deploy-clouds-config/tasks/main.yaml @@ -0,0 +1,11 @@ +- name: Create OpenStack config dir + ansible.builtin.file: + dest: ~/.config/openstack + state: directory + recurse: true + +- name: Deploy clouds.yaml + ansible.builtin.template: + src: clouds.yaml.j2 + dest: ~/.config/openstack/clouds.yaml + mode: 0440 diff --git a/roles/deploy-clouds-config/templates/clouds.yaml.j2 b/roles/deploy-clouds-config/templates/clouds.yaml.j2 new file mode 100644 index 0000000000..267d900659 --- /dev/null +++ b/roles/deploy-clouds-config/templates/clouds.yaml.j2 @@ -0,0 +1,2 @@ +--- +{{ cloud_config | to_nice_yaml }} diff --git a/setup.cfg b/setup.cfg index a11e58a1a5..682722d6e0 100644 --- a/setup.cfg +++ b/setup.cfg @@ -1,50 +1,2 @@ [metadata] name = openstacksdk 
-summary = An SDK for building applications to work with OpenStack -description-file = - README.rst -author = OpenStack -author-email = openstack-dev@lists.openstack.org -home-page = http://developer.openstack.org/sdks/python/openstacksdk/ -classifier = - Environment :: OpenStack - Intended Audience :: Information Technology - Intended Audience :: System Administrators - License :: OSI Approved :: Apache Software License - Operating System :: POSIX :: Linux - Programming Language :: Python - Programming Language :: Python :: 2 - Programming Language :: Python :: 2.7 - Programming Language :: Python :: 3 - Programming Language :: Python :: 3.3 - Programming Language :: Python :: 3.4 - Programming Language :: Python :: 3.5 - -[files] -packages = - openstack - -[build_sphinx] -source-dir = doc/source -build-dir = doc/build -all_files = 1 - -[upload_sphinx] -upload-dir = doc/build/html - -[compile_catalog] -directory = openstack/locale -domain = python-openstacksdk - -[update_catalog] -domain = python-openstacksdk -output_dir = openstack/locale -input_file = openstack/locale/python-openstacksdk.pot - -[extract_messages] -keywords = _ gettext ngettext l_ lazy_gettext -mapping_file = babel.cfg -output_file = openstack/locale/python-openstacksdk.pot - -[wheel] -universal = 1 diff --git a/setup.py b/setup.py index 782bb21f06..83c92e22c8 100644 --- a/setup.py +++ b/setup.py @@ -16,14 +16,4 @@ # THIS FILE IS MANAGED BY THE GLOBAL REQUIREMENTS REPO - DO NOT EDIT import setuptools -# In python < 2.7.4, a lazy loading of package `pbr` will break -# setuptools if some other modules registered functions in `atexit`. 
-# solution from: http://bugs.python.org/issue15881#msg170215 -try: - import multiprocessing # noqa -except ImportError: - pass - -setuptools.setup( - setup_requires=['pbr>=1.8'], - pbr=True) +setuptools.setup(setup_requires=['pbr>=2.0.0'], pbr=True) diff --git a/test-requirements.txt b/test-requirements.txt index 75e7af76d2..846753c015 100644 --- a/test-requirements.txt +++ b/test-requirements.txt @@ -1,18 +1,14 @@ -# The order of packages is significant, because pip processes them in the order -# of appearance. Changing the order has an impact on the overall integration -# process, which may cause wedges in the gate later. -hacking<0.11,>=0.10.0 - -coverage>=4.0 # Apache-2.0 -discover # BSD +coverage!=4.4,>=4.0 # Apache-2.0 +ddt>=1.0.1 # MIT fixtures>=3.0.0 # Apache-2.0/BSD -mock>=2.0 # BSD -python-subunit>=0.0.18 # Apache-2.0/BSD -openstackdocstheme>=1.5.0 # Apache-2.0 -os-testr>=0.8.0 # Apache-2.0 -requests!=2.12.2,>=2.10.0 # Apache-2.0 -requests-mock>=1.1 # Apache-2.0 -sphinx!=1.3b1,<1.4,>=1.2.1 # BSD -testrepository>=0.0.18 # Apache-2.0/BSD +hacking>=7.0.0,<7.1.0 # Apache-2.0 +jsonschema>=3.2.0 # MIT +oslo.config>=6.1.0 # Apache-2.0 +oslotest>=3.2.0 # Apache-2.0 +prometheus-client>=0.4.2 # Apache-2.0 +requests-mock>=1.2.0 # Apache-2.0 +statsd>=3.3.0 +stestr>=1.0.0 # Apache-2.0 testscenarios>=0.4 # Apache-2.0/BSD -testtools>=1.4.0 # MIT +testtools>=2.2.0 # MIT +keyring>=24.0.0 # MIT diff --git a/tools/keystone_version.py b/tools/keystone_version.py new file mode 100644 index 0000000000..fc08bbf787 --- /dev/null +++ b/tools/keystone_version.py @@ -0,0 +1,92 @@ +# Copyright (c) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
+# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. + +import pprint +import sys +from urllib import parse as urlparse + +import openstack.config + + +def print_versions(r): + if 'version' in r: + for version in r['version']: + print_version(version) + if 'values' in r: + for version in r['values']: + print_version(version) + if isinstance(r, list): + for version in r: + print_version(version) + + +def print_version(version): + if version['status'] in ('CURRENT', 'stable'): + print( + "\tVersion ID: {id} updated {updated}".format( + id=version.get('id'), updated=version.get('updated') + ) + ) + + +verbose = '-v' in sys.argv +ran = [] +for cloud in openstack.config.OpenStackConfig().get_all_clouds(): + if cloud.name in ran: + continue + ran.append(cloud.name) + # We don't actually need a compute client - but we'll be getting full urls + # anyway. Without this SSL cert info becomes wrong. 
+ c = cloud.get_session_client('compute') + endpoint = cloud.config['auth']['auth_url'] + try: + print(endpoint) + r = c.get(endpoint).json() + if verbose: + pprint.pprint(r) + except Exception as e: + print(f"Error with {cloud.name}: {e!s}") + continue + if 'version' in r: + print_version(r['version']) + url = urlparse.urlparse(endpoint) + parts = url.path.split(':') + if len(parts) == 2: + path, port = parts + else: + path = url.path + port = None + stripped = path.rsplit('/', 2)[0] + if port: + stripped = f'{stripped}:{port}' + endpoint = urlparse.urlunsplit( + (url.scheme, url.netloc, stripped, url.params, url.query) + ) + print(f" also {endpoint}") + try: + r = c.get(endpoint).json() + if verbose: + pprint.pprint(r) + except Exception: + print("\tUnauthorized") + continue + if 'version' in r: + print_version(r) + elif 'versions' in r: + print_versions(r['versions']) + else: + print(f"\n\nUNKNOWN\n\n{r}") + else: + print_versions(r['versions']) diff --git a/tools/nova_version.py b/tools/nova_version.py new file mode 100644 index 0000000000..65546a9f4c --- /dev/null +++ b/tools/nova_version.py @@ -0,0 +1,63 @@ +# Copyright (c) 2017 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. +# You may obtain a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, +# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or +# implied. +# See the License for the specific language governing permissions and +# limitations under the License. 
+ +import openstack.config + +ran = [] +for cloud in openstack.config.OpenStackConfig().get_all_clouds(): + if cloud.name in ran: + continue + ran.append(cloud.name) + c = cloud.get_session_client('compute') + try: + raw_endpoint = c.get_endpoint() + have_current = False + if raw_endpoint is None: + raise Exception('endpoint was empty') + endpoint = raw_endpoint.rsplit('/', 2)[0] + print(endpoint) + r = c.get(endpoint).json() + except Exception: + print(f"Error with {cloud.name}") + continue + for version in r['versions']: + if version['status'] == 'CURRENT': + have_current = True + print( + "\tVersion ID: {id} updated {updated}".format( + id=version.get('id'), updated=version.get('updated') + ) + ) + print("\tVersion Max: {max}".format(max=version.get('version'))) + print( + "\tVersion Min: {min}".format(min=version.get('min_version')) + ) + if not have_current: + for version in r['versions']: + if version['status'] == 'SUPPORTED': + have_current = True + print( + "\tVersion ID: {id} updated {updated}".format( + id=version.get('id'), updated=version.get('updated') + ) + ) + print( + "\tVersion Max: {max}".format(max=version.get('version')) + ) + print( + "\tVersion Min: {min}".format( + min=version.get('min_version') + ) + ) diff --git a/tools/print-services.py b/tools/print-services.py new file mode 100644 index 0000000000..9cbcc5bc26 --- /dev/null +++ b/tools/print-services.py @@ -0,0 +1,132 @@ +# Copyright 2018 Red Hat, Inc. +# +# Licensed under the Apache License, Version 2.0 (the "License"); you may +# not use this file except in compliance with the License. You may obtain +# a copy of the License at +# +# http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software +# distributed under the License is distributed on an "AS IS" BASIS, WITHOUT +# WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. 
See the +# License for the specific language governing permissions and limitations +# under the License. + +import importlib +import warnings + +import os_service_types + +from openstack import _log +from openstack import service_description + +_logger = _log.setup_logging('openstack') +_service_type_manager = os_service_types.ServiceTypes() + + +def make_names(): + imports = ['from openstack import service_description'] + services = [] + + for service in _service_type_manager.services: + service_type = service['service_type'] + if service_type == 'ec2-api': + # NOTE(mordred) It doesn't make any sense to use ec2-api + # from openstacksdk. The credentials API calls are all calls + # on identity endpoints. + continue + desc_class = _find_service_description_class(service_type) + + st = service_type.replace('-', '_') + + if desc_class.__module__ != 'openstack.service_description': + base_mod, dm = desc_class.__module__.rsplit('.', 1) + imports.append(f'from {base_mod} import {dm}') + else: + dm = 'service_description' + + dc = desc_class.__name__ + if dc == 'ServiceDescription': + dc = '{dc}[proxy.Proxy]' + + services.append( + f"{st} = {dm}.{dc}(service_type='{service_type}')", + ) + + # Register the descriptor class with every known alias. Don't + # add doc strings though - although they are supported, we don't + # want to give anybody any bad ideas. Making a second descriptor + # does not introduce runtime cost as the descriptors all use + # the same _proxies dict on the instance. 
+ for alias_name in _get_aliases(st): + if alias_name[-1].isdigit(): + continue + services.append(f'{alias_name} = {st}') + services.append('') + print("# Generated file, to change, run tools/print-services.py") + for imp in sorted(imports): + print(imp) + print('\n') + print("class ServicesMixin:\n") + for attr in services: + if attr: + print(f" {attr}") + else: + print() + + +def _get_aliases(service_type, aliases=None): + # We make connection attributes for all official real type names + # and aliases. Three services have names they were called by in + # openstacksdk that are not covered by Service Types Authority aliases. + # Include them here - but take heed, no additional values should ever + # be added to this list. + # that were only used in openstacksdk resource naming. + LOCAL_ALIASES = { + 'baremetal': 'bare_metal', + 'block_storage': 'block_store', + 'clustering': 'cluster', + } + all_types = set(_service_type_manager.get_aliases(service_type)) + if aliases: + all_types.update(aliases) + if service_type in LOCAL_ALIASES: + all_types.add(LOCAL_ALIASES[service_type]) + all_aliases = set() + for alias in all_types: + all_aliases.add(alias.replace('-', '_')) + return all_aliases + + +def _find_service_description_class(service_type): + package_name = f'openstack.{service_type}'.replace('-', '_') + module_name = service_type.replace('-', '_') + '_service' + class_name = ''.join( + [part.capitalize() for part in module_name.split('_')] + ) + + # We have some exceptions :( + # This should have been called 'shared-filesystem' + if service_type == 'shared-file-system': + class_name = 'SharedFilesystemService' + + try: + import_name = '.'.join([package_name, module_name]) + service_description_module = importlib.import_module(import_name) + except ImportError as e: + # ImportWarning is ignored by default. This warning is here + # as an opt-in for people trying to figure out why something + # didn't work. 
+ warnings.warn( + f"Could not import {service_type} service description: {e!s}", + ImportWarning, + ) + return service_description.ServiceDescription + + # There are no cases in which we should have a module but not the class + # inside it. + service_description_class = getattr(service_description_module, class_name) + return service_description_class + + +make_names() diff --git a/tox.ini b/tox.ini index ecb18c79c7..883fc19d49 100644 --- a/tox.ini +++ b/tox.ini @@ -1,49 +1,175 @@ [tox] -minversion = 1.6 -envlist = py35,py34,py27,pypy,pep8 -skipsdist = True +minversion = 4.3.0 +envlist = pep8,py3 [testenv] -usedevelop = True -install_command = pip install -U {opts} {packages} +description = + Run unit tests. +passenv = + OS_* + OPENSTACKSDK_* setenv = - VIRTUAL_ENV={envdir} -deps = -r{toxinidir}/test-requirements.txt -commands = ostestr {posargs} + LANG=en_US.UTF-8 + LANGUAGE=en_US:en + LC_ALL=C + OS_LOG_CAPTURE={env:OS_LOG_CAPTURE:true} + OS_STDOUT_CAPTURE={env:OS_STDOUT_CAPTURE:true} + OS_STDERR_CAPTURE={env:OS_STDERR_CAPTURE:true} +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/test-requirements.txt + -r{toxinidir}/requirements.txt +commands = + stestr run {posargs} + stestr slowest -[testenv:examples] -setenv = OS_TEST_PATH=./openstack/tests/examples -passenv = OS_* - -[functionalbase] -setenv = OS_TEST_PATH=./openstack/tests/functional -passenv = OS_* - -[testenv:functional] -basepython = python2.7 -setenv = {[functionalbase]setenv} -passenv = {[functionalbase]passenv} +[testenv:functional{,-py39,-py310,-py311,-py312}] +description = + Run functional tests. 
+# Some jobs (especially heat) takes longer, therefore increase default timeout +# This timeout should not be smaller, than the longest individual timeout +setenv = + {[testenv]setenv} + OS_TEST_TIMEOUT=600 + OPENSTACKSDK_FUNC_TEST_TIMEOUT_LOAD_BALANCER=600 + OPENSTACKSDK_EXAMPLE_CONFIG_KEY=functional + OPENSTACKSDK_FUNC_TEST_TIMEOUT_PROJECT_CLEANUP=1200 +commands = + stestr --test-path ./openstack/tests/functional/{env:OPENSTACKSDK_TESTS_SUBDIR:} run --serial {posargs} + stestr slowest -[testenv:functional3] -basepython = python3.4 -setenv = {[functionalbase]setenv} -passenv = {[functionalbase]passenv} +# Acceptance tests are the ones running on real clouds +[testenv:acceptance-regular-user] +description = + Run acceptance tests. +# This env intends to test functions of a regular user without admin privileges +# Some jobs (especially heat) takes longer, therefore increase default timeout +# This timeout should not be smaller, than the longest individual timeout +setenv = + {[testenv]setenv} + OS_TEST_TIMEOUT=600 + OPENSTACKSDK_FUNC_TEST_TIMEOUT_LOAD_BALANCER=600 + # OPENSTACKSDK_DEMO_CLOUD and OS_CLOUD should point to the cloud to test + # Othee clouds are explicitly set empty to let tests detect absense + OPENSTACKSDK_DEMO_CLOUD_ALT= + OPENSTACKSDK_OPERATOR_CLOUD= +commands = + stestr --test-path ./openstack/tests/functional/{env:OPENSTACKSDK_TESTS_SUBDIR:} run --serial {posargs} --include-list include-acceptance-regular-user.txt + stestr slowest [testenv:pep8] -commands = flake8 +description = + Run style checks. +deps = + pre-commit + {[testenv:mypy]deps} +commands = + pre-commit run -a + {[testenv:mypy]commands} + +[testenv:mypy] +description = + Run type checks. 
+deps = + {[testenv]deps} + mypy + types-decorator + types-jmespath + types-PyYAML + types-requests + types-simplejson +commands = + mypy --cache-dir="{envdir}/mypy_cache" {posargs:openstack} [testenv:venv] +description = + Run specified command in a virtual environment with all dependencies installed. +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/test-requirements.txt + -r{toxinidir}/requirements.txt + -r{toxinidir}/doc/requirements.txt commands = {posargs} -; If this fails for you, you may be running an old version of tox. -; Run 'pip install tox' to install a newer version of tox. +[testenv:debug] +description = + Run specified tests through oslo_debug_helper, which allows use of pdb. +# allow 1 year, or 31536000 seconds, to debug a test before it times out +setenv = + OS_TEST_TIMEOUT=31536000 +allowlist_externals = find +commands = + find . -type f -name "*.pyc" -delete + oslo_debug_helper -t openstack/tests {posargs} + [testenv:cover] -commands = python setup.py test --coverage --coverage-package-name=openstack --testr-args='{posargs}' +description = + Run unit tests and generate coverage report. +setenv = + {[testenv]setenv} + PYTHON=coverage run --source openstack --parallel-mode +commands = + stestr run {posargs} + coverage combine + coverage html -d cover + coverage xml -o cover/coverage.xml + +[testenv:ansible] +description = + Run ansible tests. +# Need to pass some env vars for the Ansible playbooks +passenv = + HOME + USER + ANSIBLE_VAR_* +deps = + {[testenv]deps} + ansible +commands = {toxinidir}/extras/run-ansible-tests.sh -e {envdir} {posargs} [testenv:docs] -commands = python setup.py build_sphinx +description = + Build documentation in HTML format. 
+deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt +commands = + sphinx-build -W --keep-going -b html -j auto doc/source/ doc/build/html + +[testenv:pdf-docs] +description = + Build documentation in PDF format. +deps = {[testenv:docs]deps} +allowlist_externals = + make +commands = + sphinx-build -W --keep-going -b latex -j auto doc/source/ doc/build/pdf + make -C doc/build/pdf + +[testenv:releasenotes] +description = + Build release note documentation in HTML format. +deps = + -c{env:TOX_CONSTRAINTS_FILE:https://releases.openstack.org/constraints/upper/master} + -r{toxinidir}/doc/requirements.txt +commands = + sphinx-build -W --keep-going -b html -j auto releasenotes/source releasenotes/build/html [flake8] -ignore=D100,D101,D102,D103,D104,D105,D200,D202,D204,D205,D211,D301,D400,D401 +# We only enable the hacking (H) and openstacksdk (O) checks +select = H,O +# H301 Black will put commas after imports that can't fit on one line +# H404 Docstrings don't always start with a newline +# H405 Multiline docstrings are okay +ignore = H301,H403,H404,H405 show-source = True -exclude=.venv,.git,.tox,dist,doc,*lib/python*,*egg,build +exclude = .venv,.git,.tox,dist,doc,*lib/python*,*egg,build,openstack/_services_mixin.py + +[flake8:local-plugins] +extension = + O300 = checks:assert_no_setupclass + O310 = checks:assert_no_deprecated_exceptions +paths = ./openstack/_hacking + +[doc8] +extensions = .rst, .yaml diff --git a/zuul.d/acceptance-jobs.yaml b/zuul.d/acceptance-jobs.yaml new file mode 100644 index 0000000000..3a94fdc920 --- /dev/null +++ b/zuul.d/acceptance-jobs.yaml @@ -0,0 +1,156 @@ +--- +- semaphore: + name: acceptance-cleura + max: 1 + +- job: + name: openstacksdk-acceptance-base + parent: openstack-tox + description: | + Acceptance test of the OpenStackSDK on real clouds. + + .. 
zuul:jobsvar::openstack_credentials + :type: dict + + This is expected to be a Zuul Secret with these keys: + + .. zuul:jobvar: auth + :type: dict + + Dictionary with authentication information with mandatory auth_url + and others. The structure mimics `clouds.yaml` structure. + + By default all jobs that inherit from here are non voting. + + attempts: 1 + voting: false + timeout: 3600 + pre-run: + - playbooks/acceptance/pre.yaml + post-run: + - playbooks/acceptance/post.yaml + vars: + tox_envlist: acceptance-regular-user + tox_environment: + OPENSTACKSDK_DEMO_CLOUD: acceptance + OS_CLOUD: acceptance + OS_TEST_CLOUD: acceptance + +# Acceptance tests for devstack are different from running for real cloud since +# we need to actually deploy devstack first and API is available only on the +# devstack host. +- job: + name: openstacksdk-acceptance-devstack + parent: openstacksdk-functional-devstack + description: Acceptance test of the OpenStackSDK on real clouds. + attempts: 1 + run: + - playbooks/acceptance/run-with-devstack.yaml + post-run: + - playbooks/acceptance/post.yaml + vars: + tox_envlist: acceptance-regular-user + tox_environment: + OPENSTACKSDK_DEMO_CLOUD: acceptance + OS_CLOUD: acceptance + OS_TEST_CLOUD: acceptance + auth_url: "https://{{ hostvars['controller']['nodepool']['private_ipv4'] }}/identity" + secrets: + - secret: credentials-devstack + name: openstack_credentials + +- job: + name: openstacksdk-acceptance-cleura + parent: openstacksdk-acceptance-base + description: Acceptance tests of the OpenStackSDK on Cleura + semaphores: + - name: acceptance-cleura + secrets: + - secret: credentials-cleura + name: openstack_credentials + pass-to-parent: true + +# Devstack secret is not specifying auth_url because of how Zuul treats secrets. +# Auth_url comes extra in the job vars and is being used if no auth_url in the +# secret is present. 
+- secret: + name: credentials-devstack + data: + auth: + username: demo + password: secretadmin + project_domain_id: default + project_name: demo + user_domain_id: default + region_name: RegionOne + verify: false + +# Contact: tobias [xdot] rydberg [mat] citynetwork [xdot] eu +- secret: + name: credentials-cleura + data: + auth: + auth_url: https://fra1.citycloud.com:5000 + user_domain_name: !encrypted/pkcs1-oaep + - B2+GBOl0HqQJ0umGR/8y6Y1SS+O6h7OK6rTa54797UavexKVxx2RZ144wPmW+IogX2QU2 + tWtGBveQnZTpI19nxlnLmQQA+YSz8RIzJoFuStBmiITyCHQnvRJPc7kObjnZJLuoVwCT2 + Rl3u1iGzJb/ZZvVDjvYH2ZW7a6aH+Ct7HfB+CGhvhETeoMAFDgb29QJ5U/T3OkVdTMwCY + XDtdwg2JvoErd2gnNCqYDcIiOMO6lXKcc+35VQtGMGfoaUvu+iMlEi9pJqbdVd7qz5lgY + AWBPG1mYt1mOaP8RRvzywhyRPnnnFgfUe2rf2ZozEUa7j4ObwXt7D8oRYXm+USEpk+YfD + 9V3CvGvAgmPuuidGWwnZdPcNX/w/VW5p9oWRgJFYChb5+XCu7y0tFJX/usduZEY9/MvJs + Iv0+OFf1TXc29qFqwGYVSyfimBroGFdYXmHSwK7wHJ1GUsdSRhQz4eYIdk+6c4LNx9JgO + 5Z+3Q29tlh9WwuuQKE/JlKJ/1I9LC0RmyJyxSaiTLDiL+7J2O/hULmyZimbXVcYuXqDdo + KAdPryYhmWWyBFkZfUa88GxwVf+WDLQqXhv+CDGRusbW2opVvv6p7NUwLh9PPOGnRLsS2 + y1fZDVtz60ZMp8MQPACYjlzvc2lF5Z1Cvskr3O9KbT27V7AyLXmU+tbMrDLpC0= + project_domain_name: !encrypted/pkcs1-oaep + - B2+GBOl0HqQJ0umGR/8y6Y1SS+O6h7OK6rTa54797UavexKVxx2RZ144wPmW+IogX2QU2 + tWtGBveQnZTpI19nxlnLmQQA+YSz8RIzJoFuStBmiITyCHQnvRJPc7kObjnZJLuoVwCT2 + Rl3u1iGzJb/ZZvVDjvYH2ZW7a6aH+Ct7HfB+CGhvhETeoMAFDgb29QJ5U/T3OkVdTMwCY + XDtdwg2JvoErd2gnNCqYDcIiOMO6lXKcc+35VQtGMGfoaUvu+iMlEi9pJqbdVd7qz5lgY + AWBPG1mYt1mOaP8RRvzywhyRPnnnFgfUe2rf2ZozEUa7j4ObwXt7D8oRYXm+USEpk+YfD + 9V3CvGvAgmPuuidGWwnZdPcNX/w/VW5p9oWRgJFYChb5+XCu7y0tFJX/usduZEY9/MvJs + Iv0+OFf1TXc29qFqwGYVSyfimBroGFdYXmHSwK7wHJ1GUsdSRhQz4eYIdk+6c4LNx9JgO + 5Z+3Q29tlh9WwuuQKE/JlKJ/1I9LC0RmyJyxSaiTLDiL+7J2O/hULmyZimbXVcYuXqDdo + KAdPryYhmWWyBFkZfUa88GxwVf+WDLQqXhv+CDGRusbW2opVvv6p7NUwLh9PPOGnRLsS2 + y1fZDVtz60ZMp8MQPACYjlzvc2lF5Z1Cvskr3O9KbT27V7AyLXmU+tbMrDLpC0= + project_name: !encrypted/pkcs1-oaep + - 
IRSHyf964g3q7vHY08reyx69cGDLG/+kkEnZ4fs4qiwBw1RL1wKW3r3Omi1PLXDHHCHfC + jlRrwvZh80CzG3nqt94WSiASjn4XvZtCV0++UZxCkdEs/2SXN1YYpBGLqotM91NhQHCpo + Xu6KD7U8ckZgjAQFzV/rF7pnFSvzb14PQqBiQ4Ei7nFyrg6sW20ratjC+pBboUORPvPjG + wuY/lt8kRXYnPlI/oeFngXMl/WD7z5k0kLwUcg/z9x3uF6b6xozR8Vzjal13RR7FU5Tu7 + T5Qr8uREPHlK8aU90XnNrlJqIAfIFuAlmZCeckIMlVqGjGBekI2W/zPXhL/SjR2SNeTIl + SwKfInnT0SfGqKTAjgPJAocZSNppt4ql1EsS3Rdp8SQ0EGW7pXs73svexNRhh4k1m7gM1 + 54OoyS2wtMaTR3Q3L92ZuT2DdxmPbvXThbRO5P2g0yDpp/HuWkQyHq9b1tZD+p7akU7p+ + g8fIQFKFueFP0T6XszQSPySjjaTZOWd0CQC2oTlivcf7oZ4etp22Zh7IDCXWLX39C2LkF + XLBaEa9LRxn1UwJ2bz2nUPjqDsOz2nRskC9Yz0XOOEKMokJ4POj+uac1iRfAf+hAGd9uE + 7rNIp/7oV5ABOimJ5bgCI1SWAsz2F1lRq+bulzbONLmWfPik52bo/elXTxRais= + username: !encrypted/pkcs1-oaep + - bTHRzdAYEKXeFhrU3sBRN19ygO2t2zzXdeuB4DQq7Q+7VW7Apo8Vo1eaqpqjUnpI2jPG+ + DJSg0ZG3tUsnRwwKo3N8RzwFNWj5wcUEtHjmFgMmLBvlv9Jv6OeN7R7AH7b21agTMTvwz + X7hGWbYSEgDLn80uNTwcm4GVA0mycXDtIvZ4lPiCGkUJYav9++YbGYzDyiy2pBgVU0r5G + 7GTO+cHQWUw+LL/scBijL4khLIxiHNgUNNfgAYOI/JQ720DxXSDF30SN8fRy0H0jl54Gr + w0exl9QPBjI+o+qvFKq2Bni8dTp96MaC8pDxP/1/R8mEMYD2Ei3Ame1dfeUz7OgrQfpQv + xlDSE/sM2/g0PG3YlpG+aCllZ1el2qM/B+pyq5JXf26swp1RdjehvUSIi3gQaqkC3qpRt + 2FgZDKdHW6PYRmRlCphS5WK1otdCQEvyJ+s4QB4PooMcD4rqAf5hURGd5zr/aajqmEgXX + eJKeLQrQD+4yJWeopcq7a66R3LeL07Dko2LWWlL6adGeQ5yd3eIZK7zwObTVE64DSbXDs + 3UI8U6Qa3EMlrfEk8TXcK1QW2EM1JFiPBSm9e8zojTtg/caAyROXgn1T9qv0FKMcJZrOo + Qt+n7vv1wkCSUoUEQFIadcMUn5EoXeTcRbjAOsRFN/OOh6+4jyNTh17cOC5dkc= + password: !encrypted/pkcs1-oaep + - FbeRKkCs2YlDYm944EUuUbY2mVcTwSgE00gMZokmXR2WjKqRsuLFpkOe9opndwqV1tUyj + mxAGizoGlzI+Lg8VnS47zShM+UqgaNzC148iY+WBuLXAEoxS3c9Gxz03Gm/Q2Tu6MJoCG + OY8JvQkq+pjwkV61sIawTfQRTZkwjFO8F/viSOuF75PDZthY5SuMN5MEJ8B8Ska0WNbjw + Edo623gZnyZsPvZwnqnP+yK0HW0smohKkvjHPZ5SGFiQ0G3eTSHaL5wrYWbkcZ5Gb4UgX + x1edebv0ata0fZ8nhIwTrDIVe9icuijuV1ZkvHMGPvB50fkup4/QyObx6QUhL6D0mXaK5 + fIq+dgrzkvcoODrwpXvBVxjNYnM+DBeMbN0V8d4vDvsRPsWCxIenETse1gD0PJyXx29br + 
/Vild1xO1JnxoU469fl/gzdntyoV/QaLDteLKMFJISAFuVrcCEUz63s37iKAy6LnCtv/J + PjciFvc2OR0cGUC/an3xtmqi18GWcWdinaBA0+OEnArdOdSc79MTZnMifICAeCQ3yiEnA + 001hbBrRYTHgitpo4gYJOFMVufhcfvq6yB9wi3MqvpKP8wGH2SyNz7y5Gy9zbUgQFsRP7 + 2h3LRDRCVGYBVgBLD5mcIMn93HddOko8Q8RO8qVZM13R39dgGAi0KMEhF3bpjA= + +# We define additional project entity not to handle acceptance jobs in +# already complex enough general project entity. +- project: + post-review: + jobs: + - openstacksdk-acceptance-devstack + - openstacksdk-acceptance-cleura diff --git a/zuul.d/functional-jobs.yaml b/zuul.d/functional-jobs.yaml new file mode 100644 index 0000000000..70c613bf62 --- /dev/null +++ b/zuul.d/functional-jobs.yaml @@ -0,0 +1,443 @@ +--- +# Definitions of functional jobs +- job: + name: openstacksdk-functional-devstack-minimum + parent: devstack-tox-functional + description: | + Minimum job for devstack-based functional tests + post-run: playbooks/devstack/post.yaml + roles: + # NOTE: We pull in roles from the tempest repo for stackviz processing. + - zuul: opendev.org/openstack/tempest + required-projects: + - name: openstack/openstacksdk + - name: openstack/os-client-config + timeout: 9000 + vars: + devstack_localrc: + Q_ML2_PLUGIN_EXT_DRIVERS: qos,port_security + Q_AGENT: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + devstack_services: + # OVN services + ovn-controller: false + ovn-northd: false + ovs-vswitchd: false + ovsdb-server: false + q-ovn-metadata-agent: false + # Neutron services + q-agt: true + q-dhcp: true + q-l3: true + q-metering: true + q-svc: true + # sdk doesn't need vnc access + n-novnc: false + # sdk testing uses config drive only + n-api-meta: false + q-meta: false + tox_environment: + # Do we really need to set this? It's cargo culted + PYTHONUNBUFFERED: 'true' + # Is there a way we can query the localconf variable to get these + # rather than setting them explicitly? 
+ OPENSTACKSDK_HAS_DESIGNATE: 0 + OPENSTACKSDK_HAS_HEAT: 0 + OPENSTACKSDK_HAS_MAGNUM: 0 + OPENSTACKSDK_HAS_NEUTRON: 1 + OPENSTACKSDK_HAS_SWIFT: 1 + tox_install_siblings: false + tox_envlist: functional + zuul_copy_output: + '{{ ansible_user_dir }}/stackviz': logs + zuul_work_dir: src/opendev.org/openstack/openstacksdk + +- job: + name: openstacksdk-functional-devstack-base + parent: openstacksdk-functional-devstack-minimum + description: | + Base job for devstack-based functional tests + vars: + devstack_plugins: + neutron: https://opendev.org/openstack/neutron + devstack_local_conf: + post-config: + $CINDER_CONF: + DEFAULT: + osapi_max_limit: 6 + +- job: + name: openstacksdk-functional-devstack + parent: openstacksdk-functional-devstack-base + description: | + Run openstacksdk functional tests against a master devstack + required-projects: + - openstack/heat + vars: + devstack_localrc: + DISABLE_AMP_IMAGE_BUILD: true + Q_SERVICE_PLUGIN_CLASSES: qos,trunk + # TODO(frickler): drop this once heat no longer needs it + KEYSTONE_ADMIN_ENDPOINT: true + devstack_plugins: + heat: https://opendev.org/openstack/heat + tox_environment: + OPENSTACKSDK_HAS_HEAT: 1 + devstack_services: + neutron-qos: true + neutron-trunk: true + neutron-port-forwarding: true + +- job: + name: openstacksdk-functional-devstack-networking + parent: openstacksdk-functional-devstack + description: | + Run openstacksdk functional tests against a devstack with advanced + networking services enabled. 
+ required-projects: + - openstack/designate + - openstack/octavia + vars: + configure_swap_size: 8192 + devstack_local_conf: + post-config: + $OCTAVIA_CONF: + DEFAULT: + debug: true + controller_worker: + amphora_driver: amphora_noop_driver + compute_driver: compute_noop_driver + network_driver: network_noop_driver + certificates: + cert_manager: local_cert_manager + devstack_localrc: + Q_SERVICE_PLUGIN_CLASSES: qos,trunk + devstack_plugins: + designate: https://opendev.org/openstack/designate + octavia: https://opendev.org/openstack/octavia + devstack_services: + designate: true + octavia: true + o-api: true + o-cw: true + o-hm: true + o-hk: true + neutron-dns: true + s-account: false + s-container: false + s-object: false + s-proxy: false + h-eng: false + h-api: false + h-api-cfn: false + tox_environment: + OPENSTACKSDK_HAS_DESIGNATE: 1 + OPENSTACKSDK_HAS_SWIFT: 0 + OPENSTACKSDK_HAS_HEAT: 0 + +- job: + name: openstacksdk-functional-devstack-networking-ext + parent: openstacksdk-functional-devstack-networking + description: | + Run openstacksdk functional tests against a devstack with super advanced + networking services enabled (VPNaas, FWaas) which still require ovs. 
+ required-projects: + - openstack/neutron-fwaas + - openstack/neutron-vpnaas + - openstack/tap-as-a-service + vars: + INSTALL_OVN: False + configure_swap_size: 8192 + devstack_local_conf: + post-config: + $OCTAVIA_CONF: + DEFAULT: + debug: true + controller_worker: + amphora_driver: amphora_noop_driver + compute_driver: compute_noop_driver + network_driver: network_noop_driver + certificates: + cert_manager: local_cert_manager + $NEUTRON_CONF: + DEFAULT: + router_distributed: True + l3_ha: True + "/$NEUTRON_CORE_PLUGIN_CONF": + ovs: + tunnel_bridge: br-tun + bridge_mappings: public:br-ex + $NEUTRON_L3_CONF: + DEFAULT: + agent_mode: dvr_snat + agent: + availability_zone: nova + debug_iptables_rules: True + $NEUTRON_DHCP_CONF: + agent: + availability_zone: nova + devstack_localrc: + Q_SERVICE_PLUGIN_CLASSES: qos,trunk,taas + NETWORK_API_EXTENSIONS: "agent,binding,dhcp_agent_scheduler,external-net,ext-gw-mode,extra_dhcp_opts,quotas,router,security-group,subnet_allocation,network-ip-availability,auto-allocated-topology,timestamp_core,tag,service-type,rbac-policies,standard-attr-description,pagination,sorting,project-id,fwaas_v2,vpnaas,taas,tap_mirror" + Q_AGENT: openvswitch + Q_ML2_TENANT_NETWORK_TYPE: vxlan + Q_ML2_PLUGIN_MECHANISM_DRIVERS: openvswitch + IPSEC_PACKAGE: libreswan + TAAS_SERVICE_DRIVER: TAAS:TAAS:neutron_taas.services.taas.service_drivers.taas_rpc.TaasRpcDriver:default + devstack_plugins: + designate: https://opendev.org/openstack/designate + octavia: https://opendev.org/openstack/octavia + neutron-fwaas: https://opendev.org/openstack/neutron-fwaas.git + neutron-vpnaas: https://opendev.org/openstack/neutron-vpnaas.git + tap-as-a-service: https://opendev.org/openstack/tap-as-a-service.git + devstack_services: + designate: true + octavia: true + o-api: true + o-cw: true + o-hm: true + o-hk: true + neutron-dns: true + s-account: false + s-container: false + s-object: false + s-proxy: false + h-eng: false + h-api: false + h-api-cfn: false + q-fwaas-v2: 
true + taas: true + tap_mirror: true + tox_environment: + OPENSTACKSDK_HAS_DESIGNATE: 1 + OPENSTACKSDK_HAS_SWIFT: 0 + OPENSTACKSDK_HAS_HEAT: 0 + +- job: + name: openstacksdk-functional-devstack-tips + parent: openstacksdk-functional-devstack + description: | + Run openstacksdk functional tests with tips of library dependencies + against a master devstack. + required-projects: + - openstack/keystoneauth + - openstack/openstacksdk + - openstack/os-client-config + vars: + tox_install_siblings: true + +- job: + name: openstacksdk-functional-devstack-magnum + parent: openstacksdk-functional-devstack + description: | + Run openstacksdk functional tests against a master devstack with magnum + required-projects: + - openstack/magnum + - openstack/python-magnumclient + vars: + devstack_plugins: + magnum: https://opendev.org/openstack/magnum + devstack_localrc: + MAGNUM_GUEST_IMAGE_URL: https://tarballs.openstack.org/magnum/images/fedora-atomic-f23-dib.qcow2 + MAGNUM_IMAGE_NAME: fedora-atomic-f23-dib + devstack_services: + s-account: false + s-container: false + s-object: false + s-proxy: false + tox_environment: + OPENSTACKSDK_HAS_SWIFT: 0 + OPENSTACKSDK_HAS_MAGNUM: 1 + +- job: + name: openstacksdk-functional-devstack-ironic + parent: openstacksdk-functional-devstack-minimum + description: | + Run openstacksdk functional tests against a master devstack with ironic + required-projects: + - openstack/ironic + - openstack/ironic-python-agent-builder + vars: + devstack_localrc: + OVERRIDE_PUBLIC_BRIDGE_MTU: 1400 + IRONIC_BAREMETAL_BASIC_OPS: true + IRONIC_BUILD_DEPLOY_RAMDISK: false + IRONIC_CALLBACK_TIMEOUT: 600 + IRONIC_DEPLOY_DRIVER: ipmi + IRONIC_VM_COUNT: 2 + IRONIC_VM_LOG_DIR: '{{ devstack_base_dir }}/ironic-bm-logs' + IRONIC_VM_SPECS_RAM: 2500 + devstack_plugins: + ironic: https://opendev.org/openstack/ironic + devstack_services: + c-api: false + c-bak: false + c-sch: false + c-vol: false + cinder: false + s-account: false + s-container: false + s-object: false + 
s-proxy: false + n-api: false + n-api-meta: false + n-cond: false + n-cpu: false + n-novnc: false + n-sch: false + nova: false + placement-api: false + dstat: false + tox_environment: + OPENSTACKSDK_HAS_IRONIC: 1 + # NOTE(dtantsur): this job cannot run many regular tests (e.g. compute + # tests will take too long), so limiting it to baremetal tests only. + OPENSTACKSDK_TESTS_SUBDIR: baremetal + zuul_copy_output: + '{{ devstack_base_dir }}/ironic-bm-logs': logs + +- job: + name: openstacksdk-ansible-functional-devstack + parent: openstacksdk-functional-devstack + description: | + Run openstacksdk ansible functional tests against a master devstack + using released version of ansible. + vars: + tox_envlist: ansible + +- job: + name: openstacksdk-ansible-stable-2.8-functional-devstack + parent: openstacksdk-ansible-functional-devstack + description: | + Run openstacksdk ansible functional tests against a master devstack + using git stable-2.8 branch version of ansible. + required-projects: + - name: github.com/ansible/ansible + override-checkout: stable-2.8 + - name: openstack/openstacksdk + override-checkout: master + - name: openstack/devstack + override-checkout: master + vars: + # test-matrix grabs branch from the zuul branch setting. If the job + # is triggered by ansible, that branch will be devel which doesn't + # make sense to devstack. Override so that we run the right thing. + test_matrix_branch: master + tox_install_siblings: true + +- job: + name: openstacksdk-ansible-stable-2.9-functional-devstack + parent: openstacksdk-ansible-functional-devstack + description: | + Run openstacksdk ansible functional tests against a master devstack + using git stable-2.9 branch version of ansible. + required-projects: + - name: github.com/ansible/ansible + override-checkout: stable-2.9 + - name: openstack/openstacksdk + override-checkout: master + - name: openstack/devstack + override-checkout: master + vars: + # test-matrix grabs branch from the zuul branch setting. 
If the job + # is triggered by ansible, that branch will be devel which doesn't + # make sense to devstack. Override so that we run the right thing. + test_matrix_branch: master + tox_install_siblings: true + +- job: + name: openstacksdk-functional-devstack-masakari + parent: openstacksdk-functional-devstack-minimum + description: | + Run openstacksdk functional tests against a master devstack with masakari + required-projects: + - openstack/masakari + - openstack/masakari-monitors + vars: + devstack_plugins: + masakari: https://opendev.org/openstack/masakari + devstack_services: + masakari-api: true + masakari-engine: true + tox_environment: + OPENSTACKSDK_HAS_MASAKARI: 1 + OPENSTACKSDK_TESTS_SUBDIR: instance_ha + zuul_copy_output: + '{{ devstack_base_dir }}/masakari-logs': logs + +- job: + name: openstacksdk-functional-devstack-manila + parent: openstacksdk-functional-devstack-minimum + description: | + Run openstacksdk functional tests against a master devstack with manila + required-projects: + - openstack/manila + - openstack/openstacksdk + vars: + devstack_localrc: + # Set up manila with a fake driver - makes things super fast and should + # have no impact on the API + MANILA_INSTALL_TEMPEST_PLUGIN_SYSTEMWIDE: false + SHARE_DRIVER: manila.tests.share.drivers.dummy.DummyDriver + MANILA_CONFIGURE_GROUPS: alpha,beta,gamma,membernet + MANILA_CONFIGURE_DEFAULT_TYPES: true + MANILA_SERVICE_IMAGE_ENABLED: false + MANILA_SHARE_MIGRATION_PERIOD_TASK_INTERVAL: 1 + MANILA_SERVER_MIGRATION_PERIOD_TASK_INTERVAL: 10 + MANILA_REPLICA_STATE_UPDATE_INTERVAL: 10 + MANILA_DEFAULT_SHARE_TYPE_EXTRA_SPECS: 'snapshot_support=True create_share_from_snapshot_support=True revert_to_snapshot_support=True mount_snapshot_support=True' + MANILA_ENABLED_BACKENDS: alpha,beta,gamma + MANILA_OPTGROUP_alpha_driver_handles_share_servers: false + MANILA_OPTGROUP_alpha_replication_domain: DUMMY_DOMAIN + MANILA_OPTGROUP_alpha_share_backend_name: ALPHA + MANILA_OPTGROUP_alpha_share_driver: 
manila.tests.share.drivers.dummy.DummyDriver + MANILA_OPTGROUP_beta_driver_handles_share_servers: false + MANILA_OPTGROUP_beta_replication_domain: DUMMY_DOMAIN + MANILA_OPTGROUP_beta_share_backend_name: BETA + MANILA_OPTGROUP_beta_share_driver: manila.tests.share.drivers.dummy.DummyDriver + MANILA_OPTGROUP_gamma_driver_handles_share_servers: true + MANILA_OPTGROUP_gamma_network_config_group: membernet + MANILA_OPTGROUP_gamma_share_backend_name: GAMMA + MANILA_OPTGROUP_gamma_share_driver: manila.tests.share.drivers.dummy.DummyDriver + MANILA_OPTGROUP_gamma_admin_network_config_group: membernet + MANILA_OPTGROUP_membernet_network_api_class: manila.network.standalone_network_plugin.StandaloneNetworkPlugin + MANILA_OPTGROUP_membernet_network_plugin_ipv4_enabled: true + MANILA_OPTGROUP_membernet_standalone_network_plugin_allowed_ip_ranges: 10.0.0.10-10.0.0.209 + MANILA_OPTGROUP_membernet_standalone_network_plugin_gateway: 10.0.0.1 + MANILA_OPTGROUP_membernet_standalone_network_plugin_mask: 24 + MANILA_OPTGROUP_membernet_standalone_network_plugin_network_type: vlan + MANILA_OPTGROUP_membernet_standalone_network_plugin_segmentation_id: 1010 + devstack_plugins: + manila: https://opendev.org/openstack/manila + devstack_services: + c-api: false + c-bak: false + c-sch: false + c-vol: false + cinder: false + s-account: false + s-container: false + s-object: false + s-proxy: false + n-api: false + n-api-meta: false + n-cond: false + n-cpu: false + n-novnc: false + n-sch: false + nova: false + placement-api: false + dstat: false + tox_environment: + OPENSTACKSDK_HAS_MANILA: 1 + OPENSTACKSDK_TESTS_SUBDIR: shared_file_system + +- project-template: + name: openstacksdk-functional-tips + check: + jobs: + - openstacksdk-functional-devstack-tips + gate: + jobs: + - openstacksdk-functional-devstack-tips diff --git a/zuul.d/metal-jobs.yaml b/zuul.d/metal-jobs.yaml new file mode 100644 index 0000000000..26118f6d21 --- /dev/null +++ b/zuul.d/metal-jobs.yaml @@ -0,0 +1,32 @@ +--- +# 
Definitions of Ironic based jobs with a dedicated project entry to keep them +# out of general entry. +- job: + name: metalsmith-integration-openstacksdk-src + parent: metalsmith-integration-http-cirros + required-projects: + - openstack/openstacksdk + +- job: + name: bifrost-integration-openstacksdk-src + parent: bifrost-integration-on-ubuntu-jammy + required-projects: + - openstack/ansible-collections-openstack + - openstack/openstacksdk + +- job: + name: ironic-inspector-tempest-openstacksdk-src + parent: ironic-inspector-tempest + required-projects: + - openstack/openstacksdk + +- project: + check: + jobs: + # Ironic jobs, non-voting to avoid tight coupling + - ironic-inspector-tempest-openstacksdk-src: + voting: false + - bifrost-integration-openstacksdk-src: + voting: false + - metalsmith-integration-openstacksdk-src: + voting: false diff --git a/zuul.d/project.yaml b/zuul.d/project.yaml new file mode 100644 index 0000000000..757e9c15ef --- /dev/null +++ b/zuul.d/project.yaml @@ -0,0 +1,35 @@ +--- +# Central project entity. It pulls general templates and basic jobs. +# functional-jobs, metal-jobs and acceptance-jobs are being +# merged with this entity into single one. 
+- project: + templates: + - check-requirements + - openstack-python3-jobs + - openstacksdk-functional-tips + - openstacksdk-tox-tips + - os-client-config-tox-tips + - osc-tox-unit-tips + - publish-openstack-docs-pti + - release-notes-jobs-python3 + check: + jobs: + - openstack-tox-py312 + - openstacksdk-functional-devstack + - openstacksdk-functional-devstack-networking + - openstacksdk-functional-devstack-networking-ext + - openstacksdk-functional-devstack-magnum: + voting: false + - openstacksdk-functional-devstack-manila + - openstacksdk-functional-devstack-masakari + - openstacksdk-functional-devstack-ironic + - osc-functional-devstack-tips: + voting: false + - ansible-collections-openstack-functional-devstack: + voting: false + gate: + jobs: + - openstacksdk-functional-devstack + - openstacksdk-functional-devstack-networking + - openstacksdk-functional-devstack-networking-ext + - openstacksdk-functional-devstack-ironic